diff --git a/.asf.yaml b/.asf.yaml new file mode 100644 index 00000000000..ac29efed9ff --- /dev/null +++ b/.asf.yaml @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +notifications: + commits: commits@cassandra.apache.org + issues: commits@cassandra.apache.org + pullrequests: pr@cassandra.apache.org + jira_options: link worklog + +github: + description: "Java Driver for Apache Cassandra®" + homepage: https://cassandra.apache.org/ + enabled_merge_buttons: + squash: false + merge: false + rebase: true + features: + wiki: false + issues: false + projects: false + autolink_jira: + - CASSANDRA + - CASSJAVA diff --git a/.gitignore b/.gitignore index eaf1a9ef8b2..07449882cc0 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ .project .java-version +.flattened-pom.xml .documenter_local_last_run /docs diff --git a/.snyk b/.snyk new file mode 100644 index 00000000000..a081b17225c --- /dev/null +++ b/.snyk @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities. 
+version: v1.22.2
+# ignores vulnerabilities until expiry date; change duration by modifying expiry date
+ignore:
+  SNYK-JAVA-ORGGRAALVMSDK-2767964:
+    - '*':
+        reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year
+        expires: 2024-01-10T00:00:00.000Z
+        created: 2023-06-21T00:00:00.000Z
+  SNYK-JAVA-ORGGRAALVMSDK-2769618:
+    - '*':
+        reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year
+        expires: 2024-01-10T00:00:00.000Z
+        created: 2023-06-21T00:00:00.000Z
+  SNYK-JAVA-ORGGRAALVMSDK-5457933:
+    - '*':
+        reason: cannot upgrade to graal-sdk 22.1.0+ until we move off Java8, which is slated for later this year
+        expires: 2024-01-10T00:00:00.000Z
+        created: 2023-06-21T00:00:00.000Z
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index a7f970a8c20..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: java
-sudo: false
-# see https://sormuras.github.io/blog/2018-03-20-jdk-matrix.html
-matrix:
-  include:
-    # 8
-    - env: JDK='OpenJDK 8'
-      jdk: openjdk8
-    # 11
-    - env: JDK='OpenJDK 11'
-      # switch to JDK 11 before running tests
-      before_script: . $TRAVIS_BUILD_DIR/ci/install-jdk.sh -F 11 -L GPL
-before_install:
-  # Require JDK8 for compiling
-  - jdk_switcher use openjdk8
-install: mvn install -DskipTests=true -Dmaven.javadoc.skip=true -B -V
-script: mvn test -Djacoco.skip=true -B -V
-cache:
-  directories:
-    - $HOME/.m2
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b34db231955..53857383cf2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,7 +1,28 @@
+<!--
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+
+-->
+
# Contributing guidelines

## Code formatting

+### Java
+
We follow the [Google Java Style Guide](https://google.github.io/styleguide/javaguide.html). See
https://github.com/google/google-java-format for IDE plugins. The rules are not configurable.

@@ -11,11 +32,25 @@ The build will fail if the code is not formatted. To format all files from the c
mvn fmt:format
```

-Some aspects are not covered by the formatter:
+Some aspects are not covered by the formatter: braces must be used with `if`, `else`, `for`, `do`
+and `while` statements, even when the body is empty or contains only a single statement.
+
+### XML

-* braces must be used with `if`, `else`, `for`, `do` and `while` statements, even when the body is
-  empty or contains only a single statement.
-* XML files: indent with two spaces and wrap to respect the column limit of 100 characters.
+The build will fail if XML files are not formatted correctly. Run the following command before you
+commit:
+
+```
+mvn xml-format:xml-format
+```
+
+The formatter does not enforce a maximum line length, but please try to keep it below 100 characters
+to keep files readable across all mediums (IDE, terminal, GitHub...).
+
+### Other text files (Markdown, etc.)
+
+Similarly, enforce a right margin of 100 characters in those files. Editors and IDEs generally have
+a way to configure this (for IDEA, install the "Wrap to column" plugin).

## Coding style -- production code

@@ -128,43 +163,29 @@ line.
When you add or review new code, take a moment to run the tests in `DEBUG` mode and check if the
output looks good.

-### No stream API
+### Don't abuse the stream API

-Please don't use `java.util.stream` in the driver codebase. Streams were designed for *data
-processing*, not to make your collection traversals "functional".
-
-Here's an example from the driver codebase (`ChannelSet`):
+The `java.util.stream` API is often used (abused?)
+as a "functional API for collections":

```java
-DriverChannel[] snapshot = this.channels;
-DriverChannel best = null;
-int bestScore = 0;
-for (DriverChannel channel : snapshot) {
-  int score = channel.availableIds();
-  if (score > bestScore) {
-    bestScore = score;
-    best = channel;
-  }
-}
-return best;
+List<Integer> sizes = words.stream().map(String::length).collect(Collectors.toList());
```

-And here's a terrible way to rewrite it using streams:
-
-```java
-// Don't do this:
-DriverChannel best =
-    Stream.of(snapshot)
-        .reduce((a, b) -> a.availableIds() > b.availableIds() ? a : b)
-        .get();
-```
+The perceived advantages of this approach over traditional for-loops are debatable:

-The stream version is not easier to read, and will probably be slower (creating intermediary objects
-vs. an array iteration, compounded by the fact that this particular array typically has a low
-cardinality).
+* readability: this is highly subjective. But consider the following:
+  * everyone can read for-loops, whether they are familiar with the Stream API or not. The opposite
+    is not true.
+  * the stream API does not spell out all the details: what kind of list does `Collectors.toList()`
+    return? Is it pre-sized? Mutable? Thread-safe?
+  * the stream API looks pretty on simple examples, but things can get ugly fast. Try rewriting
+    `NetworkTopologyReplicationStrategy` with streams.
+* concision: this is irrelevant. When we look at code we care about maintainability, not how many
+  keystrokes the author saved. The for-loop version of the above example is just 5 lines long, and
+  your brain doesn't take longer to parse it.

-The driver never does the kind of processing that the stream API is intended for; the only large
-collections we manipulate are result sets, and these get passed on to the client directly.
+The bottom line: don't try to "be functional" at all cost. Plain old for-loops are often just as
+simple.

### Never assume a specific format for `toString()`

@@ -216,6 +237,10 @@ Static imports are permitted in a couple of places:
  when(codecRegistry.codecFor(DataTypes.INT)).thenReturn(codec);
  verify(codec).decodePrimitive(any(ByteBuffer.class), eq(ProtocolVersion.DEFAULT));
  ```
+* All Awaitility methods, e.g.:
+  ```java
+  await().until(() -> somethingBecomesTrue());
+  ```

Test method names use lower snake case, generally start with `should`, and clearly indicate the
purpose of the test, for example: `should_fail_if_key_already_exists`. If you have trouble coming

@@ -258,9 +283,11 @@ process, which can be either one of:

For an example of a CCM-based test, see `PlainTextAuthProviderIT`.

+#### Categories
+
Integration tests are divided into three categories:

-#### Parallelizable tests
+##### Parallelizable tests

These tests can be run in parallel, to speed up the build. They either use:
* dedicated Simulacron instances. These are lightweight, and Simulacron will manage the ports to

@@ -268,7 +295,9 @@ These tests can be run in parallel, to speed up the build. They either use:
* a shared, one-node CCM cluster. Each test works in its own keyspace.

The build runs them with a configurable degree of parallelism (currently 8). The shared CCM cluster
-is initialized the first time it's used, and stopped before moving on to serial tests.
+is initialized the first time it's used, and stopped before moving on to serial tests. Note that we
+run with `parallel=classes`, which means methods within the same class never run concurrently with
+each other.
To make an integration test parallelizable, annotate it with `@Category(ParallelizableTests.class)`.
If you use CCM, it **must** be with `CcmRule`.

@@ -276,7 +305,7 @@ If you use CCM, it **must** be with `CcmRule`.
For an example of a Simulacron-based parallelizable test, see `NodeTargetingIT`. For a CCM-based
test, see `DirectCompressionIT`.

-#### Serial tests
+##### Serial tests

These tests cannot run in parallel, in general because they require CCM clusters of different sizes,
or with a specific configuration (we never run more than one CCM cluster simultaneously: it would be

@@ -293,7 +322,7 @@ Note: if multiple serial tests have a common "base" class, do not pull up `Custo
child class must have its own instance. Otherwise they share the same CCM instance, and the first
one destroys it on teardown. See `TokenITBase` for how to organize code in those cases.

-#### Isolated tests
+##### Isolated tests

Not only can those tests not run in parallel, they also require specific environment tweaks,
typically system properties that need to be set before initialization.

@@ -305,6 +334,51 @@ To isolate an integration test, annotate it with `@Category(IsolatedTests.class)

For an example, see `HeapCompressionIT`.

+#### About test rules
+
+Do not mix `CcmRule` and `SimulacronRule` in the same test. It makes things harder to follow, and
+can be inefficient (if the `SimulacronRule` is method-level, it will create a Simulacron cluster for
+every test method, even those that only need CCM).
+
+##### Class-level rules
+
+Rules annotated with `@ClassRule` wrap the whole test class, and are reused across methods. Try to
+use this as much as possible, as it's more efficient. The fields need to be static; also make them
+final and use constant naming conventions, like `CCM_RULE`.
+
+When you use a server rule (`CcmRule` or `SimulacronRule`) and a `SessionRule` at the same level,
+wrap them into a rule chain to ensure proper initialization order:
+
+```java
+private static final CcmRule CCM_RULE = CcmRule.getInstance();
+private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build();
+
+@ClassRule
+public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+```
+
+##### Method-level rules
+
+Rules annotated with `@Rule` wrap each test method. Use lower-camel case for field names:
+
+```java
+private CcmRule ccmRule = CcmRule.getInstance();
+private SessionRule<CqlSession> sessionRule = SessionRule.builder(ccmRule).build();
+
+@Rule
+public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+```
+
+Only use this for:
+
+* CCM tests that use `@CassandraRequirement` or `@DseRequirement` restrictions at the method level
+  (ex: `BatchStatementIT`).
+* tests where you *really* need to restart from a clean state for every method.
+
+##### Mixed
+
+It's also possible to use a `@ClassRule` for CCM / Simulacron, and a `@Rule` for the session rule.
+In that case, you don't need to use a rule chain.

## Running the tests

@@ -368,6 +442,23 @@ the script to only test what's actually being committed, but I couldn't get it t
(it's still in there but commented). Keep this in mind when you commit, and don't forget to re-add
the changes if the first attempt failed and you fixed the tests.
+## Speeding up the build for local tests + +If you need to install something in your local repository quickly, you can use the `fast` profile to +skip all "non-essential" checks (licenses, formatting, tests, etc): + +``` +mvn clean install -Pfast +``` + +You can speed things up even more by targeting specific modules with the `-pl` option: + +``` +mvn clean install -Pfast -pl core,query-builder,mapper-runtime,mapper-processor,bom +``` + +Please run the normal build at least once before you push your changes. + ## Commits Keep your changes **focused**. Each commit should have a single, clear purpose expressed in its diff --git a/Jenkinsfile-asf b/Jenkinsfile-asf new file mode 100644 index 00000000000..4b5041903c1 --- /dev/null +++ b/Jenkinsfile-asf @@ -0,0 +1,81 @@ +#!groovy + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +pipeline { + agent { + label 'cassandra-small' + } + + triggers { + // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) + cron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? 
'@weekly' : '') + } + + stages { + stage('Matrix') { + matrix { + axes { + axis { + name 'TEST_JAVA_VERSION' + values 'openjdk@1.8.0-292', 'openjdk@1.11.0-9', 'openjdk@1.17.0', 'openjdk@1.21.0' + } + axis { + name 'SERVER_VERSION' + values '3.11', + '4.0', + '4.1', + '5.0' + } + } + stages { + stage('Tests') { + agent { + label 'cassandra-medium' + } + steps { + script { + executeTests() + junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true + junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true + } + } + } + } + } + } + } +} + +def executeTests() { + def testJavaMajorVersion = (TEST_JAVA_VERSION =~ /@(?:1\.)?(\d+)/)[0][1] + sh """ + container_id=\$(docker run -td -e TEST_JAVA_VERSION=${TEST_JAVA_VERSION} -e SERVER_VERSION=${SERVER_VERSION} -e TEST_JAVA_MAJOR_VERSION=${testJavaMajorVersion} -v \$(pwd):/home/docker/cassandra-java-driver apache.jfrog.io/cassan-docker/apache/cassandra-java-driver-testing-ubuntu2204 'sleep 2h') + docker exec --user root \$container_id bash -c \"sudo bash /home/docker/cassandra-java-driver/ci/create-user.sh docker \$(id -u) \$(id -g) /home/docker/cassandra-java-driver\" + docker exec --user docker \$container_id './cassandra-java-driver/ci/run-tests.sh' + ( nohup docker stop \$container_id >/dev/null 2>/dev/null & ) + """ +} + +// branch pattern for cron +// should match 3.x, 4.x, 4.5.x, etc +def branchPatternCron() { + ~'((\\d+(\\.[\\dx]+)+))' +} diff --git a/Jenkinsfile-datastax b/Jenkinsfile-datastax new file mode 100644 index 00000000000..602f33101ca --- /dev/null +++ b/Jenkinsfile-datastax @@ -0,0 +1,639 @@ +#!groovy +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +def initializeEnvironment() { + env.DRIVER_DISPLAY_NAME = 'Java Driver for Apache CassandraⓇ' + env.DRIVER_METRIC_TYPE = 'oss' + + env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" + env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" + env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" + + env.MAVEN_HOME = "${env.HOME}/.mvn/apache-maven-3.8.8" + env.PATH = "${env.MAVEN_HOME}/bin:${env.PATH}" + + /* + * As of JAVA-3042 JAVA_HOME is always set to JDK8 and this is currently necessary for mvn compile and DSE Search/Graph. + * To facilitate testing with JDK11/17 we feed the appropriate JAVA_HOME into the maven build via commandline. + * + * Maven command-line flags: + * - -DtestJavaHome=/path/to/java/home: overrides JAVA_HOME for surefire/failsafe tests, defaults to environment JAVA_HOME. + * - -Ptest-jdk-N: enables profile for running tests with a specific JDK version (substitute N for 8/11/17). 
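+   *
+   * For example, running the integration tests against JDK 11 would combine both flags (the JDK
+   * path below is hypothetical; substitute the JDK 11 home on your machine):
+   *
+   *     mvn verify -Ptest-jdk-11 -DtestJavaHome=/usr/lib/jvm/java-11-openjdk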
+ * + * Note test-jdk-N is also automatically loaded based off JAVA_HOME SDK version so testing with an older SDK is not supported. + * + * Environment variables: + * - JAVA_HOME: Path to JDK used for mvn (all steps except surefire/failsafe), Cassandra, DSE. + * - JAVA8_HOME: Path to JDK8 used for Cassandra/DSE if ccm determines JAVA_HOME is not compatible with the chosen backend. + * - TEST_JAVA_HOME: PATH to JDK used for surefire/failsafe testing. + * - TEST_JAVA_VERSION: TEST_JAVA_HOME SDK version number [8/11/17], used to configure test-jdk-N profile in maven (see above) + */ + + env.JAVA_HOME = sh(label: 'Get JAVA_HOME',script: '''#!/bin/bash -le + . ${JABBA_SHELL} + jabba which ${JABBA_VERSION}''', returnStdout: true).trim() + env.JAVA8_HOME = sh(label: 'Get JAVA8_HOME',script: '''#!/bin/bash -le + . ${JABBA_SHELL} + jabba which 1.8''', returnStdout: true).trim() + + sh label: 'Download Apache CassandraⓇ, DataStax Enterprise or DataStax HCD ',script: '''#!/bin/bash -le + . ${JABBA_SHELL} + jabba use 1.8 + . ${CCM_ENVIRONMENT_SHELL} ${SERVER_VERSION} + ''' + + if (env.SERVER_VERSION.split('-')[0] == 'dse') { + env.DSE_FIXED_VERSION = env.SERVER_VERSION.split('-')[1] + sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le + cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF +CCM_CASSANDRA_VERSION=${DSE_FIXED_VERSION} # maintain for backwards compatibility +CCM_VERSION=${DSE_FIXED_VERSION} +CCM_SERVER_TYPE=dse +DSE_VERSION=${DSE_FIXED_VERSION} +CCM_BRANCH=${DSE_FIXED_VERSION} +DSE_BRANCH=${DSE_FIXED_VERSION} +ENVIRONMENT_EOF + ''' + } + + if (env.SERVER_VERSION.split('-')[0] == 'hcd') { + env.HCD_FIXED_VERSION = env.SERVER_VERSION.split('-')[1] + sh label: 'Update environment for DataStax HCD', script: '''#!/bin/bash -le + cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF +CCM_CASSANDRA_VERSION=${HCD_FIXED_VERSION} # maintain for backwards compatibility +CCM_VERSION=${HCD_FIXED_VERSION} +CCM_SERVER_TYPE=hcd +HCD_VERSION=${HCD_FIXED_VERSION} +CCM_BRANCH=${HCD_FIXED_VERSION} +HCD_BRANCH=${HCD_FIXED_VERSION} +ENVIRONMENT_EOF + ''' + } + + sh label: 'Display Java and environment information',script: '''#!/bin/bash -le + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + . ${JABBA_SHELL} + jabba use 1.8 + + java -version + mvn -v + printenv | sort + ''' +} + +def buildDriver(jabbaVersion) { + def buildDriverScript = '''#!/bin/bash -le + + . ${JABBA_SHELL} + jabba use '''+jabbaVersion+''' + + echo "Building with Java version '''+jabbaVersion+'''" + + mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true + ''' + sh label: 'Build driver', script: buildDriverScript +} + +def executeTests() { + def testJavaHome = sh(label: 'Get TEST_JAVA_HOME',script: '''#!/bin/bash -le + . ${JABBA_SHELL} + jabba which ${JABBA_VERSION}''', returnStdout: true).trim() + def testJavaVersion = (JABBA_VERSION =~ /.*\.(\d+)/)[0][1] + + def executeTestScript = '''#!/bin/bash -le + # Load CCM environment variables + set -o allexport + . ${HOME}/environment.txt + set +o allexport + + . ${JABBA_SHELL} + jabba use 1.8 + + if [ "${JABBA_VERSION}" != "1.8" ]; then + SKIP_JAVADOCS=true + else + SKIP_JAVADOCS=false + fi + + INTEGRATION_TESTS_FILTER_ARGUMENT="" + if [ ! 
-z "${INTEGRATION_TESTS_FILTER}" ]; then + INTEGRATION_TESTS_FILTER_ARGUMENT="-Dit.test=${INTEGRATION_TESTS_FILTER}" + fi + printenv | sort + + mvn -B -V ${INTEGRATION_TESTS_FILTER_ARGUMENT} -T 1 verify \ + -Ptest-jdk-'''+testJavaVersion+''' \ + -DtestJavaHome='''+testJavaHome+''' \ + -DfailIfNoTests=false \ + -Dmaven.test.failure.ignore=true \ + -Dmaven.javadoc.skip=${SKIP_JAVADOCS} \ + -Dccm.version=${CCM_CASSANDRA_VERSION} \ + -Dccm.distribution=${CCM_SERVER_TYPE:cassandra} \ + -Dproxy.path=${HOME}/proxy \ + ${SERIAL_ITS_ARGUMENT} \ + ${ISOLATED_ITS_ARGUMENT} \ + ${PARALLELIZABLE_ITS_ARGUMENT} + ''' + echo "Invoking Maven with parameters test-jdk-${testJavaVersion} and testJavaHome = ${testJavaHome}" + sh label: 'Execute tests', script: executeTestScript +} + +def executeCodeCoverage() { + jacoco( + execPattern: '**/target/jacoco.exec', + classPattern: '**/classes', + sourcePattern: '**/src/main/java' + ) +} + +def notifySlack(status = 'started') { + // Notify Slack channel for every build except adhoc executions + if (params.ADHOC_BUILD_TYPE != 'BUILD-AND-EXECUTE-TESTS') { + // Set the global pipeline scoped environment (this is above each matrix) + env.BUILD_STATED_SLACK_NOTIFIED = 'true' + + def buildType = 'Commit' + if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { + buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" + } + + def color = 'good' // Green + if (status.equalsIgnoreCase('aborted')) { + color = '808080' // Grey + } else if (status.equalsIgnoreCase('unstable')) { + color = 'warning' // Orange + } else if (status.equalsIgnoreCase('failed')) { + color = 'danger' // Red + } + + def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] +<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" + if (!status.equalsIgnoreCase('Started')) { + message += """ +${status} after ${currentBuild.durationString - ' and counting'}""" + } + + slackSend color: "${color}", + channel: "#java-driver-dev-bots", + message: "${message}" + } +} + +def describePerCommitStage() { + script { + currentBuild.displayName = "Per-Commit build" + currentBuild.description = 'Per-Commit build and testing of development Apache CassandraⓇ and current DataStax Enterprise against Oracle JDK 8' + } +} + +def describeAdhocAndScheduledTestingStage() { + script { + if (params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION') { + // Ad-hoc build + currentBuild.displayName = "Adhoc testing" + currentBuild.description = "Testing ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} against JDK version ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION}" + } else { + // Scheduled build + currentBuild.displayName = "${params.CI_SCHEDULE.toLowerCase().replaceAll('_', ' ').capitalize()} schedule" + currentBuild.description = "Testing server versions [${params.CI_SCHEDULE_SERVER_VERSIONS}] against JDK version ${params.CI_SCHEDULE_JABBA_VERSION}" + } + } +} + +// branch pattern for cron +// should match 3.x, 4.x, 4.5.x, etc +def branchPatternCron() { + ~"((\\d+(\\.[\\dx]+)+))" +} + +pipeline { + agent none + + // Global pipeline timeout + options { + timeout(time: 10, unit: 'HOURS') + buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts + numToKeepStr: '50')) // Keep only the last 50 build records + } + + parameters { + choice( + name: 'ADHOC_BUILD_TYPE', + choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], + description: '''

Perform an adhoc build operation
<table>
  <tr><th>Choice</th><th>Description</th></tr>
  <tr><td>BUILD</td><td>Performs a Per-Commit build</td></tr>
  <tr><td>BUILD-AND-EXECUTE-TESTS</td><td>Performs a build and executes the integration and unit tests</td></tr>
</table>
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', + choices: ['4.0', // Previous Apache CassandraⓇ + '4.1', // Previous Apache CassandraⓇ + '5.0', // Current Apache CassandraⓇ + 'dse-4.8.16', // Previous EOSL DataStax Enterprise + 'dse-5.0.15', // Long Term Support DataStax Enterprise + 'dse-5.1.35', // Legacy DataStax Enterprise + 'dse-6.0.18', // Previous DataStax Enterprise + 'dse-6.7.17', // Previous DataStax Enterprise + 'dse-6.8.30', // Current DataStax Enterprise + 'dse-6.9.0', // Current DataStax Enterprise + 'hcd-1.0.0', // Current DataStax HCD + 'ALL'], + description: '''Apache Cassandra® and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS builds + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<table>
  <tr><th>Choice</th><th>Description</th></tr>
  <tr><td>4.0</td><td>Apache Cassandra® v4.0.x</td></tr>
  <tr><td>4.1</td><td>Apache Cassandra® v4.1.x</td></tr>
  <tr><td>5.0</td><td>Apache Cassandra® v5.0.x</td></tr>
  <tr><td>dse-4.8.16</td><td>DataStax Enterprise v4.8.x (END OF SERVICE LIFE)</td></tr>
  <tr><td>dse-5.0.15</td><td>DataStax Enterprise v5.0.x (Long Term Support)</td></tr>
  <tr><td>dse-5.1.35</td><td>DataStax Enterprise v5.1.x</td></tr>
  <tr><td>dse-6.0.18</td><td>DataStax Enterprise v6.0.x</td></tr>
  <tr><td>dse-6.7.17</td><td>DataStax Enterprise v6.7.x</td></tr>
  <tr><td>dse-6.8.30</td><td>DataStax Enterprise v6.8.x</td></tr>
  <tr><td>dse-6.9.0</td><td>DataStax Enterprise v6.9.x</td></tr>
  <tr><td>hcd-1.0.0</td><td>DataStax HCD v1.0.x</td></tr>
</table>
''') + choice( + name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION', + choices: [ + '1.8', // Oracle JDK version 1.8 (current default) + 'openjdk@1.11', // OpenJDK version 11 + 'openjdk@1.17', // OpenJDK version 17 + 'openjdk@1.21' // OpenJDK version 21 + ], + description: '''JDK version to use for TESTING when running adhoc BUILD-AND-EXECUTE-TESTS builds. All builds will use JDK8 for building the driver + + + + + + + + + + + + + + + + + + + + + + + +
<table>
  <tr><th>Choice</th><th>Description</th></tr>
  <tr><td>1.8</td><td>Oracle JDK version 1.8 (Used for compiling regardless of choice)</td></tr>
  <tr><td>openjdk@1.11</td><td>OpenJDK version 11</td></tr>
  <tr><td>openjdk@1.17</td><td>OpenJDK version 17</td></tr>
  <tr><td>openjdk@1.21</td><td>OpenJDK version 21</td></tr>
</table>
''') + booleanParam( + name: 'SKIP_SERIAL_ITS', + defaultValue: false, + description: 'Flag to determine if serial integration tests should be skipped') + booleanParam( + name: 'SKIP_ISOLATED_ITS', + defaultValue: false, + description: 'Flag to determine if isolated integration tests should be skipped') + booleanParam( + name: 'SKIP_PARALLELIZABLE_ITS', + defaultValue: false, + description: 'Flag to determine if parallel integration tests should be skipped') + string( + name: 'INTEGRATION_TESTS_FILTER', + defaultValue: '', + description: '''

Run only the tests whose names match the given patterns
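For example: <code>BatchStatementIT</code>, or a wildcard pattern such as <code>*CompressionIT</code> (class names here are illustrative examples taken from this project's integration test suite).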

+ See Maven Failsafe Plugin for more information on filtering integration tests''') + choice( + name: 'CI_SCHEDULE', + choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS', 'MONTHLY'], + description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_SERVER_VERSIONS', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing server version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + string( + name: 'CI_SCHEDULE_JABBA_VERSION', + defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', + description: 'CI testing JDK version(s) to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') + } + + triggers { + // schedules only run against release branches (i.e. 3.x, 4.x, 4.5.x, etc.) + parameterizedCron(branchPatternCron().matcher(env.BRANCH_NAME).matches() ? """ + # Every weekend (Saturday, Sunday) around 2:00 AM + H 2 * * 0 %CI_SCHEDULE=WEEKENDS;CI_SCHEDULE_SERVER_VERSIONS=4.0 4.1 5.0 dse-4.8.16 dse-5.0.15 dse-5.1.35 dse-6.0.18 dse-6.7.17;CI_SCHEDULE_JABBA_VERSION=1.8 + # Every weeknight (Monday - Friday) around 12:00 PM noon + H 12 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=4.1 5.0 dse-6.8.30 dse-6.9.0 hcd-1.0.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.11 + H 12 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;CI_SCHEDULE_SERVER_VERSIONS=4.1 5.0 dse-6.8.30 dse-6.9.0 hcd-1.0.0;CI_SCHEDULE_JABBA_VERSION=openjdk@1.17 + """ : "") + } + + environment { + OS_VERSION = 'ubuntu/focal64/java-driver' + JABBA_SHELL = '/usr/lib/jabba/jabba.sh' + CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' + SERIAL_ITS_ARGUMENT = "-DskipSerialITs=${params.SKIP_SERIAL_ITS}" + ISOLATED_ITS_ARGUMENT = "-DskipIsolatedITs=${params.SKIP_ISOLATED_ITS}" + PARALLELIZABLE_ITS_ARGUMENT = "-DskipParallelizableITs=${params.SKIP_PARALLELIZABLE_ITS}" + INTEGRATION_TESTS_FILTER = "${params.INTEGRATION_TESTS_FILTER}" + } + + stages { + stage ('Per-Commit') { + options { + timeout(time: 2, unit: 'HOURS') + } + when { + beforeAgent true + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' } + not { buildingTag() } + } + } + + matrix { + axes { + axis { + name 'SERVER_VERSION' + values '4.0', // Previous Apache CassandraⓇ + '5.0', // Current Apache CassandraⓇ + 'dse-6.8.30', // Current DataStax Enterprise + 'dse-6.9.0', // Current DataStax Enterprise + 'hcd-1.0.0' // Current DataStax HCD + } + axis { + name 'JABBA_VERSION' + values '1.8', // jdk8 + 'openjdk@1.11', // jdk11 + 'openjdk@1.17', // jdk17 + 'openjdk@1.21' // jdk21 + } + } + + agent { + label "${OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describePerCommitStage() + } + } + stage('Build-Driver') { + steps { + buildDriver('1.8') + } + } + stage('Execute-Tests') { + steps { + catchError { + // Use the matrix JDK for testing + executeTests() + } + } + post { + always { + /* + * Empty results are possible + * + * - Build failures during mvn verify may exist so report may not be available + */ + junit testResults: '**/target/surefire-reports/TEST-*.xml', 
allowEmptyResults: true + junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true + } + } + } + stage('Execute-Code-Coverage') { + // Ensure the code coverage is run only once per-commit + when { environment name: 'SERVER_VERSION', value: '4.0' } + steps { + executeCodeCoverage() + } + } + } + } + post { + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + + stage('Adhoc-And-Scheduled-Testing') { + when { + beforeAgent true + allOf { + expression { (params.ADHOC_BUILD_TYPE == 'BUILD' && params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') || + params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } + not { buildingTag() } + anyOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } + allOf { + expression { params.ADHOC_BUILD_TYPE == 'BUILD' } + expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } + expression { params.CI_SCHEDULE_SERVER_VERSIONS != 'DO-NOT-CHANGE-THIS-SELECTION' } + } + } + } + } + + environment { + SERVER_VERSIONS = "${params.CI_SCHEDULE_SERVER_VERSIONS == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION : params.CI_SCHEDULE_SERVER_VERSIONS}" + JABBA_VERSION = "${params.CI_SCHEDULE_JABBA_VERSION == 'DO-NOT-CHANGE-THIS-SELECTION' ? params.ADHOC_BUILD_AND_EXECUTE_TESTS_JABBA_VERSION : params.CI_SCHEDULE_JABBA_VERSION}" + } + + matrix { + axes { + axis { + name 'SERVER_VERSION' + values '4.0', // Previous Apache CassandraⓇ + '4.1', // Previous Apache CassandraⓇ + '5.0', // Current Apache CassandraⓇ + 'dse-4.8.16', // Previous EOSL DataStax Enterprise + 'dse-5.0.15', // Last EOSL DataStax Enterprise + 'dse-5.1.35', // Legacy DataStax Enterprise + 'dse-6.0.18', // Previous DataStax Enterprise + 'dse-6.7.17', // Previous DataStax Enterprise + 'dse-6.8.30', // Current DataStax Enterprise + 'dse-6.9.0', // Current DataStax Enterprise + 'hcd-1.0.0' // Current DataStax HCD + } + } + when { + beforeAgent true + allOf { + expression { return env.SERVER_VERSIONS.split(' ').any { it =~ /(ALL|${env.SERVER_VERSION})/ } } + } + } + agent { + label "${env.OS_VERSION}" + } + + stages { + stage('Initialize-Environment') { + steps { + initializeEnvironment() + script { + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + notifySlack() + } + } + } + } + stage('Describe-Build') { + steps { + describeAdhocAndScheduledTestingStage() + } + } + stage('Build-Driver') { + steps { + buildDriver('1.8') + } + } + stage('Execute-Tests') { + steps { + catchError { + // Use the matrix JDK for testing + executeTests() + } + } + post { + always { + /* + * Empty results are possible + * + * - Build failures during mvn verify may exist so report may not be available + * - With boolean parameters to skip tests a failsafe report may not be available + */ + junit testResults: '**/target/surefire-reports/TEST-*.xml', allowEmptyResults: true + junit testResults: '**/target/failsafe-reports/TEST-*.xml', allowEmptyResults: true + } + } + } + stage('Execute-Code-Coverage') { + // Ensure the code coverage is run only once per-commit + when { + allOf { + environment name: 'SERVER_VERSION', value: '4.0' + environment name: 'JABBA_VERSION', value: '1.8' + } + } + steps { + executeCodeCoverage() + } + } + } + } + post { + aborted { + notifySlack('aborted') + } + success { + notifySlack('completed') + } + unstable { + notifySlack('unstable') + } + failure { + notifySlack('FAILED') + } + } + } + } +} diff 
--git a/LICENSE b/LICENSE index d6456956733..a157e31d058 100644 --- a/LICENSE +++ b/LICENSE @@ -200,3 +200,24 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +Apache Cassandra Java Driver bundles code and files from the following projects: + +JNR project +Copyright (C) 2008-2010 Wayne Meissner +This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. +see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java + +Protocol Buffers +Copyright 2008 Google Inc. +This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). +see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java + +Guava +Copyright (C) 2007 The Guava Authors +This product includes software developed as part of the Guava project ( https://guava.dev ). +see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java + +Copyright (C) 2018 Christian Stein +This product includes software developed by Christian Stein +see ci/install-jdk.sh diff --git a/LICENSE_binary b/LICENSE_binary new file mode 100644 index 00000000000..b59c6ec22bb --- /dev/null +++ b/LICENSE_binary @@ -0,0 +1,247 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Apache Cassandra Java Driver bundles code and files from the following projects: + +JNR project +Copyright (C) 2008-2010 Wayne Meissner +This product includes software developed as part of the JNR project ( https://github.com/jnr/jnr-ffi )s. +see core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java + +Protocol Buffers +Copyright 2008 Google Inc. +This product includes software developed as part of the Protocol Buffers project ( https://developers.google.com/protocol-buffers/ ). +see core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java + +Guava +Copyright (C) 2007 The Guava Authors +This product includes software developed as part of the Guava project ( https://guava.dev ). 
+see core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java + +Copyright (C) 2018 Christian Stein +This product includes software developed by Christian Stein +see ci/install-jdk.sh + +This product bundles Java Native Runtime - POSIX 3.1.15, +which is available under the Eclipse Public License version 2.0. +see licenses/jnr-posix.txt + +This product bundles jnr-x86asm 1.0.2, +which is available under the MIT License. +see licenses/jnr-x86asm.txt + +This product bundles ASM 9.2: a very small and fast Java bytecode manipulation framework, +which is available under the 3-Clause BSD License. +see licenses/asm.txt + +This product bundles HdrHistogram 2.1.12: A High Dynamic Range (HDR) Histogram, +which is available under the 2-Clause BSD License. +see licenses/HdrHistogram.txt + +This product bundles The Simple Logging Facade for Java (SLF4J) API 1.7.26, +which is available under the MIT License. +see licenses/slf4j-api.txt + +This product bundles Reactive Streams 1.0.3, +which is available under the MIT License. +see licenses/reactive-streams.txt diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 00000000000..8e27ae3e52f --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,5 @@ +Apache Cassandra Java Driver +Copyright 2012- The Apache Software Foundation + +This product includes software developed at The Apache Software +Foundation (http://www.apache.org/). diff --git a/NOTICE_binary.txt b/NOTICE_binary.txt new file mode 100644 index 00000000000..f6f11c298f6 --- /dev/null +++ b/NOTICE_binary.txt @@ -0,0 +1,249 @@ +Apache Cassandra Java Driver +Copyright 2012- The Apache Software Foundation + +This product includes software developed at The Apache Software +Foundation (http://www.apache.org/). + +This compiled product also includes Apache-licensed dependencies +that contain the following NOTICE information: + +================================================================== +io.netty:netty-handler NOTICE.txt +================================================================== +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified portion of 'Webbit', an event based +WebSocket and HTTP server, which can be obtained at: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product contains a modified portion of 'SLF4J', a simple logging +facade for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * https://www.slf4j.org/ + +This product contains a modified portion of 'Apache Harmony', an open source +Java SE, which can be obtained at: + + * NOTICE: + * license/NOTICE.harmony.txt + * LICENSE: + * license/LICENSE.harmony.txt (Apache License 2.0) + * HOMEPAGE: + * https://archive.apache.org/dist/harmony/ + +This product contains a modified portion of 'jbzip2', a Java bzip2 compression +and decompression library written by Matthew J. Francis. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.jbzip2.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jbzip2/ + +This product contains a modified portion of 'libdivsufsort', a C API library to construct +the suffix array and the Burrows-Wheeler transformed string for any input string of +a constant-size alphabet written by Yuta Mori. It can be obtained at: + + * LICENSE: + * license/LICENSE.libdivsufsort.txt (MIT License) + * HOMEPAGE: + * https://github.com/y-256/libdivsufsort + +This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, + which can be obtained at: + + * LICENSE: + * license/LICENSE.jctools.txt (ASL2 License) + * HOMEPAGE: + * https://github.com/JCTools/JCTools + +This product optionally depends on 'JZlib', a re-implementation of zlib in +pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product optionally depends on 'Compress-LZF', a Java library for encoding and +decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: + + * LICENSE: + * license/LICENSE.compress-lzf.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/ning/compress + +This product optionally depends on 'lz4', a LZ4 Java compression +and decompression library written by Adrien Grand. It can be obtained at: + + * LICENSE: + * license/LICENSE.lz4.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/yawkat/lz4-java + +This product optionally depends on 'lzma-java', a LZMA Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.lzma-java.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jponge/lzma-java + +This product optionally depends on 'zstd-jni', a zstd-jni Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.zstd-jni.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/luben/zstd-jni + +This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression +and decompression library written by William Kinney. It can be obtained at: + + * LICENSE: + * license/LICENSE.jfastlz.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jfastlz/ + +This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * https://github.com/google/protobuf + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * https://www.bouncycastle.org/ + +This product optionally depends on 'Snappy', a compression library produced +by Google Inc, which can be obtained at: + + * LICENSE: + * license/LICENSE.snappy.txt (New BSD License) + * HOMEPAGE: + * https://github.com/google/snappy + +This product optionally depends on 'JBoss Marshalling', an alternative Java +serialization API, which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jboss-remoting/jboss-marshalling + +This product optionally depends on 'Caliper', Google's micro- +benchmarking framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.caliper.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/google/caliper + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * https://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, which +can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * https://logging.apache.org/log4j/ + +This product optionally depends on 'Aalto XML', an ultra-high performance +non-blocking XML processor, which can be obtained at: + + * LICENSE: + * license/LICENSE.aalto-xml.txt (Apache License 2.0) + * HOMEPAGE: + * https://wiki.fasterxml.com/AaltoHome + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: + + * LICENSE: + * license/LICENSE.hpack.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/twitter/hpack + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: + + * LICENSE: + * license/LICENSE.hyper-hpack.txt (MIT License) + * HOMEPAGE: + * https://github.com/python-hyper/hpack/ + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: + + * LICENSE: + * license/LICENSE.nghttp2-hpack.txt (MIT License) + * HOMEPAGE: + * https://github.com/nghttp2/nghttp2/ + +This product contains a modified portion of 'Apache Commons Lang', a Java library +provides utilities for the java.lang API, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-lang.txt (Apache License 2.0) + * HOMEPAGE: + * https://commons.apache.org/proper/commons-lang/ + + +This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. + + * LICENSE: + * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/takari/maven-wrapper + +This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. +This private header is also used by Apple's open source + mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
+ + * LICENSE: + * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) + * HOMEPAGE: + * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h + +This product optionally depends on 'Brotli4j', Brotli compression and +decompression for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.brotli4j.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/hyperxpro/Brotli4j diff --git a/README.md b/README.md index 8b85257af38..d8ef01d0964 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,42 @@ -# Datastax Java Driver for Apache Cassandra® +# Java Driver for Apache Cassandra® -[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.datastax.oss/java-driver-core/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.datastax.oss/java-driver-core) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/java-driver-core/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.apache.cassandra/java-driver-core) *If you're reading this on github.com, please note that this is the readme for the development version and that some features described here might not yet have been released. You can find the documentation for latest version through [DataStax Docs] or via the release tags, e.g. -[4.1.0](https://github.com/datastax/java-driver/tree/4.1.0).* +[4.17.0](https://github.com/datastax/java-driver/tree/4.17.0).* A modern, feature-rich and highly tunable Java client library for [Apache Cassandra®] \(2.1+) and -[DataStax Enterprise] \(4.7+), using exclusively Cassandra's binary protocol and Cassandra Query -Language v3. +[DataStax Enterprise] \(4.7+), and [DataStax Astra], using exclusively Cassandra's binary protocol +and Cassandra Query Language (CQL) v3. [DataStax Docs]: http://docs.datastax.com/en/developer/java-driver/ [Apache Cassandra®]: http://cassandra.apache.org/ -[DataStax Enterprise]: http://www.datastax.com/products/datastax-enterprise ## Getting the driver -The driver artifacts are published in Maven central, under the group id [com.datastax.oss]; there +The driver artifacts are published in Maven central, under the group id [org.apache.cassandra]; there are multiple modules, all prefixed with `java-driver-`. ```xml <dependency> - <groupId>com.datastax.oss</groupId> + <groupId>org.apache.cassandra</groupId> <artifactId>java-driver-core</artifactId> - <version>4.1.0</version> + <version>${driver.version}</version> </dependency> <dependency> - <groupId>com.datastax.oss</groupId> + <groupId>org.apache.cassandra</groupId> <artifactId>java-driver-query-builder</artifactId> - <version>4.1.0</version> + <version>${driver.version}</version> </dependency> + +<dependency> + <groupId>org.apache.cassandra</groupId> + <artifactId>java-driver-mapper-runtime</artifactId> + <version>${driver.version}</version> +</dependency> ``` @@ -38,13 +44,23 @@ Note that the query builder is now published as a separate artifact, you'll need to add the dependency if you plan to use it. Refer to each module's manual for more details ([core](manual/core/), [query -builder](manual/query_builder/)). +builder](manual/query_builder/), [mapper](manual/mapper)). + +[org.apache.cassandra]: http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.apache.cassandra%22 + +## Compatibility -[com.datastax.oss]: http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.datastax.oss%22 +The driver is compatible with Apache Cassandra® 2.1 and higher, DataStax Enterprise 4.7 and +higher, and DataStax Astra. + +It requires Java 8 or higher. + +Disclaimer: Some DataStax/DataStax Enterprise products might partially work on big-endian systems, +but DataStax does not officially support these systems.
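For readers trying out the coordinates above, here is a minimal sketch of connecting and running a query with the 4.x API. The contact point and datacenter name are placeholders to adapt to your cluster; note that the driver keeps its historical `com.datastax.oss` Java packages even under the new Maven coordinates:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import java.net.InetSocketAddress;

public class QuickStart {
  public static void main(String[] args) {
    // CqlSession is the 4.x entry point and is AutoCloseable.
    try (CqlSession session =
        CqlSession.builder()
            .addContactPoint(new InetSocketAddress("127.0.0.1", 9042)) // placeholder contact point
            .withLocalDatacenter("datacenter1") // must match your cluster's DC name
            .build()) {
      ResultSet rs = session.execute("SELECT release_version FROM system.local");
      Row row = rs.one();
      System.out.println(row.getString("release_version"));
    }
  }
}
```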
## Migrating from previous versions -Java driver 4 is **not binary compatible** with previous versions. However, most of the concepts +Java Driver 4 is **not binary compatible** with previous versions. However, most of the concepts remain unchanged, and the new API will look very familiar to 2.x and 3.x users. See the [upgrade guide](upgrade_guide/) for details. @@ -55,22 +71,18 @@ See the [upgrade guide](upgrade_guide/) for details. * [API docs] * Bug tracking: [JIRA] * [Mailing list] -* Twitter: [@dsJavaDriver] tweets Java driver releases and important announcements (low frequency). - [@DataStaxEng] has more news, including other drivers, Cassandra, and DSE. * [Changelog] * [FAQ] -[API docs]: http://www.datastax.com/drivers/java/4.0 -[JIRA]: https://datastax-oss.atlassian.net/browse/JAVA +[API docs]: https://docs.datastax.com/en/drivers/java/4.17 +[JIRA]: https://issues.apache.org/jira/issues/?jql=project%20%3D%20CASSJAVA%20ORDER%20BY%20key%20DESC [Mailing list]: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user -[@dsJavaDriver]: https://twitter.com/dsJavaDriver -[@DataStaxEng]: https://twitter.com/datastaxeng [Changelog]: changelog/ [FAQ]: faq/ ## License -Copyright DataStax, Inc. +© The Apache Software Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -86,9 +98,8 @@ limitations under the License. ---- -DataStax is a registered trademark of DataStax, Inc. and its subsidiaries in the United States -and/or other countries. - Apache Cassandra, Apache, Tomcat, Lucene, Solr, Hadoop, Spark, TinkerPop, and Cassandra are trademarks of the [Apache Software Foundation](http://www.apache.org/) or its subsidiaries in Canada, the United States and/or other countries. + +Binary artifacts of this product bundle Java Native Runtime libraries, which are available under the Eclipse Public License version 2.0.
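The readme now also advertises the `java-driver-mapper-runtime` module. As a rough sketch of what the object mapper looks like, under the assumption that `java-driver-mapper-processor` is configured as an annotation processor (which generates the `InventoryMapperBuilder` used at the end), and with `Product`, `ProductDao` and `InventoryMapper` as hypothetical names:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.mapper.annotations.Dao;
import com.datastax.oss.driver.api.mapper.annotations.DaoFactory;
import com.datastax.oss.driver.api.mapper.annotations.Entity;
import com.datastax.oss.driver.api.mapper.annotations.Insert;
import com.datastax.oss.driver.api.mapper.annotations.Mapper;
import com.datastax.oss.driver.api.mapper.annotations.PartitionKey;
import com.datastax.oss.driver.api.mapper.annotations.Select;
import java.util.UUID;

// In a real project each top-level type would live in its own file.
@Entity
class Product {
  @PartitionKey private UUID id;
  private String description;

  public UUID getId() { return id; }
  public void setId(UUID id) { this.id = id; }
  public String getDescription() { return description; }
  public void setDescription(String description) { this.description = description; }
}

@Dao
interface ProductDao {
  @Select
  Product findById(UUID id); // generated implementation: SELECT by primary key

  @Insert
  void save(Product product); // generated implementation: INSERT of all mapped columns
}

@Mapper
interface InventoryMapper {
  @DaoFactory
  ProductDao productDao();
}

class MapperUsage {
  static ProductDao buildDao(CqlSession session) {
    // InventoryMapperBuilder is generated by the annotation processor at compile time.
    InventoryMapper mapper = new InventoryMapperBuilder(session).build();
    return mapper.productDao(); // resolves table names against the session's keyspace
  }
}
```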
diff --git a/bom/pom.xml b/bom/pom.xml new file mode 100644 index 00000000000..dd76153a9b1 --- /dev/null +++ b/bom/pom.xml @@ -0,0 +1,121 @@ + + + + 4.0.0 + + org.apache.cassandra + java-driver-parent + 4.19.3-SNAPSHOT + + java-driver-bom + pom + Apache Cassandra Java Driver - Bill Of Materials + + + + org.apache.cassandra + java-driver-core + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-core-shaded + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-mapper-processor + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-mapper-runtime + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-query-builder + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-guava-shaded + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-test-infra + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-metrics-micrometer + 4.19.3-SNAPSHOT + + + org.apache.cassandra + java-driver-metrics-microprofile + 4.19.3-SNAPSHOT + + + com.datastax.oss + native-protocol + 1.5.2 + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + flatten + process-resources + + flatten + + + + keep + expand + expand + expand + expand + expand + expand + expand + expand + expand + expand + expand + expand + remove + + true + + + + + + + diff --git a/build.yaml b/build.yaml deleted file mode 100644 index 79dd3b2c84e..00000000000 --- a/build.yaml +++ /dev/null @@ -1,22 +0,0 @@ -java: - - openjdk8 -os: - - ubuntu/bionic64/java-driver -cassandra: - - '2.1' - - '2.2' - - '3.0' - - '3.11' -build: - - type: maven - version: 3.2.5 - goals: verify --batch-mode - properties: | - ccm.version=$CCM_CASSANDRA_VERSION - - xunit: - - "**/target/surefire-reports/TEST-*.xml" - - "**/target/failsafe-reports/TEST-*.xml" - - jacoco: true -disable_commit_status: true -notify: - slack: java-driver-dev-bots diff --git a/changelog/README.md b/changelog/README.md index 3755de324c2..b01c3db3bf9 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,9 +1,467 @@ + + ## Changelog -### 4.2.0 (in progress) +### 4.19.2 + +- [bug] CASSJAVA-116: Retry or Speculative Execution with RequestIdGenerator throws "Duplicate Key" + +### 4.19.1 + +- [improvement] CASSJAVA-97: Let users inject an ID for each request and write to the custom payload +- [improvement] CASSJAVA-92: Add Local DC to driver connection info and provide visibility with nodetool clientstats +- [bug] PR 2025: Eliminate lock in ConcurrencyLimitingRequestThrottler +- [improvement] CASSJAVA-89: Fix deprecated table configs in Cassandra 5 +- [improvement] PR 2028: Remove unnecessary locking in DefaultNettyOptions +- [improvement] CASSJAVA-102: Fix revapi spurious complaints about optional dependencies +- [improvement] PR 2013: Add SubnetAddressTranslator +- [improvement] CASSJAVA-68: Improve DefaultCodecRegistry.CacheKey#hashCode() to eliminate Object[] allocation +- [improvement] PR 1989: Bump Jackson version to la(te)st 2.13.x, 2.13.5 +- [improvement] CASSJAVA-76: Make guava an optional dependency of java-driver-guava-shaded +- [bug] PR 2035: Prevent long overflow in SNI address resolution +- [improvement] CASSJAVA-77: 4.x: Upgrade Netty to 4.1.119 +- [improvement] CASSJAVA-40: Driver testing against Java 21 +- [improvement] CASSJAVA-90: Update native-protocol +- [improvement] CASSJAVA-80: Support configuration to disable DNS reverse-lookups for SAN validation + +### 4.19.0 + +- [bug] JAVA-3055: Prevent PreparedStatement cache to be polluted if a request is cancelled. 
+- [bug] JAVA-3168: Copy node info for contact points on initial node refresh only from first match by endpoint +- [improvement] JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0) +- [improvement] CASSJAVA-53: Update Guava version used in cassandra-java-driver +- [improvement] JAVA-3118: Add support for vector data type in Schema Builder, QueryBuilder +- [bug] CASSJAVA-55: Remove setting "Host" header for metadata requests +- [bug] JAVA-3057: Allow decoding a UDT that has more fields than expected +- [improvement] CASSJAVA-52: Bring java-driver-shaded-guava into the repo as a submodule +- [bug] CASSJAVA-2: TableMetadata#describe produces invalid CQL when a type of a column is a vector +- [bug] JAVA-3051: Memory leak in DefaultLoadBalancingPolicy measurement of response times +- [improvement] CASSJAVA-14: Query builder support for NOT CQL syntax +- [bug] CASSJAVA-12: DefaultSslEngineFactory missing null check on close +- [improvement] CASSJAVA-46: Expose table extensions via schema builders +- [bug] PR 1938: Fix uncaught exception during graceful channel shutdown after exceeding max orphan ids +- [improvement] PR 1607: Annotate BatchStatement, Statement, SimpleStatement methods with CheckReturnValue +- [improvement] CASSJAVA-41: Reduce lock held duration in ConcurrencyLimitingRequestThrottler +- [bug] JAVA-3149: Async Query Cancellation Not Propagated To RequestThrottler +- [bug] JAVA-3167: CompletableFutures.allSuccessful() may return never completed future +- [bug] PR 1620: Don't return empty routing key when partition key is unbound +- [improvement] PR 1623: Limit calls to Conversions.resolveExecutionProfile +- [improvement] CASSJAVA-29: Update target Cassandra versions for integration tests, support new 5.0.x + +### 4.18.1 + +- [improvement] JAVA-3142: Ability to specify ordering of remote local dc's via new configuration for graceful automatic failovers +- [bug] CASSANDRA-19457: Object reference in Micrometer metrics prevent GC from reclaiming Session instances +- [improvement] CASSANDRA-19468: Don't swallow exception during metadata refresh +- [bug] CASSANDRA-19333: Fix data corruption in VectorCodec when using heap buffers +- [improvement] CASSANDRA-19290: Replace uses of AttributeKey.newInstance +- [improvement] CASSANDRA-19352: Support native_transport_(address|port) + native_transport_port_ssl for DSE 6.8 (4.x edition) +- [improvement] CASSANDRA-19180: Support reloading keystore in cassandra-java-driver + +### 4.18.0 + +- [improvement] PR 1689: Add support for publishing percentile time series for the histogram metrics (nparaddi-walmart) +- [improvement] JAVA-3104: Do not eagerly pre-allocate array when deserializing CqlVector +- [improvement] JAVA-3111: upgrade jackson-databind to 2.13.4.2 to address gradle dependency issue +- [improvement] PR 1617: Improve ByteBufPrimitiveCodec readBytes (chibenwa) +- [improvement] JAVA-3095: Fix CREATE keyword in vector search example in upgrade guide +- [improvement] JAVA-3100: Update jackson-databind to 2.13.4.1 and jackson-jaxrs-json-provider to 2.13.4 to address recent CVEs +- [improvement] JAVA-3089: Forbid wildcard imports + +### 4.17.0 + +- [improvement] JAVA-3070: Make CqlVector and CqlDuration serializable +- [improvement] JAVA-3085: Initialize c.d.o.d.i.core.util.Dependency at Graal native image build-time +- [improvement] JAVA-3061: CqlVector API improvements, add support for accessing vectors directly as float arrays +- [improvement] JAVA-3042: Enable automated 
testing for Java17 +- [improvement] JAVA-3050: Upgrade Netty to 4.1.94 + +### 4.16.0 + +- [improvement] JAVA-3058: Clear prepared statement cache on UDT type change event +- [improvement] JAVA-3060: Add vector type, codec + support for parsing CQL type +- [improvement] DOC-2813: Add error handling guidance linking to a helpful blog post +- [improvement] JAVA-3045: Fix GraalVM native image support for GraalVM 22.2 + +### 4.15.0 + +- [improvement] JAVA-3041: Update Guava session sample code to use ProgrammaticArguments +- [improvement] JAVA-3022: Implement AddressTranslator for AWS PrivateLink +- [bug] JAVA-3021: Update table SchemaBuilder page to replace withPrimaryKey with withPartitionKey +- [bug] JAVA-3005: Node list refresh behavior in 4.x is different from 3.x +- [bug] JAVA-3002: spring-boot app keeps connecting to IP of replaced node +- [improvement] JAVA-3023 Upgrade Netty to 4.1.77 +- [improvement] JAVA-2995: CodecNotFoundException doesn't extend DriverException + +### 4.14.1 + +- [improvement] JAVA-3013: Upgrade dependencies to address CVEs and other security issues, 4.14.1 edition +- [improvement] JAVA-2977: Update Netty to resolve higher-priority CVEs +- [improvement] JAVA-3003: Update jnr-posix to address CVE-2014-4043 + +### 4.14.0 + +- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE +- [bug] JAVA-2987: BasicLoadBalancingPolicy remote computation assumes local DC is up and live +- [bug] JAVA-2992: Include options into DefaultTableMetadata equals and hash methods +- [improvement] JAVA-2982: Switch Esri geometry lib to an optional dependency +- [improvement] JAVA-2959: Don't throw NoNodeAvailableException when all connections busy + +### 4.13.0 + +- [improvement] JAVA-2940: Add GraalVM native image build configurations +- [improvement] JAVA-2953: Promote ProgrammaticPlainTextAuthProvider to the public API and add + credentials hot-reload +- [improvement] JAVA-2951: Accept multiple node state listeners, schema change listeners and request + trackers + +Merged from 4.12.x: + +- [bug] JAVA-2949: Provide mapper support for CompletionStage> +- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck + +### 4.12.1 + +Merged from 4.11.x: + +- [bug] JAVA-2949: Provide mapper support for CompletionStage> +- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck + +### 4.12.0 + +- [improvement] JAVA-2935: Make GetEntity and SetEntity methods resilient to incomplete data +- [improvement] JAVA-2944: Upgrade MicroProfile Metrics to 3.0 + +Merged from 4.11.x: + +- [bug] JAVA-2932: Make DefaultDriverConfigLoader.close() resilient to terminated executors +- [bug] JAVA-2945: Reinstate InternalDriverContext.getNodeFilter method +- [bug] JAVA-2947: Release buffer after decoding multi-slice frame +- [bug] JAVA-2946: Make MapperResultProducerService instances be located with user-provided class loader +- [bug] JAVA-2942: GraphStatement.setConsistencyLevel() is not effective +- [bug] JAVA-2941: Cannot add a single static column with the alter table API +- [bug] JAVA-2943: Prevent session leak with wrong keyspace name +- [bug] JAVA-2938: OverloadedException message is misleading + +### 4.11.3 + +- [bug] JAVA-2949: Provide mapper support for CompletionStage> +- [bug] JAVA-2950: Remove reference to Reflection class from DependencyCheck + +### 4.11.2 + +- [bug] JAVA-2932: Make DefaultDriverConfigLoader.close() resilient to terminated executors +- [bug] JAVA-2945: Reinstate InternalDriverContext.getNodeFilter method +- 
[bug] JAVA-2947: Release buffer after decoding multi-slice frame +- [bug] JAVA-2946: Make MapperResultProducerService instances be located with user-provided class loader +- [bug] JAVA-2942: GraphStatement.setConsistencyLevel() is not effective +- [bug] JAVA-2941: Cannot add a single static column with the alter table API +- [bug] JAVA-2943: Prevent session leak with wrong keyspace name +- [bug] JAVA-2938: OverloadedException message is misleading + +### 4.11.1 + +- [bug] JAVA-2910: Add a configuration option to support strong values for prepared statements cache +- [bug] JAVA-2936: Support Protocol V6 +- [bug] JAVA-2934: Handle empty non-final pages in ReactiveResultSetSubscription + +### 4.11.0 + +- [improvement] JAVA-2930: Allow Micrometer to record histograms for timers +- [improvement] JAVA-2914: Transform node filter into a more flexible node distance evaluator +- [improvement] JAVA-2929: Revisit node-level metric eviction +- [new feature] JAVA-2830: Add mapper support for Java streams +- [bug] JAVA-2928: Generate counter increment/decrement constructs compatible with legacy C* + versions +- [new feature] JAVA-2872: Ability to customize metric names and tags +- [bug] JAVA-2925: Consider protocol version unsupported when server requires USE_BETA flag for it +- [improvement] JAVA-2704: Remove protocol v5 beta status, add v6-beta +- [improvement] JAVA-2916: Annotate generated classes with `@SuppressWarnings` +- [bug] JAVA-2927: Make Dropwizard truly optional +- [improvement] JAVA-2917: Include GraalVM substitutions for request processors and geo codecs +- [bug] JAVA-2918: Exclude invalid peers from schema agreement checks + +### 4.10.0 + +- [improvement] JAVA-2907: Switch Tinkerpop to an optional dependency +- [improvement] JAVA-2904: Upgrade Jackson to 2.12.0 and Tinkerpop to 3.4.9 +- [bug] JAVA-2911: Prevent control connection from scheduling too many reconnections +- [bug] JAVA-2902: Consider computed values when validating constructors for immutable entities +- [new feature] JAVA-2899: Re-introduce cross-DC failover in driver 4 +- [new feature] JAVA-2900: Re-introduce consistency downgrading retries +- [new feature] JAVA-2903: BlockHound integration +- [improvement] JAVA-2877: Allow skipping validation for individual mapped entities +- [improvement] JAVA-2871: Allow keyspace exclusions in the metadata, and exclude system keyspaces + by default +- [improvement] JAVA-2449: Use non-cryptographic random number generation in Uuids.random() +- [improvement] JAVA-2893: Allow duplicate keys in DefaultProgrammaticDriverConfigLoaderBuilder +- [documentation] JAVA-2894: Clarify usage of Statement.setQueryTimestamp +- [bug] JAVA-2889: Remove TypeSafe imports from DriverConfigLoader +- [bug] JAVA-2883: Use root locale explicitly when changing string case +- [bug] JAVA-2890: Fix off-by-one error in UdtCodec +- [improvement] JAVA-2905: Prevent new connections from using a protocol version higher than the negotiated one +- [bug] JAVA-2647: Handle token types in QueryBuilder.literal() +- [bug] JAVA-2887: Handle composite profiles with more than one key and/or backed by only one profile + +### 4.9.0 + +- [documentation] JAVA-2823: Make Astra more visible in the docs +- [documentation] JAVA-2869: Advise against using 4.5.x-4.6.0 in the upgrade guide +- [documentation] JAVA-2868: Cover reconnect-on-init in the manual +- [improvement] JAVA-2827: Exclude unused Tinkerpop transitive dependencies +- [improvement] JAVA-2827: Remove dependency to Tinkerpop gremlin-driver +- [task] JAVA-2859: Upgrade 
Tinkerpop to 3.4.8 +- [bug] JAVA-2726: Fix Tinkerpop incompatibility with JPMS +- [bug] JAVA-2842: Remove security vulnerabilities introduced by Tinkerpop +- [bug] JAVA-2867: Revisit compressor substitutions +- [improvement] JAVA-2870: Optimize memory usage of token map +- [improvement] JAVA-2855: Allow selection of the metrics framework via the config +- [improvement] JAVA-2864: Revisit mapper processor's messaging +- [new feature] JAVA-2816: Support immutability and fluent accessors in the mapper +- [new feature] JAVA-2721: Add counter support in the mapper +- [bug] JAVA-2863: Reintroduce mapper processor dependency to SLF4J + +### 4.8.0 + +- [improvement] JAVA-2811: Add aliases for driver 3 method names +- [new feature] JAVA-2808: Provide metrics bindings for Micrometer and MicroProfile +- [new feature] JAVA-2773: Support new protocol v5 message format +- [improvement] JAVA-2841: Raise timeouts during connection initialization +- [bug] JAVA-2331: Unregister old metrics when a node gets removed or changes RPC address +- [improvement] JAVA-2850: Ignore credentials in secure connect bundle [DataStax Astra] +- [improvement] JAVA-2813: Don't fail when secure bundle is specified together with other options +- [bug] JAVA-2800: Exclude SLF4J from mapper-processor dependencies +- [new feature] JAVA-2819: Add DriverConfigLoader.fromString +- [improvement] JAVA-2431: Set all occurrences when bound variables are used multiple times +- [improvement] JAVA-2829: Log protocol negotiation messages at DEBUG level +- [bug] JAVA-2846: Give system properties the highest precedence in DefaultDriverConfigLoader +- [new feature] JAVA-2691: Provide driver 4 support for extra codecs +- [improvement] Allow injection of CodecRegistry on session builder +- [improvement] JAVA-2828: Add safe paging state wrapper +- [bug] JAVA-2835: Correctly handle unresolved addresses in DefaultEndPoint.equals +- [bug] JAVA-2838: Avoid ConcurrentModificationException when closing connection +- [bug] JAVA-2837: make StringCodec strict about unicode in ascii + +### 4.7.2 + +- [bug] JAVA-2821: Can't connect to DataStax Astra using driver 4.7.x + +### 4.7.1 + +- [bug] JAVA-2818: Remove root path only after merging non-programmatic configs + +### 4.7.0 +- [improvement] JAVA-2301: Introduce OSGi tests for the mapper +- [improvement] JAVA-2658: Refactor OSGi tests +- [bug] JAVA-2657: Add ability to specify the class loader to use for application-specific classpath resources +- [improvement] JAVA-2803: Add Graal substitutions for protocol compression +- [documentation] JAVA-2666: Document BOM and driver modules +- [documentation] JAVA-2613: Improve connection pooling documentation +- [new feature] JAVA-2793: Add composite config loader +- [new feature] JAVA-2792: Allow custom results in the mapper +- [improvement] JAVA-2663: Add Graal substitutions for native functions +- [improvement] JAVA-2747: Revisit semantics of Statement.setExecutionProfile/Name + +### 4.6.1 + +- [bug] JAVA-2676: Don't reschedule write coalescer after empty runs + +### 4.6.0 + +- [improvement] JAVA-2741: Make keyspace/table metadata impls serializable +- [bug] JAVA-2740: Extend peer validity check to include datacenter, rack and tokens +- [bug] JAVA-2744: Recompute token map when node is added +- [new feature] JAVA-2614: Provide a utility to emulate offset paging on the client side +- [new feature] JAVA-2718: Warn when the number of sessions exceeds a configurable threshold +- [improvement] JAVA-2664: Add a callback to inject the session in listeners +- [bug] 
JAVA-2698: TupleCodec and UdtCodec give wrong error message when parsing fails +- [improvement] JAVA-2435: Add automatic-module-names to the manifests +- [new feature] JAVA-2054: Add now_in_seconds to protocol v5 query messages +- [bug] JAVA-2711: Fix handling of UDT keys in the mapper +- [improvement] JAVA-2631: Add getIndex() shortcuts to TableMetadata +- [improvement] JAVA-2679: Add port information to QueryTrace and TraceEvent +- [improvement] JAVA-2184: Refactor DescribeIT to improve maintainability +- [new feature] JAVA-2600: Add map-backed config loader +- [new feature] JAVA-2105: Add support for transient replication +- [new feature] JAVA-2670: Provide base class for mapped custom codecs +- [new feature] JAVA-2633: Add execution profile argument to DAO mapper factory methods +- [improvement] JAVA-2667: Add ability to fail the build when integration tests fail +- [bug] JAVA-1861: Add Metadata.getClusterName() + +### 4.5.1 + +- [bug] JAVA-2673: Fix mapper generated code for UPDATE with TTL and IF condition + +### 4.5.0 + +- [bug] JAVA-2654: Make AdminRequestHandler handle integer serialization +- [improvement] JAVA-2618: Improve error handling in request handlers +- [new feature] JAVA-2064: Add support for DSE 6.8 graph options in schema builder +- [documentation] JAVA-2559: Fix GraphNode javadocs +- [improvement] JAVA-2281: Extend GraphBinaryDataTypesTest to other graph protocols +- [new feature] JAVA-2498: Add support for reactive graph queries +- [bug] JAVA-2572: Prevent race conditions when cancelling a continuous paging query +- [improvement] JAVA-2566: Introduce specific metrics for Graph queries +- [improvement] JAVA-2556: Make ExecutionInfo compatible with any Request type +- [improvement] JAVA-2571: Revisit usages of DseGraph.g +- [improvement] JAVA-2558: Revisit GraphRequestHandler +- [bug] JAVA-2508: Preserve backward compatibility in schema metadata types +- [bug] JAVA-2465: Avoid requesting 0 page when executing continuous paging queries +- [improvement] JAVA-2472: Enable speculative executions for paged graph queries +- [improvement] JAVA-1579: Change default result format to latest GraphSON format +- [improvement] JAVA-2496: Revisit timeouts for paged graph queries +- [bug] JAVA-2510: Fix GraphBinaryDataTypesTest Codec registry initialization +- [bug] JAVA-2492: Parse edge metadata using internal identifiers +- [improvement] JAVA-2282: Remove GraphSON3 support +- [new feature] JAVA-2098: Add filter predicates for collections +- [improvement] JAVA-2245: Rename graph engine Legacy to Classic and Modern to Core +- [new feature] JAVA-2099: Enable Paging Through DSE Driver for Gremlin Traversals (2.x) +- [new feature] JAVA-1898: Expose new table-level graph metadata +- [bug] JAVA-2642: Fix default value of max-orphan-requests +- [bug] JAVA-2644: Revisit channel selection when pool size > 1 +- [bug] JAVA-2630: Correctly handle custom classes in IndexMetadata.describe +- [improvement] JAVA-1556: Publish Maven Bill Of Materials POM +- [improvement] JAVA-2637: Bump Netty to 4.1.45 +- [bug] JAVA-2617: Reinstate generation of deps.txt for Insights +- [new feature] JAVA-2625: Provide user-friendly programmatic configuration for kerberos +- [improvement] JAVA-2624: Expose a config option for the connect timeout +- [improvement] JAVA-2592: Make reload support parameterizable for DefaultDriverConfigLoader +- [new feature] JAVA-2263: Add optional schema validation to the mapper + +### 4.4.0 + +This version brings in all functionality that was formerly only in the DataStax Enterprise 
driver, +such as the built-in support for reactive programming. Going forward, all new features will be +implemented in this single driver (for past DataStax Enterprise driver versions before the merge, +refer to the [DSE driver +changelog](https://docs.datastax.com/en/developer/java-driver-dse/latest/changelog/)). + +- [documentation] JAVA-2607: Improve visibility of driver dependencies section +- [documentation] JAVA-1975: Document the importance of using specific TinkerPop version +- [improvement] JAVA-2529: Standardize optional/excludable dependency checks +- [bug] JAVA-2598: Do not use context class loader when attempting to load classes +- [improvement] JAVA-2582: Don't propagate a future into SchemaQueriesFactory +- [documentation] JAVA-2542: Improve the javadocs of methods in CqlSession +- [documentation] JAVA-2609: Add docs for proxy authentication to unified driver +- [improvement] JAVA-2554: Improve efficiency of InsightsClient by improving supportsInsights check +- [improvement] JAVA-2601: Inject Google Tag Manager scripts in generated API documentation +- [improvement] JAVA-2551: Improve support for DETERMINISTIC and MONOTONIC functions +- [documentation] JAVA-2446: Revisit continuous paging javadocs +- [improvement] JAVA-2550: Remove warnings in ContinuousCqlRequestHandler when coordinator is not replica +- [improvement] JAVA-2569: Make driver compatible with Netty < 4.1.34 again +- [improvement] JAVA-2541: Improve error messages during connection initialization +- [improvement] JAVA-2530: Expose shortcuts for name-based UUIDs +- [improvement] JAVA-2547: Add method DriverConfigLoader.fromPath +- [improvement] JAVA-2528: Store suppressed exceptions in AllNodesFailedException +- [new feature] JAVA-2581: Add query builder support for indexed list assignments +- [improvement] JAVA-2596: Consider collection removals as idempotent in query builder +- [bug] JAVA-2555: Generate append/prepend constructs compatible with legacy C* versions +- [bug] JAVA-2584: Ensure codec registry is able to create codecs for collections of UDTs and tuples +- [bug] JAVA-2583: IS NOT NULL clause should be idempotent +- [improvement] JAVA-2442: Don't check for schema agreement twice when completing a DDL query +- [improvement] JAVA-2473: Don't reconnect control connection if protocol is downgraded +- [bug] JAVA-2556: Make ExecutionInfo compatible with any Request type +- [new feature] JAVA-2532: Add BoundStatement ReturnType for insert, update, and delete DAO methods +- [improvement] JAVA-2107: Add XML formatting plugin +- [bug] JAVA-2527: Allow AllNodesFailedException to accept more than one error per node +- [improvement] JAVA-2546: Abort schema refresh if a query fails + +### 4.3.1 + +- [bug] JAVA-2557: Accept any negative length when decoding elements of tuples and UDTs + +### 4.3.0 + +- [improvement] JAVA-2497: Ensure nodes and exceptions are serializable +- [bug] JAVA-2464: Fix initial schema refresh when reconnect-on-init is enabled +- [improvement] JAVA-2516: Enable hostname validation with Cloud +- [documentation]: JAVA-2460: Document how to determine the local DC +- [improvement] JAVA-2476: Improve error message when codec registry inspects a collection with a + null element +- [documentation] JAVA-2509: Mention file-based approach for Cloud configuration in the manual +- [improvement] JAVA-2447: Mention programmatic local DC method in Default LBP error message +- [improvement] JAVA-2459: Improve extensibility of existing load balancing policies +- [documentation] JAVA-2428: Add developer docs 
+- [documentation] JAVA-2503: Migrate Cloud "getting started" page to driver manual +- [improvement] JAVA-2484: Add errors for cloud misconfiguration +- [improvement] JAVA-2490: Allow to read the secure bundle from an InputStream +- [new feature] JAVA-2478: Allow to provide the secure bundle via URL +- [new feature] JAVA-2356: Support for DataStax Cloud API +- [improvement] JAVA-2407: Improve handling of logback configuration files in IDEs +- [improvement] JAVA-2434: Add support for custom cipher suites and host name validation to ProgrammaticSslEngineFactory +- [improvement] JAVA-2480: Upgrade Jackson to 2.10.0 +- [documentation] JAVA-2505: Annotate Node.getHostId() as nullable +- [improvement] JAVA-1708: Support DSE "everywhere" replication strategy +- [improvement] JAVA-2471: Consider DSE version when parsing the schema +- [improvement] JAVA-2444: Add method setRoutingKey(ByteBuffer...) to StatementBuilder +- [improvement] JAVA-2398: Improve support for optional dependencies in OSGi +- [improvement] JAVA-2452: Allow "none" as a compression option +- [improvement] JAVA-2419: Allow registration of user codecs at runtime +- [documentation] JAVA-2384: Add quick overview section to each manual page +- [documentation] JAVA-2412: Cover DDL query debouncing in FAQ and upgrade guide +- [documentation] JAVA-2416: Update paging section in the manual +- [improvement] JAVA-2402: Add setTracing(boolean) to StatementBuilder +- [bug] JAVA-2466: Set idempotence to null in BatchStatement.newInstance + +### 4.2.2 + +- [bug] JAVA-2475: Fix message size when query string contains Unicode surrogates +- [bug] JAVA-2470: Fix Session.OSS_DRIVER_COORDINATES for shaded JAR + +### 4.2.1 + +- [bug] JAVA-2454: Handle "empty" CQL type while parsing schema +- [improvement] JAVA-2455: Improve logging of schema refresh errors +- [documentation] JAVA-2429: Document expected types on DefaultDriverOption +- [documentation] JAVA-2426: Fix month pattern in CqlDuration documentation +- [bug] JAVA-2451: Make zero a valid estimated size for PagingIterableSpliterator +- [bug] JAVA-2443: Compute prepared statement PK indices for protocol v3 +- [bug] JAVA-2430: Use variable metadata to infer the routing keyspace on bound statements + +### 4.2.0 + +- [improvement] JAVA-2390: Add methods to set the SSL engine factory programmatically +- [improvement] JAVA-2379: Fail fast if prepared id doesn't match when repreparing on the fly +- [bug] JAVA-2375: Use per-request keyspace when repreparing on the fly +- [improvement] JAVA-2370: Remove auto-service plugin from mapper processor +- [improvement] JAVA-2377: Add a config option to make driver threads daemon +- [improvement] JAVA-2371: Handle null elements in collections on the decode path +- [improvement] JAVA-2351: Add a driver example for the object mapper +- [bug] JAVA-2323: Handle restart of a node with same host_id but a different address +- [improvement] JAVA-2303: Ignore peer rows matching the control host's RPC address +- [improvement] JAVA-2236: Add methods to set the auth provider programmatically +- [improvement] JAVA-2369: Change mapper annotations retention to runtime +- [improvement] JAVA-2365: Redeclare default constants when an enum is abstracted behind an + interface +- [improvement] JAVA-2302: Better target mapper errors and warnings for inherited methods +- [improvement] JAVA-2336: Expose byte utility methods in the public API +- [improvement] JAVA-2338: Revisit toString() for data container types +- [bug] JAVA-2367: Fix column names in EntityHelper.updateByPrimaryKey +- 
[bug] JAVA-2358: Fix list of reserved CQL keywords +- [improvement] JAVA-2359: Allow default keyspace at the mapper level +- [improvement] JAVA-2306: Clear security tokens from memory immediately after use +- [improvement] JAVA-2320: Expose more attributes on mapper Select for individual query clauses +- [bug] JAVA-2332: Destroy connection pool when a node gets removed - [bug] JAVA-2324: Add support for primitive shorts in mapper - [bug] JAVA-2325: Allow "is" prefix for boolean getters in mapped entities - [improvement] JAVA-2308: Add customWhereClause to `@Delete` @@ -11,6 +469,7 @@ - [bug] JAVA-2312: Handle UDTs with names that clash with collection types - [improvement] JAVA-2307: Improve `@Select` and `@Delete` by not requiring full primary key - [improvement] JAVA-2315: Improve extensibility of session builder +- [bug] JAVA-2394: BaseCcmRule DseRequirement max should use DseVersion, not Cassandra version ### 4.1.0 @@ -257,3 +716,1698 @@ - [new feature] JAVA-1501: Reprepare on the fly when we get an UNPREPARED response - [bug] JAVA-1499: Wait for load balancing policy at cluster initialization - [new feature] JAVA-1495: Add prepared statements + +### 3.11.5 + +- [improvement] JAVA-3114: Shade io.dropwizard.metrics:metrics-core in shaded driver +- [improvement] JAVA-3115: SchemaChangeListener#onKeyspaceChanged can fire when keyspace has not changed if using SimpleStrategy replication + +### 3.11.4 + +- [improvement] JAVA-3079: Upgrade Netty to 4.1.94, 3.x edition +- [improvement] JAVA-3082: Fix maven build for Apple-silicon +- [improvement] PR 1671: Fix LatencyAwarePolicy scale docstring + +### 3.11.3 + +- [improvement] JAVA-3023: Upgrade Netty to 4.1.77, 3.x edition + +### 3.11.2 + +- [improvement] JAVA-3008: Upgrade Netty to 4.1.75, 3.x edition +- [improvement] JAVA-2984: Upgrade Jackson to resolve high-priority CVEs + +### 3.11.1 + +- [bug] JAVA-2967: Support native transport peer information for DSE 6.8. +- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE. + +### 3.11.0 + +- [improvement] JAVA-2705: Remove protocol v5 beta status, add v6-beta. +- [bug] JAVA-2923: Detect and use Guava's new HostAndPort.getHost method. +- [bug] JAVA-2922: Switch to modern framing format inside a channel handler. +- [bug] JAVA-2924: Consider protocol version unsupported when server requires USE_BETA flag for it. + +### 3.10.2 + +- [bug] JAVA-2860: Avoid NPE if channel initialization crashes. + +### 3.10.1 + +- [bug] JAVA-2857: Fix NPE when built statements without parameters are logged at TRACE level. +- [bug] JAVA-2843: Successfully parse DSE table schema in OSS driver. + +### 3.10.0 + +- [improvement] JAVA-2676: Don't reschedule flusher after empty runs +- [new feature] JAVA-2772: Support new protocol v5 message format + +### 3.9.0 + +- [bug] JAVA-2627: Avoid logging error message including stack trace in request handler. +- [new feature] JAVA-2706: Add now_in_seconds to protocol v5 query messages. +- [improvement] JAVA-2730: Add support for Cassandra® 4.0 table options +- [improvement] JAVA-2702: Transient Replication Support for Cassandra® 4.0 + +### 3.8.0 + +- [new feature] JAVA-2356: Support for DataStax Cloud API. +- [improvement] JAVA-2483: Allow to provide secure bundle via URL. +- [improvement] JAVA-2499: Allow to read the secure bundle from an InputStream. +- [improvement] JAVA-2457: Detect CaaS and change default consistency. +- [improvement] JAVA-2485: Add errors for Cloud misconfiguration.
+- [documentation] JAVA-2504: Migrate Cloud "getting started" page to driver manual. +- [improvement] JAVA-2516: Enable hostname validation with Cloud +- [bug] JAVA-2515: NEW_NODE and REMOVED_NODE events should trigger ADDED and REMOVED. + + +### 3.7.2 + +- [bug] JAVA-2249: Stop stripping trailing zeros in ByteOrderedTokens. +- [bug] JAVA-1492: Don't immediately reuse busy connections for another request. +- [bug] JAVA-2198: Handle UDTs with names that clash with collection types. +- [bug] JAVA-2204: Avoid memory leak when client holds onto a stale TableMetadata instance. + + +### 3.7.1 + +- [bug] JAVA-2174: Metadata.needsQuote should accept empty strings. +- [bug] JAVA-2193: Fix flaky tests in WarningsTest. + + +### 3.7.0 + +- [improvement] JAVA-2025: Include exception message in Abstract\*Codec.accepts(null). +- [improvement] JAVA-1980: Use covariant return types in RemoteEndpointAwareJdkSSLOptions.Builder methods. +- [documentation] JAVA-2062: Document frozen collection preference with Mapper. +- [bug] JAVA-2071: Fix NPE in ArrayBackedRow.toString(). +- [bug] JAVA-2070: Call onRemove instead of onDown when rack and/or DC information changes for a host. +- [improvement] JAVA-1256: Log parameters of BuiltStatement in QueryLogger. +- [documentation] JAVA-2074: Document preference for LZ4 over Snappy. +- [bug] JAVA-1612: Include netty-common jar in binary tarball. +- [improvement] JAVA-2003: Simplify CBUtil internal API to improve performance. +- [improvement] JAVA-2002: Reimplement TypeCodec.accepts to improve performance. +- [documentation] JAVA-2041: Deprecate cross-DC failover in DCAwareRoundRobinPolicy. +- [documentation] JAVA-1159: Document workaround for using tuple with udt field in Mapper. +- [documentation] JAVA-1964: Complete remaining "Coming Soon" sections in docs. +- [improvement] JAVA-1950: Log server side warnings returned from a query. +- [improvement] JAVA-2123: Allow to use QueryBuilder for building queries against Materialized Views. +- [bug] JAVA-2082: Avoid race condition during cluster close and schema refresh. + + +### 3.6.0 + +- [improvement] JAVA-1394: Add request-queue-depth metric. +- [improvement] JAVA-1857: Add Statement.setHost. +- [bug] JAVA-1920: Use nanosecond precision in LocalTimeCodec#format(). +- [bug] JAVA-1794: Driver tries to create a connection array of size -1. +- [new feature] JAVA-1899: Support virtual tables. +- [bug] JAVA-1908: TableMetadata.asCQLQuery does not add table option 'memtable_flush_period_in_ms' in the generated query. +- [bug] JAVA-1924: StatementWrapper setters should return the wrapping statement. +- [new feature] JAVA-1532: Add Codec support for Java 8's LocalDateTime and ZoneId. +- [improvement] JAVA-1786: Use Google code formatter. +- [bug] JAVA-1871: Change LOCAL\_SERIAL.isDCLocal() to return true. +- [documentation] JAVA-1902: Clarify unavailable & request error in DefaultRetryPolicy javadoc. +- [new feature] JAVA-1903: Add WhiteListPolicy.ofHosts. +- [bug] JAVA-1928: Fix GuavaCompatibility for Guava 26. +- [bug] JAVA-1935: Add null check in QueryConsistencyException.getHost. +- [improvement] JAVA-1771: Send driver name and version in STARTUP message. +- [improvement] JAVA-1388: Add dynamic port discovery for system.peers\_v2. +- [documentation] JAVA-1810: Note which setters are not propagated to PreparedStatement. +- [bug] JAVA-1944: Surface Read and WriteFailureException to RetryPolicy. +- [bug] JAVA-1211: Fix NPE in cluster close when cluster init fails. 
+- [bug] JAVA-1220: Fail fast on cluster init if previous init failed. +- [bug] JAVA-1929: Preempt session execute queries if session was closed. + +Merged from 3.5.x: + +- [bug] JAVA-1872: Retain table's views when processing table update. + + +### 3.5.0 + +- [improvement] JAVA-1448: TokenAwarePolicy should respect child policy ordering. +- [bug] JAVA-1751: Include defaultTimestamp length in encodedSize for protocol version >= 3. +- [bug] JAVA-1770: Fix message size when using Custom Payload. +- [documentation] JAVA-1760: Add metrics documentation. +- [improvement] JAVA-1765: Update dependencies to latest patch versions. +- [improvement] JAVA-1752: Deprecate DowngradingConsistencyRetryPolicy. +- [improvement] JAVA-1735: Log driver version on first use. +- [documentation] JAVA-1380: Add FAQ entry for errors arising from incompatibilities. +- [improvement] JAVA-1748: Support IS NOT NULL and != in query builder. +- [documentation] JAVA-1740: Mention C*2.2/3.0 incompatibilities in paging state manual. +- [improvement] JAVA-1725: Add a getNodeCount method to CCMAccess for easier automation. +- [new feature] JAVA-708: Add means to measure request sizes. +- [documentation] JAVA-1788: Add example for enabling host name verification to SSL docs. +- [improvement] JAVA-1791: Revert "JAVA-1677: Warn if auth is configured on the client but not the server." +- [bug] JAVA-1789: Account for flags in Prepare encodedSize. +- [bug] JAVA-1797: Use jnr-ffi version required by jnr-posix. + + +### 3.4.0 + +- [improvement] JAVA-1671: Remove unnecessary test on prepared statement metadata. +- [bug] JAVA-1694: Upgrade to jackson-databind 2.7.9.2 to address CVE-2015-15095. +- [documentation] JAVA-1685: Clarify recommendation on preparing SELECT *. +- [improvement] JAVA-1679: Improve error message on batch log write timeout. +- [improvement] JAVA-1672: Remove schema agreement check when repreparing on up. +- [improvement] JAVA-1677: Warn if auth is configured on the client but not the server. +- [new feature] JAVA-1651: Add NO_COMPACT startup option. +- [improvement] JAVA-1683: Add metrics to track writes to nodes. +- [new feature] JAVA-1229: Allow specifying the keyspace for individual queries. +- [improvement] JAVA-1682: Provide a way to record latencies for cancelled speculative executions. +- [improvement] JAVA-1717: Add metrics to latency-aware policy. +- [improvement] JAVA-1675: Remove dates from copyright headers. + +Merged from 3.3.x: + +- [bug] JAVA-1555: Include VIEW and CDC in WriteType. +- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) +- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery +- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. +- [bug] JAVA-1438: QueryBuilder check for empty orderings. +- [improvement] JAVA-1490: Allow zero delay for speculative executions. +- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. +- [bug] JAVA-1630: Fix Metadata.addIfAbsent. +- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. +- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. +- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. +- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. +- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. +- [bug] JAVA-1639: VersionNumber does not fullfill equals/hashcode contract. 
+- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. +- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. +- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. +- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. +- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. +- [improvement] JAVA-1659: Expose low-level flusher tuning options. +- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. + + +### 3.3.2 + +- [bug] JAVA-1666: Fix keyspace export when a UDT has case-sensitive field names. +- [improvement] JAVA-1196: Include hash of result set metadata in prepared statement id. +- [improvement] JAVA-1670: Support user-provided JMX ports for CCMBridge. +- [improvement] JAVA-1661: Avoid String.toLowerCase if possible in Metadata. +- [improvement] JAVA-1659: Expose low-level flusher tuning options. +- [improvement] JAVA-1660: Support netty-transport-native-epoll in OSGi container. + + +### 3.3.1 + +- [bug] JAVA-1555: Include VIEW and CDC in WriteType. +- [bug] JAVA-1599: exportAsString improvements (sort, format, clustering order) +- [improvement] JAVA-1587: Deterministic ordering of columns used in Mapper#saveQuery +- [improvement] JAVA-1500: Add a metric to report number of in-flight requests. +- [bug] JAVA-1438: QueryBuilder check for empty orderings. +- [improvement] JAVA-1490: Allow zero delay for speculative executions. +- [documentation] JAVA-1607: Add FAQ entry for netty-transport-native-epoll. +- [bug] JAVA-1630: Fix Metadata.addIfAbsent. +- [improvement] JAVA-1619: Update QueryBuilder methods to support Iterable input. +- [improvement] JAVA-1527: Expose host_id and schema_version on Host metadata. +- [new feature] JAVA-1377: Add support for TWCS in SchemaBuilder. +- [improvement] JAVA-1631: Publish a sources jar for driver-core-tests. +- [improvement] JAVA-1632: Add a withIpPrefix(String) method to CCMBridge.Builder. +- [bug] JAVA-1639: VersionNumber does not fullfill equals/hashcode contract. +- [bug] JAVA-1613: Fix broken shaded Netty detection in NettyUtil. + + +### 3.3.0 + +- [bug] JAVA-1469: Update LoggingRetryPolicy to deal with SLF4J-353. +- [improvement] JAVA-1203: Upgrade Metrics to allow usage in OSGi. +- [bug] JAVA-1407: KeyspaceMetadata exportAsString should export user types in topological sort order. +- [bug] JAVA-1455: Mapper support using unset for null values. +- [bug] JAVA-1464: Allow custom codecs with non public constructors in @Param. +- [bug] JAVA-1470: Querying multiple pages overrides WrappedStatement. +- [improvement] JAVA-1428: Upgrade logback and jackson dependencies. +- [documentation] JAVA-1463: Revisit speculative execution docs. +- [documentation] JAVA-1466: Revisit timestamp docs. +- [documentation] JAVA-1445: Clarify how nodes are penalized in LatencyAwarePolicy docs. +- [improvement] JAVA-1446: Support 'DEFAULT UNSET' in Query Builder JSON Insert. +- [improvement] JAVA-1443: Add groupBy method to Select statement. +- [improvement] JAVA-1458: Check thread in mapper sync methods. +- [improvement] JAVA-1488: Upgrade Netty to 4.0.47.Final. +- [improvement] JAVA-1460: Add speculative execution number to ExecutionInfo +- [improvement] JAVA-1431: Improve error handling during pool initialization. + + +### 3.2.0 + +- [new feature] JAVA-1347: Add support for duration type. +- [new feature] JAVA-1248: Implement "beta" flag for native protocol v5. 
+- [new feature] JAVA-1362: Send query options flags as [int] for Protocol V5+. +- [new feature] JAVA-1364: Enable creation of SSLHandler with remote address information. +- [improvement] JAVA-1367: Make protocol negotiation more resilient. +- [bug] JAVA-1397: Handle duration as native datatype in protocol v5+. +- [improvement] JAVA-1308: CodecRegistry performance improvements. +- [improvement] JAVA-1287: Add CDC to TableOptionsMetadata and Schema Builder. +- [improvement] JAVA-1392: Reduce lock contention in RPTokenFactory. +- [improvement] JAVA-1328: Provide compatibility with Guava 20. +- [improvement] JAVA-1247: Disable idempotence warnings. +- [improvement] JAVA-1286: Support setting and retrieving udt fields in QueryBuilder. +- [bug] JAVA-1415: Correctly report if a UDT column is frozen. +- [bug] JAVA-1418: Make Guava version detection more reliable. +- [new feature] JAVA-1174: Add ifNotExists option to mapper. +- [improvement] JAVA-1414: Optimize Metadata.escapeId and Metadata.handleId. +- [improvement] JAVA-1310: Make mapper's ignored properties configurable. +- [improvement] JAVA-1316: Add strategy for resolving properties into CQL names. +- [bug] JAVA-1424: Handle new WRITE_FAILURE and READ_FAILURE format in v5 protocol. + +Merged from 3.1.x branch: + +- [bug] JAVA-1371: Reintroduce connection pool timeout. +- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. +- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. +- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. +- [bug] JAVA-1404: Fix min token handling in TokenRange.contains. +- [bug] JAVA-1429: Prevent heartbeats until connection is fully initialized. + + +### 3.1.4 + +Merged from 3.0.x branch: + +- [bug] JAVA-1371: Reintroduce connection pool timeout. +- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. +- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. +- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. + + +### 3.1.3 + +Merged from 3.0.x branch: + +- [bug] JAVA-1330: Add un/register for SchemaChangeListener in DelegatingCluster +- [bug] JAVA-1351: Include Custom Payload in Request.copy. +- [bug] JAVA-1346: Reset heartbeat only on client reads (not writes). +- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. + + +### 3.1.2 + +- [bug] JAVA-1321: Wrong OSGi dependency version for Guava. + +Merged from 3.0.x branch: + +- [bug] JAVA-1312: QueryBuilder modifies selected columns when manually selected. +- [improvement] JAVA-1303: Add missing BoundStatement.setRoutingKey(ByteBuffer...) +- [improvement] JAVA-262: Make internal executors customizable + + +### 3.1.1 + +- [bug] JAVA-1284: ClockFactory should check system property before attempting to load Native class. +- [bug] JAVA-1255: Allow nested UDTs to be used in Mapper. +- [bug] JAVA-1279: Mapper should exclude Groovy's "metaClass" property when looking for mapped properties + +Merged from 3.0.x branch: + +- [improvement] JAVA-1246: Driver swallows the real exception in a few cases +- [improvement] JAVA-1261: Throw error when attempting to page in I/O thread. +- [bug] JAVA-1258: Regression: Mapper cannot map a materialized view after JAVA-1126. +- [bug] JAVA-1101: Batch and BatchStatement should consider inner statements to determine query idempotence +- [improvement] JAVA-1262: Use ParseUtils for quoting & unquoting. 
+- [improvement] JAVA-1275: Use Netty's default thread factory +- [bug] JAVA-1285: QueryBuilder routing key auto-discovery should handle case-sensitive column names. +- [bug] JAVA-1283: Don't cache failed query preparations in the mapper. +- [improvement] JAVA-1277: Expose AbstractSession.checkNotInEventLoop. +- [bug] JAVA-1272: BuiltStatement not able to print its query string if it contains mapped UDTs. +- [bug] JAVA-1292: 'Adjusted frame length' error breaks driver's ability to read data. +- [improvement] JAVA-1293: Make DecoderForStreamIdSize.MAX_FRAME_LENGTH configurable. +- [improvement] JAVA-1053: Add a metric for authentication errors +- [improvement] JAVA-1263: Eliminate unnecessary memory copies in FrameCompressor implementations. +- [improvement] JAVA-893: Make connection pool non-blocking + + +### 3.1.0 + +- [new feature] JAVA-1153: Add PER PARTITION LIMIT to Select QueryBuilder. +- [improvement] JAVA-743: Add JSON support to QueryBuilder. +- [improvement] JAVA-1233: Update HdrHistogram to 2.1.9. +- [improvement] JAVA-1233: Update Snappy to 1.1.2.6. +- [bug] JAVA-1161: Preserve full time zone info in ZonedDateTimeCodec and DateTimeCodec. +- [new feature] JAVA-1157: Allow asynchronous paging of Mapper Result. +- [improvement] JAVA-1212: Don't retry non-idempotent statements by default. +- [improvement] JAVA-1192: Make EventDebouncer settings updatable at runtime. +- [new feature] JAVA-541: Add polymorphism support to object mapper. +- [new feature] JAVA-636: Allow @Column annotations on getters/setters as well as fields. +- [new feature] JAVA-984: Allow non-void setters in object mapping. +- [new feature] JAVA-1055: Add ErrorAware load balancing policy. + +Merged from 3.0.x branch: + +- [bug] JAVA-1179: Request objects should be copied when executed. +- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. +- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. +- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. +- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. +- [improvement] JAVA-1151: Fail fast if HdrHistogram is not in the classpath. +- [improvement] JAVA-1154: Allow individual Statement to cancel the read timeout. +- [bug] JAVA-1074: Fix documentation around default timestamp generator. +- [improvement] JAVA-1109: Document SSLOptions changes in upgrade guide. +- [improvement] JAVA-1065: Add method to create token from partition key values. +- [improvement] JAVA-1136: Enable JDK signature check in module driver-extras. +- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. +- [bug] JAVA-1140: Use same connection to check for schema agreement after a DDL query. +- [improvement] JAVA-1113: Support Cassandra 3.4 LIKE operator in QueryBuilder. +- [improvement] JAVA-1086: Support Cassandra 3.2 CAST function in QueryBuilder. +- [bug] JAVA-1095: Check protocol version for custom payload before sending the query. +- [improvement] JAVA-1133: Add OSGi headers to cassandra-driver-extras. +- [bug] JAVA-1137: Incorrect string returned by DataType.asFunctionParameterString() for collections and tuples. +- [bug] JAVA-1046: (Dynamic)CompositeTypes need to be parsed as string literal, not blob. +- [improvement] JAVA-1164: Clarify documentation on Host.listenAddress and broadcastAddress. +- [improvement] JAVA-1171: Add Host method to determine if DSE Graph is enabled. +- [improvement] JAVA-1069: Bootstrap driver-examples module. 
+- [documentation] JAVA-1150: Add example and FAQ entry about ByteBuffer/BLOB. +- [improvement] JAVA-1011: Expose PoolingOptions default values. +- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. +- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. +- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. +- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. +- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. +- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. +- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI +- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. +- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. +- [improvement] JAVA-923: Position idempotent flag on object mapper queries. +- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. +- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). +- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. +- [improvement] JAVA-852: Ignore peers with null entries during discovery. +- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. +- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. +- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. +- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). + +Merged from 2.1 branch: + +- [improvement] JAVA-1038: Fetch node info by rpc_address if its broadcast_address is not in system.peers. +- [improvement] JAVA-888: Add cluster-wide percentile tracker. +- [improvement] JAVA-963: Automatically register PercentileTracker from components that use it. +- [new feature] JAVA-1019: SchemaBuilder support for CREATE/ALTER/DROP KEYSPACE. +- [bug] JAVA-727: Allow monotonic timestamp generators to drift in the future + use microsecond precision when possible. +- [improvement] JAVA-444: Add Java process information to UUIDs.makeNode() hash. + + +### 3.0.7 + +- [bug] JAVA-1371: Reintroduce connection pool timeout. +- [bug] JAVA-1313: Copy SerialConsistencyLevel to PreparedStatement. +- [documentation] JAVA-1334: Clarify documentation of method `addContactPoints`. +- [improvement] JAVA-1357: Document that getReplicas only returns replicas of the last token in range. + + +### 3.0.6 + +- [bug] JAVA-1330: Add un/register for SchemaChangeListener in DelegatingCluster +- [bug] JAVA-1351: Include Custom Payload in Request.copy. +- [bug] JAVA-1346: Reset heartbeat only on client reads (not writes). +- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. + + +### 3.0.5 + +- [bug] JAVA-1312: QueryBuilder modifies selected columns when manually selected. +- [improvement] JAVA-1303: Add missing BoundStatement.setRoutingKey(ByteBuffer...) +- [improvement] JAVA-262: Make internal executors customizable +- [bug] JAVA-1320: prevent unnecessary task creation on empty pool + + +### 3.0.4 + +- [improvement] JAVA-1246: Driver swallows the real exception in a few cases +- [improvement] JAVA-1261: Throw error when attempting to page in I/O thread. 
+- [bug] JAVA-1258: Regression: Mapper cannot map a materialized view after JAVA-1126. +- [bug] JAVA-1101: Batch and BatchStatement should consider inner statements to determine query idempotence +- [improvement] JAVA-1262: Use ParseUtils for quoting & unquoting. +- [improvement] JAVA-1275: Use Netty's default thread factory +- [bug] JAVA-1285: QueryBuilder routing key auto-discovery should handle case-sensitive column names. +- [bug] JAVA-1283: Don't cache failed query preparations in the mapper. +- [improvement] JAVA-1277: Expose AbstractSession.checkNotInEventLoop. +- [bug] JAVA-1272: BuiltStatement not able to print its query string if it contains mapped UDTs. +- [bug] JAVA-1292: 'Adjusted frame length' error breaks driver's ability to read data. +- [improvement] JAVA-1293: Make DecoderForStreamIdSize.MAX_FRAME_LENGTH configurable. +- [improvement] JAVA-1053: Add a metric for authentication errors +- [improvement] JAVA-1263: Eliminate unnecessary memory copies in FrameCompressor implementations. +- [improvement] JAVA-893: Make connection pool non-blocking + + +### 3.0.3 + +- [improvement] JAVA-1147: Upgrade Netty to 4.0.37. +- [bug] JAVA-1213: Allow updates and inserts to BLOB column using read-only ByteBuffer. +- [bug] JAVA-1209: ProtocolOptions.getProtocolVersion() should return null instead of throwing NPE if Cluster has not + been init'd. +- [improvement] JAVA-1204: Update documentation to indicate tcnative version requirement. +- [bug] JAVA-1186: Fix duplicated hosts in DCAwarePolicy warn message. +- [bug] JAVA-1187: Fix warning message when local CL used with RoundRobinPolicy. +- [improvement] JAVA-1175: Warn if DCAwarePolicy configuration is inconsistent. +- [bug] JAVA-1139: ConnectionException.getMessage() throws NPE if address is null. +- [bug] JAVA-1202: Handle null rpc_address when checking schema agreement. +- [improvement] JAVA-1198: Document that BoundStatement is not thread-safe. +- [improvement] JAVA-1200: Upgrade LZ4 to 1.3.0. +- [bug] JAVA-1232: Fix NPE in IdempotenceAwareRetryPolicy.isIdempotent. +- [improvement] JAVA-1227: Document "SELECT *" issue with prepared statement. +- [bug] JAVA-1160: Fix NPE in VersionNumber.getPreReleaseLabels(). +- [improvement] JAVA-1126: Handle schema changes in Mapper. +- [bug] JAVA-1193: Refresh token and replica metadata synchronously when schema is altered. +- [bug] JAVA-1120: Skip schema refresh debouncer when checking for agreement as a result of schema change made by client. +- [improvement] JAVA-1242: Fix driver-core dependency in driver-stress +- [improvement] JAVA-1235: Move the query to the end of "re-preparing .." log message as a key value. + + +### 3.0.2 + +Merged from 2.1 branch: + +- [bug] JAVA-1179: Request objects should be copied when executed. +- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. +- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. + + +### 3.0.1 + +- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. +- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. +- [improvement] JAVA-1151: Fail fast if HdrHistogram is not in the classpath. +- [improvement] JAVA-1154: Allow individual Statement to cancel the read timeout. +- [bug] JAVA-1074: Fix documentation around default timestamp generator. +- [improvement] JAVA-1109: Document SSLOptions changes in upgrade guide. +- [improvement] JAVA-1065: Add method to create token from partition key values. 
+- [improvement] JAVA-1136: Enable JDK signature check in module driver-extras. +- [improvement] JAVA-866: Support tuple notation in QueryBuilder.eq/in. +- [bug] JAVA-1140: Use same connection to check for schema agreement after a DDL query. +- [improvement] JAVA-1113: Support Cassandra 3.4 LIKE operator in QueryBuilder. +- [improvement] JAVA-1086: Support Cassandra 3.2 CAST function in QueryBuilder. +- [bug] JAVA-1095: Check protocol version for custom payload before sending the query. +- [improvement] JAVA-1133: Add OSGi headers to cassandra-driver-extras. +- [bug] JAVA-1137: Incorrect string returned by DataType.asFunctionParameterString() for collections and tuples. +- [bug] JAVA-1046: (Dynamic)CompositeTypes need to be parsed as string literal, not blob. +- [improvement] JAVA-1164: Clarify documentation on Host.listenAddress and broadcastAddress. +- [improvement] JAVA-1171: Add Host method to determine if DSE Graph is enabled. +- [improvement] JAVA-1069: Bootstrap driver-examples module. +- [documentation] JAVA-1150: Add example and FAQ entry about ByteBuffer/BLOB. + +Merged from 2.1 branch: + +- [improvement] JAVA-1011: Expose PoolingOptions default values. +- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. +- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. +- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. +- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. +- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. +- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. +- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI +- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. +- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. +- [improvement] JAVA-923: Position idempotent flag on object mapper queries. +- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. +- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). +- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. +- [improvement] JAVA-852: Ignore peers with null entries during discovery. +- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. +- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. +- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. +- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). + + +### 3.0.0 + +- [bug] JAVA-1034: fix metadata parser for collections of custom types. +- [improvement] JAVA-1035: Expose host broadcast_address and listen_address if available. +- [new feature] JAVA-1037: Allow named parameters in simple statements. +- [improvement] JAVA-1033: Allow per-statement read timeout. +- [improvement] JAVA-1042: Include DSE version and workload in Host data. + +Merged from 2.1 branch: + +- [improvement] JAVA-1030: Log token to replica map computation times. +- [bug] JAVA-1039: Minor bugs in Event Debouncer. + + +### 3.0.0-rc1 + +- [bug] JAVA-890: fix mapper for case-sensitive UDT. + + +### 3.0.0-beta1 + +- [bug] JAVA-993: Support for "custom" types after CASSANDRA-10365. 
+- [bug] JAVA-999: Handle unset parameters in QueryLogger.
+- [bug] JAVA-998: SchemaChangeListener not invoked for Functions or Aggregates having UDT arguments.
+- [bug] JAVA-1009: use CL ONE to compute query plan when reconnecting
+  control connection.
+- [improvement] JAVA-1003: Change default consistency level to LOCAL_ONE (amends JAVA-926).
+- [improvement] JAVA-863: Idempotence propagation in prepared statements.
+- [improvement] JAVA-996: Make CodecRegistry available to ProtocolDecoder.
+- [bug] JAVA-819: Driver shouldn't retry on client timeout if statement is not idempotent.
+- [improvement] JAVA-1007: Make SimpleStatement and QueryBuilder "detached" again.
+
+Merged from 2.1 branch:
+
+- [improvement] JAVA-989: Include keyspace name when invalid replication found when generating token map.
+- [improvement] JAVA-664: Reduce heap consumption for TokenMap.
+- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing.
+
+
+### 3.0.0-alpha5
+
+- [improvement] JAVA-958: Make TableOrView.Order visible.
+- [improvement] JAVA-968: Update metrics to the latest version.
+- [improvement] JAVA-965: Improve error handling when a non-type 1 UUID is given to bind() on a timeuuid column.
+- [improvement] JAVA-885: Pass the authenticator name from the server to the auth provider.
+- [improvement] JAVA-961: Raise an exception when an older version of guava (< 16.0.1) is found.
+- [bug] JAVA-972: TypeCodec.parse() implementations should be case insensitive when checking for keyword NULL.
+- [bug] JAVA-971: Make type codecs invariant.
+- [bug] JAVA-986: Update documentation links to reference 3.0.
+- [improvement] JAVA-841: Refactor SSLOptions API.
+- [improvement] JAVA-948: Don't limit cipher suites by default.
+- [improvement] JAVA-917: Document SSL configuration.
+- [improvement] JAVA-936: Adapt schema metadata parsing logic to new storage format of CQL types in C* 3.0.
+- [new feature] JAVA-846: Provide custom codecs library as an extra module.
+- [new feature] JAVA-742: Codec Support for JSON.
+- [new feature] JAVA-606: Codec support for Java 8.
+- [new feature] JAVA-565: Codec support for Java arrays.
+- [new feature] JAVA-605: Codec support for Java enums.
+- [bug] JAVA-884: Fix UDT mapper to process fields in the correct order.
+
+Merged from 2.1 branch:
+
+- [bug] JAVA-854: avoid early return in Cluster.init when a node doesn't support the protocol version.
+- [bug] JAVA-978: Fix quoting issue that caused Mapper.getTableMetadata() to return null.
+- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN.
+- [bug] JAVA-954: Don't trigger reconnection before initialization complete.
+- [improvement] JAVA-914: Avoid rejected tasks at shutdown.
+- [improvement] JAVA-921: Add SimpleStatement.getValuesCount().
+- [bug] JAVA-901: Move call to connection.release() out of cancelHandler.
+- [bug] JAVA-960: Avoid race in control connection shutdown.
+- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo.
+- [bug] JAVA-966: Count uninitialized connections in conviction policy.
+- [improvement] JAVA-917: Document SSL configuration.
+- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder.
+- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs.
+- [bug] JAVA-988: Metadata.handleId should handle escaped double quotes.
+- [bug] JAVA-983: QueryBuilder cannot handle collections containing function calls.
+
+
+### 3.0.0-alpha4
+
+- [improvement] JAVA-926: Change default consistency level to LOCAL_QUORUM.
+- [bug] JAVA-942: Fix implementation of UserType.hashCode().
+- [improvement] JAVA-877: Don't delay UP/ADDED notifications if protocol version = V4.
+- [improvement] JAVA-938: Parse 'extensions' column in table metadata.
+- [bug] JAVA-900: Fix Configuration builder to allow disabled metrics.
+- [new feature] JAVA-902: Prepare API for async query trace.
+- [new feature] JAVA-930: Add BoundStatement#unset.
+- [bug] JAVA-946: Make table metadata options class visible.
+- [bug] JAVA-939: Add crcCheckChance to TableOptionsMetadata#equals/hashCode.
+- [bug] JAVA-922: Make TypeCodec return mutable collections.
+- [improvement] JAVA-932: Limit visibility of codec internals.
+- [improvement] JAVA-934: Warn if a custom codec collides with an existing one.
+- [improvement] JAVA-940: Allow typed getters/setters to target any CQL type.
+- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace.
+- [bug] JAVA-953: Fix MaterializedViewMetadata when base table name is case sensitive.
+
+
+### 3.0.0-alpha3
+
+- [new feature] JAVA-571: Support new system tables in C* 3.0.
+- [improvement] JAVA-919: Move crc_check_chance out of compression options.
+
+Merged from 2.0 branch:
+
+- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response.
+- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called.
+- [improvement] JAVA-710: Suppress unnecessary warning at shutdown.
+- [improvement] #340: Allow DNS name with multiple A-records as contact point.
+- [bug] JAVA-794: Allow tracing across multiple result pages.
+- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts.
+- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend.
+- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes.
+- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string.
+- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index.
+- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT.
+- [improvement] JAVA-225: Create values() function for Insert builder using List.
+- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid
+  replication factors.
+- [improvement] JAVA-662: Add PoolingOptions method to set both core and max
+  connections.
+- [improvement] JAVA-766: Do not include epoll JAR in binary distribution.
+- [improvement] JAVA-726: Optimize internal copies of Request objects.
+- [bug] JAVA-815: Preserve tracing across retries.
+- [improvement] JAVA-709: New RetryDecision.tryNextHost().
+- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder.
+- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement.
+- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder.
+- [improvement] JAVA-618: Randomize contact points list to prevent hotspots.
+- [improvement] JAVA-720: Surface the coordinator used on query failure.
+- [bug] JAVA-792: Handle contact points removed during init.
+- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime.
+- [new feature] JAVA-151: Make it possible to register for SchemaChange Events.
+- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level.
+- [improvement] JAVA-797: Provide an option to prepare statements only on one node.
+- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp.
+- [improvement] JAVA-853: Customizable creation of netty timer.
+- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors.
+- [improvement] JAVA-657: Debounce control connection queries.
+- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init().
+- [new feature] JAVA-828: Make driver-side metadata optional.
+- [improvement] JAVA-544: Allow hosts to remain partially up.
+- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session
+  creation.
+- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other
+  hosts.
+- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries.
+- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled.
+
+
+### 3.0.0-alpha2
+
+- [new feature] JAVA-875, JAVA-882: Move secondary index metadata out of column definitions.
+
+Merged from 2.2 branch:
+
+- [bug] JAVA-847: Propagate CodecRegistry to nested UDTs.
+- [improvement] JAVA-848: Ability to store a default, shareable CodecRegistry
+  instance.
+- [bug] JAVA-880: Treat empty ByteBuffers as empty values in TupleCodec and
+  UDTCodec.
+
+
+### 3.0.0-alpha1
+
+- [new feature] JAVA-876: Support new system tables in C* 3.0.0-alpha1.
+
+Merged from 2.2 branch:
+
+- [improvement] JAVA-810: Rename DateWithoutTime to LocalDate.
+- [bug] JAVA-816: DateCodec does not format values correctly.
+- [bug] JAVA-817: TimeCodec does not format values correctly.
+- [bug] JAVA-818: TypeCodec.getDataTypeFor() does not handle LocalDate instances.
+- [improvement] JAVA-836: Make ResultSet#fetchMoreResults return a
+  ListenableFuture<ResultSet>.
+- [improvement] JAVA-843: Disable frozen checks in mapper.
+- [improvement] JAVA-721: Allow user to register custom type codecs.
+- [improvement] JAVA-722: Support custom type codecs in mapper.
+
+
+### 2.2.0-rc3
+
+- [bug] JAVA-847: Propagate CodecRegistry to nested UDTs.
+- [improvement] JAVA-848: Ability to store a default, shareable CodecRegistry
+  instance.
+- [bug] JAVA-880: Treat empty ByteBuffers as empty values in TupleCodec and
+  UDTCodec.
+
+
+### 2.2.0-rc2
+
+- [improvement] JAVA-810: Rename DateWithoutTime to LocalDate.
+- [bug] JAVA-816: DateCodec does not format values correctly.
+- [bug] JAVA-817: TimeCodec does not format values correctly.
+- [bug] JAVA-818: TypeCodec.getDataTypeFor() does not handle LocalDate instances.
+- [improvement] JAVA-836: Make ResultSet#fetchMoreResults return a
+  ListenableFuture<ResultSet>.
+- [improvement] JAVA-843: Disable frozen checks in mapper.
+- [improvement] JAVA-721: Allow user to register custom type codecs.
+- [improvement] JAVA-722: Support custom type codecs in mapper.
+
+Merged from 2.1 branch:
+
+- [bug] JAVA-834: Special case check for 'null' string in index_options column.
+- [improvement] JAVA-835: Allow accessor methods with fewer parameters when
+  named bind markers are repeated.
+- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT.
+- [improvement] JAVA-715: Make NativeColumnType a top-level class.
+- [improvement] JAVA-700: Expose ProtocolVersion#toInt.
+- [bug] JAVA-542: Handle void return types in accessors.
+- [improvement] JAVA-225: Create values() function for Insert builder using List.
+- [improvement] JAVA-713: HashMap throws an OOM Exception when logging level is set to TRACE.
+- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index.
+- [improvement] JAVA-732: Expose KEYS and FULL indexing options in IndexMetadata.
+- [improvement] JAVA-589: Allow @Enumerated in Accessor method parameters. +- [improvement] JAVA-554: Allow access to table metadata from Mapper. +- [improvement] JAVA-661: Provide a way to map computed fields. +- [improvement] JAVA-824: Ignore missing columns in mapper. +- [bug] JAVA-724: Preserve default timestamp for retries and speculative executions. +- [improvement] JAVA-738: Use same pool implementation for protocol v2 and v3. +- [improvement] JAVA-677: Support CONTAINS / CONTAINS KEY in QueryBuilder. +- [improvement] JAVA-477/JAVA-540: Add USING options in mapper for delete and save + operations. +- [improvement] JAVA-473: Add mapper option to configure whether to save null fields. + +Merged from 2.0 branch: + +- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. +- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. +- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. +- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. +- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. +- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. +- [improvement] JAVA-225: Create values() function for Insert builder using List. +- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid + replication factors. +- [improvement] JAVA-662: Add PoolingOptions method to set both core and max + connections. +- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. +- [improvement] JAVA-726: Optimize internal copies of Request objects. +- [bug] JAVA-815: Preserve tracing across retries. +- [improvement] JAVA-709: New RetryDecision.tryNextHost(). +- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. + + +### 2.2.0-rc1 + +- [new feature] JAVA-783: Protocol V4 enum support. +- [new feature] JAVA-776: Use PK columns in protocol v4 PREPARED response. +- [new feature] JAVA-777: Distinguish NULL and UNSET values. +- [new feature] JAVA-779: Add k/v payload for 3rd party usage. +- [new feature] JAVA-780: Expose server-side warnings on ExecutionInfo. +- [new feature] JAVA-749: Expose new read/write failure exceptions. +- [new feature] JAVA-747: Expose function and aggregate metadata. +- [new feature] JAVA-778: Add new client exception for CQL function failure. +- [improvement] JAVA-700: Expose ProtocolVersion#toInt. +- [new feature] JAVA-404: Support new C* 2.2 CQL date and time types. + +Merged from 2.1 branch: + +- [improvement] JAVA-782: Unify "Target" enum for schema elements. + + +### 2.1.10.2 + +Merged from 2.0 branch: + +- [bug] JAVA-1179: Request objects should be copied when executed. +- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. +- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. + + +### 2.1.10.1 + +- [bug] JAVA-1152: Fix NPE at ControlConnection.refreshNodeListAndTokenMap(). +- [bug] JAVA-1156: Fix NPE at TableMetadata.equals(). + + +### 2.1.10 + +- [bug] JAVA-988: Metadata.handleId should handle escaped double quotes. +- [bug] JAVA-983: QueryBuilder cannot handle collections containing function calls. +- [improvement] JAVA-863: Idempotence propagation in PreparedStatements. +- [bug] JAVA-937: TypeCodec static initializers not always correctly executed. +- [improvement] JAVA-989: Include keyspace name when invalid replication found when generating token map. +- [improvement] JAVA-664: Reduce heap consumption for TokenMap. 
+- [improvement] JAVA-1030: Log token to replica map computation times. +- [bug] JAVA-1039: Minor bugs in Event Debouncer. +- [improvement] JAVA-843: Disable frozen checks in mapper. +- [improvement] JAVA-833: Improve message when a nested type can't be serialized. +- [improvement] JAVA-1011: Expose PoolingOptions default values. +- [improvement] JAVA-630: Don't process DOWN events for nodes that have active connections. +- [improvement] JAVA-851: Improve UUIDs javadoc with regard to user-provided timestamps. +- [improvement] JAVA-979: Update javadoc for RegularStatement toString() and getQueryString() to indicate that consistency level and other parameters are not maintained in the query string. +- [improvement] JAVA-1038: Fetch node info by rpc_address if its broadcast_address is not in system.peers. +- [improvement] JAVA-974: Validate accessor parameter types against bound statement. +- [bug] JAVA-1068: Unwrap StatementWrappers when hashing the paging state. +- [bug] JAVA-831: Mapper can't load an entity where the PK is a UDT. +- [improvement] JAVA-1021: Improve error message when connect() is called with an invalid keyspace name. +- [improvement] JAVA-879: Mapper.map() accepts mapper-generated and user queries. +- [bug] JAVA-1100: Exception when connecting with shaded java driver in OSGI +- [bug] JAVA-819: Expose more errors in RetryPolicy + provide idempotent-aware wrapper. +- [improvement] JAVA-1040: SimpleStatement parameters support in QueryLogger. +- [bug] JAVA-1064: getTable create statement doesn't properly handle quotes in primary key. +- [improvement] JAVA-888: Add cluster-wide percentile tracker. +- [improvement] JAVA-963: Automatically register PercentileTracker from components that use it. +- [bug] JAVA-1089: Set LWT made from BuiltStatements to non-idempotent. +- [improvement] JAVA-923: Position idempotent flag on object mapper queries. +- [new feature] JAVA-1019: SchemaBuilder support for CREATE/ALTER/DROP KEYSPACE. +- [bug] JAVA-1070: The Mapper should not prepare queries synchronously. +- [new feature] JAVA-982: Introduce new method ConsistencyLevel.isSerial(). +- [bug] JAVA-764: Retry with the normal consistency level (not the serial one) when a write times out on the Paxos phase. +- [bug] JAVA-727: Allow monotonic timestamp generators to drift in the future + use microsecond precision when possible. +- [improvement] JAVA-444: Add Java process information to UUIDs.makeNode() hash. +- [improvement] JAVA-977: Preserve original cause when BuiltStatement value can't be serialized. +- [bug] JAVA-1094: Backport TypeCodec parse and format fixes from 3.0. +- [improvement] JAVA-852: Ignore peers with null entries during discovery. +- [bug] JAVA-1132: Executing bound statement with no variables results in exception with protocol v1. +- [bug] JAVA-1005: DowngradingConsistencyRetryPolicy does not work with EACH_QUORUM when 1 DC is down. +- [bug] JAVA-1002: Avoid deadlock when re-preparing a statement on other hosts. + +Merged from 2.0 branch: + +- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. +- [improvement] JAVA-805: Document that metrics are null until Cluster is initialized. +- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. + + +### 2.1.9 + +- [bug] JAVA-942: Fix implementation of UserType.hashCode(). +- [bug] JAVA-854: avoid early return in Cluster.init when a node doesn't support the protocol version. +- [bug] JAVA-978: Fix quoting issue that caused Mapper.getTableMetadata() to return null. 
+
+Merged from 2.0 branch:
+
+- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace.
+- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN.
+- [bug] JAVA-954: Don't trigger reconnection before initialization complete.
+- [improvement] JAVA-914: Avoid rejected tasks at shutdown.
+- [improvement] JAVA-921: Add SimpleStatement.getValuesCount().
+- [bug] JAVA-901: Move call to connection.release() out of cancelHandler.
+- [bug] JAVA-960: Avoid race in control connection shutdown.
+- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo.
+- [bug] JAVA-966: Count uninitialized connections in conviction policy.
+- [improvement] JAVA-917: Document SSL configuration.
+- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder.
+- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs.
+
+
+### 2.1.8
+
+Merged from 2.0 branch:
+
+- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response.
+- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called.
+- [improvement] JAVA-710: Suppress unnecessary warning at shutdown.
+- [improvement] #340: Allow DNS name with multiple A-records as contact point.
+- [bug] JAVA-794: Allow tracing across multiple result pages.
+- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts.
+- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend.
+- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes.
+- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string.
+- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index.
+- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT.
+- [improvement] JAVA-225: Create values() function for Insert builder using List.
+- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid
+  replication factors.
+- [improvement] JAVA-662: Add PoolingOptions method to set both core and max
+  connections.
+- [improvement] JAVA-766: Do not include epoll JAR in binary distribution.
+- [improvement] JAVA-726: Optimize internal copies of Request objects.
+- [bug] JAVA-815: Preserve tracing across retries.
+- [improvement] JAVA-709: New RetryDecision.tryNextHost().
+- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder.
+- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement.
+- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder.
+- [improvement] JAVA-618: Randomize contact points list to prevent hotspots.
+- [improvement] JAVA-720: Surface the coordinator used on query failure.
+- [bug] JAVA-792: Handle contact points removed during init.
+- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime.
+- [new feature] JAVA-151: Make it possible to register for SchemaChange Events.
+- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level.
+- [improvement] JAVA-797: Provide an option to prepare statements only on one node.
+- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp.
+- [improvement] JAVA-853: Customizable creation of netty timer.
+- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors.
+- [improvement] JAVA-657: Debounce control connection queries.
+- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init().
+- [new feature] JAVA-828: Make driver-side metadata optional.
+- [improvement] JAVA-544: Allow hosts to remain partially up.
+- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session
+  creation.
+- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other
+  hosts.
+- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries.
+- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled.
+
+
+### 2.1.7.1
+
+- [bug] JAVA-834: Special case check for 'null' string in index_options column.
+- [improvement] JAVA-835: Allow accessor methods with fewer parameters when
+  named bind markers are repeated.
+
+
+### 2.1.7
+
+- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT.
+- [improvement] JAVA-715: Make NativeColumnType a top-level class.
+- [improvement] JAVA-782: Unify "Target" enum for schema elements.
+- [improvement] JAVA-700: Expose ProtocolVersion#toInt.
+- [bug] JAVA-542: Handle void return types in accessors.
+- [improvement] JAVA-225: Create values() function for Insert builder using List.
+- [improvement] JAVA-713: HashMap throws an OOM Exception when logging level is set to TRACE.
+- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index.
+- [improvement] JAVA-732: Expose KEYS and FULL indexing options in IndexMetadata.
+- [improvement] JAVA-589: Allow @Enumerated in Accessor method parameters.
+- [improvement] JAVA-554: Allow access to table metadata from Mapper.
+- [improvement] JAVA-661: Provide a way to map computed fields.
+- [improvement] JAVA-824: Ignore missing columns in mapper.
+- [bug] JAVA-724: Preserve default timestamp for retries and speculative executions.
+- [improvement] JAVA-738: Use same pool implementation for protocol v2 and v3.
+- [improvement] JAVA-677: Support CONTAINS / CONTAINS KEY in QueryBuilder.
+- [improvement] JAVA-477/JAVA-540: Add USING options in mapper for delete and save
+  operations.
+- [improvement] JAVA-473: Add mapper option to configure whether to save null fields.
+
+Merged from 2.0 branch:
+
+- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts.
+- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend.
+- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes.
+- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string.
+- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index.
+- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT.
+- [improvement] JAVA-225: Create values() function for Insert builder using List.
+- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid
+  replication factors.
+- [improvement] JAVA-662: Add PoolingOptions method to set both core and max
+  connections.
+- [improvement] JAVA-766: Do not include epoll JAR in binary distribution.
+- [improvement] JAVA-726: Optimize internal copies of Request objects.
+- [bug] JAVA-815: Preserve tracing across retries.
+- [improvement] JAVA-709: New RetryDecision.tryNextHost().
+- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder.
+
+
+### 2.1.6
+
+Merged from 2.0 branch:
+
+- [new feature] JAVA-584: Add getObject to BoundStatement and Row.
+- [improvement] JAVA-419: Improve connection pool resizing algorithm.
+- [bug] JAVA-599: Fix race condition between pool expansion and shutdown.
+- [improvement] JAVA-622: Upgrade Netty to 4.0.27.
+- [improvement] JAVA-562: Coalesce frames before flushing them to the connection.
+- [improvement] JAVA-583: Rename threads to indicate that they are for the driver.
+- [new feature] JAVA-550: Expose paging state.
+- [new feature] JAVA-646: Slow Query Logger.
+- [improvement] JAVA-698: Exclude some errors from measurements in LatencyAwarePolicy.
+- [bug] JAVA-641: Fix issue when executing a PreparedStatement from another cluster.
+- [improvement] JAVA-534: Log keyspace xxx does not exist at WARN level.
+- [improvement] JAVA-619: Allow Cluster subclasses to delegate to another instance.
+- [new feature] JAVA-669: Expose an API to check for schema agreement after a
+  schema-altering statement.
+- [improvement] JAVA-692: Make connection and pool creation fully async.
+- [improvement] JAVA-505: Optimize connection use after reconnection.
+- [improvement] JAVA-617: Remove "suspected" mechanism.
+- [improvement] reverts JAVA-425: Don't mark connection defunct on client timeout.
+- [new feature] JAVA-561: Speculative query executions.
+- [bug] JAVA-666: Release connection before completing the ResultSetFuture.
+- [new feature BETA] JAVA-723: Percentile-based variant of query logger and speculative
+  executions.
+- [bug] JAVA-734: Fix buffer leaks when compression is enabled.
+- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default.
+- [improvement] JAVA-759: Expose "unsafe" paging state API.
+- [bug] JAVA-768: Prevent race during pool initialization.
+
+
+### 2.1.5
+
+- [bug] JAVA-575: Authorize Null parameter in Accessor method.
+- [improvement] JAVA-570: Support C* 2.1.3's nested collections.
+- [bug] JAVA-612: Fix checks on mapped collection types.
+- [bug] JAVA-672: Fix QueryBuilder.putAll() when the collection contains UDTs.
+
+Merged from 2.0 branch:
+
+- [new feature] JAVA-518: Add AddressTranslater for EC2 multi-region deployment.
+- [improvement] JAVA-533: Add connection heartbeat.
+- [improvement] JAVA-568: Reduce level of logs on missing rpc_address.
+- [improvement] JAVA-312, JAVA-681: Expose node token and range information.
+- [bug] JAVA-595: Fix cluster name mismatch check at startup.
+- [bug] JAVA-620: Fix guava dependency when using OSGI.
+- [bug] JAVA-678: Fix handling of DROP events when ks name is case-sensitive.
+- [improvement] JAVA-631: Use List<?> instead of List<Object> in QueryBuilder API.
+- [improvement] JAVA-654: Exclude Netty POM from META-INF in shaded JAR.
+- [bug] JAVA-655: Quote single quotes contained in table comments in asCQLQuery method.
+- [bug] JAVA-684: Empty TokenRange returned in a one token cluster.
+- [improvement] JAVA-687: Expose TokenRange#contains.
+- [bug] JAVA-614: Prevent race between cancellation and query completion.
+- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if
+  streamId was already released and reused.
+- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP.
+- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down.
+- [bug] JAVA-651: Fix edge cases where a connection was released twice.
+- [bug] JAVA-653: Fix edge cases in query cancellation.
+
+
+### 2.1.4
+
+Merged from 2.0 branch:
+
+- [improvement] JAVA-538: Shade Netty dependency.
+- [improvement] JAVA-543: Target schema refreshes more precisely.
+- [bug] JAVA-546: Don't check rpc_address for control host.
+- [improvement] JAVA-409: Improve message of NoHostAvailableException.
+- [bug] JAVA-556: Rework connection reaper to avoid deadlock.
+- [bug] JAVA-557: Avoid deadlock when multiple connections to the same host get write
+  errors.
+- [improvement] JAVA-504: Make shuffle=true the default for TokenAwarePolicy.
+- [bug] JAVA-577: Fix bug when SUSPECT reconnection succeeds, but one of the pooled
+  connections fails while bringing the node back up.
+- [bug] JAVA-587: Prevent faulty control connection from ignoring reconnecting hosts.
+- temporarily revert JAVA-419 "Add idle timeout to the connection pool".
+- [bug] JAVA-593: Ensure updateCreatedPools does not add pools for suspected hosts.
+- [bug] JAVA-594: Ensure state change notifications for a given host are handled serially.
+- [bug] JAVA-597: Ensure control connection reconnects when control host is removed.
+
+
+### 2.1.3
+
+- [bug] JAVA-510: Ignore static fields in mapper.
+- [bug] JAVA-509: Fix UDT parsing at init when using the default protocol version.
+- [bug] JAVA-495: Fix toString, equals and hashCode on accessor proxies.
+- [bug] JAVA-528: Allow empty name on Column and Field annotations.
+
+Merged from 2.0 branch:
+
+- [bug] JAVA-497: Ensure control connection does not trigger concurrent reconnects.
+- [improvement] JAVA-472: Keep trying to reconnect on authentication errors.
+- [improvement] JAVA-463: Expose close method on load balancing policy.
+- [improvement] JAVA-459: Allow load balancing policy to trigger refresh for a single host.
+- [bug] JAVA-493: Expose an API to cancel reconnection attempts.
+- [bug] JAVA-503: Fix NPE when a connection fails during pool construction.
+- [improvement] JAVA-423: Log datacenter name in DCAware policy's init when it is explicitly provided.
+- [improvement] JAVA-504: Shuffle the replicas in TokenAwarePolicy.newQueryPlan.
+- [improvement] JAVA-507: Make schema agreement wait tuneable.
+- [improvement] JAVA-494: Document how to inject the driver metrics into another registry.
+- [improvement] JAVA-419: Add idle timeout to the connection pool.
+- [bug] JAVA-516: LatencyAwarePolicy does not shut down executor on invocation of close.
+- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with
+  an explicit but null or empty local datacenter.
+- [bug] JAVA-511: Fix check for local contact points in DCAware policy's init.
+- [improvement] JAVA-457: Make timeout on saturated pool customizable.
+- [improvement] JAVA-521: Downgrade Guava to 14.0.1.
+- [bug] JAVA-526: Fix token awareness for case-sensitive keyspaces and tables.
+- [bug] JAVA-515: Check maximum number of values passed to SimpleStatement.
+- [improvement] JAVA-532: Expose the driver version through the API.
+- [improvement] JAVA-522: Optimize session initialization when some hosts are not
+  responsive.
+- [bug] JAVA-499: Handle null UDT fields in object mapper. + +Merged from 2.0 branch: + +- [bug] JAVA-449: Handle null pool in PooledConnection.release. +- [improvement] JAVA-425: Defunct connection on request timeout. +- [improvement] JAVA-426: Try next host when we get a SERVER_ERROR. +- [bug] JAVA-449, JAVA-460, JAVA-471: Handle race between query timeout and completion. +- [bug] JAVA-496: Fix DCAwareRoundRobinPolicy datacenter auto-discovery. + + +### 2.1.1 + +- [new] JAVA-441: Support for new "frozen" keyword. + +Merged from 2.0 branch: + +- [bug] JAVA-397: Check cluster name when connecting to a new node. +- [bug] JAVA-326: Add missing CAS delete support in QueryBuilder. +- [bug] JAVA-363: Add collection and data length checks during serialization. +- [improvement] JAVA-329: Surface number of retries in metrics. +- [bug] JAVA-428: Do not use a host when no rpc_address found for it. +- [improvement] JAVA-358: Add ResultSet.wasApplied() for conditional queries. +- [bug] JAVA-349: Fix negative HostConnectionPool open count. +- [improvement] JAVA-436: Log more connection details at trace and debug levels. +- [bug] JAVA-445: Fix cluster shutdown. + + +### 2.1.0 + +- [bug] JAVA-408: ClusteringColumn annotation not working with specified ordering. +- [improvement] JAVA-410: Fail BoundStatement if null values are not set explicitly. +- [bug] JAVA-416: Handle UDT and tuples in BuiltStatement.toString. + +Merged from 2.0 branch: + +- [bug] JAVA-407: Release connections on ResultSetFuture#cancel. +- [bug] JAVA-393: Fix handling of SimpleStatement with values in query builder + batches. +- [bug] JAVA-417: Ensure pool is properly closed in onDown. +- [bug] JAVA-415: Fix tokenMap initialization at startup. +- [bug] JAVA-418: Avoid deadlock on close. + + +### 2.1.0-rc1 + +Merged from 2.0 branch: + +- [bug] JAVA-394: Ensure defunct connections are completely closed. +- [bug] JAVA-342, JAVA-390: Fix memory and resource leak on closed Sessions. + + +### 2.1.0-beta1 + +- [new] Support for User Defined Types and tuples +- [new] Simple object mapper + +Merged from 2.0 branch: everything up to 2.0.3 (included), and the following. + +- [improvement] JAVA-204: Better handling of dead connections. +- [bug] JAVA-373: Fix potential NPE in ControlConnection. +- [bug] JAVA-291: Throws NPE when passed null for a contact point. +- [bug] JAVA-315: Avoid LoadBalancingPolicy onDown+onUp at startup. +- [bug] JAVA-343: Avoid classloader leak in Tomcat. +- [bug] JAVA-387: Avoid deadlock in onAdd/onUp. +- [bug] JAVA-377, JAVA-391: Make metadata parsing more lenient. + + +### 2.0.12.2 + +- [bug] JAVA-1179: Request objects should be copied when executed. +- [improvement] JAVA-1182: Throw error when synchronous call made on I/O thread. +- [bug] JAVA-1184: Unwrap StatementWrappers when extracting column definitions. + + +### 2.0.12.1 + +- [bug] JAVA-994: Don't call on(Up|Down|Add|Remove) methods if Cluster is closed/closing. +- [improvement] JAVA-805: Document that metrics are null until Cluster is initialized. +- [bug] JAVA-1072: Ensure defunct connections are properly evicted from the pool. + + +### 2.0.12 + +- [bug] JAVA-950: Fix Cluster.connect with a case-sensitive keyspace. +- [improvement] JAVA-920: Downgrade "error creating pool" message to WARN. +- [bug] JAVA-954: Don't trigger reconnection before initialization complete. +- [improvement] JAVA-914: Avoid rejected tasks at shutdown. +- [improvement] JAVA-921: Add SimpleStatement.getValuesCount(). 
+- [bug] JAVA-901: Move call to connection.release() out of cancelHandler. +- [bug] JAVA-960: Avoid race in control connection shutdown. +- [bug] JAVA-656: Fix NPE in ControlConnection.updateLocationInfo. +- [bug] JAVA-966: Count uninitialized connections in conviction policy. +- [improvement] JAVA-917: Document SSL configuration. +- [improvement] JAVA-652: Add DCAwareRoundRobinPolicy builder. +- [improvement] JAVA-808: Add generic filtering policy that can be used to exclude specific DCs. + + +### 2.0.11 + +- [improvement] JAVA-718: Log streamid at the trace level on sending request and receiving response. +- [bug] JAVA-796: Fix SpeculativeExecutionPolicy.init() and close() are never called. +- [improvement] JAVA-710: Suppress unnecessary warning at shutdown. +- [improvement] #340: Allow DNS name with multiple A-records as contact point. +- [bug] JAVA-794: Allow tracing across multiple result pages. +- [bug] JAVA-737: DowngradingConsistencyRetryPolicy ignores write timeouts. +- [bug] JAVA-736: Forbid bind marker in QueryBuilder add/append/prepend. +- [bug] JAVA-712: Prevent QueryBuilder.quote() from applying duplicate double quotes. +- [bug] JAVA-688: Prevent QueryBuilder from trying to serialize raw string. +- [bug] JAVA-679: Support bind marker in QueryBuilder DELETE's list index. +- [improvement] JAVA-475: Improve QueryBuilder API for SELECT DISTINCT. +- [improvement] JAVA-225: Create values() function for Insert builder using List. +- [improvement] JAVA-702: Warn when ReplicationStrategy encounters invalid + replication factors. +- [improvement] JAVA-662: Add PoolingOptions method to set both core and max + connections. +- [improvement] JAVA-766: Do not include epoll JAR in binary distribution. +- [improvement] JAVA-726: Optimize internal copies of Request objects. +- [bug] JAVA-815: Preserve tracing across retries. +- [improvement] JAVA-709: New RetryDecision.tryNextHost(). +- [bug] JAVA-733: Handle function calls and raw strings as non-idempotent in QueryBuilder. +- [improvement] JAVA-765: Provide API to retrieve values of a Parameterized SimpleStatement. +- [improvement] JAVA-827: implement UPDATE .. IF EXISTS in QueryBuilder. +- [improvement] JAVA-618: Randomize contact points list to prevent hotspots. +- [improvement] JAVA-720: Surface the coordinator used on query failure. +- [bug] JAVA-792: Handle contact points removed during init. +- [improvement] JAVA-719: Allow PlainTextAuthProvider to change its credentials at runtime. +- [new feature] JAVA-151: Make it possible to register for SchemaChange Events. +- [improvement] JAVA-861: Downgrade "Asked to rebuild table" log from ERROR to INFO level. +- [improvement] JAVA-797: Provide an option to prepare statements only on one node. +- [improvement] JAVA-658: Provide an option to not re-prepare all statements in onUp. +- [improvement] JAVA-853: Customizable creation of netty timer. +- [bug] JAVA-859: Avoid quadratic ring processing with invalid replication factors. +- [improvement] JAVA-657: Debounce control connection queries. +- [bug] JAVA-784: LoadBalancingPolicy.distance() called before init(). +- [new feature] JAVA-828: Make driver-side metadata optional. +- [improvement] JAVA-544: Allow hosts to remain partially up. +- [improvement] JAVA-821, JAVA-822: Remove internal blocking calls and expose async session + creation. +- [improvement] JAVA-725: Use parallel calls when re-preparing statement on other + hosts. +- [bug] JAVA-629: Don't use connection timeout for unrelated internal queries. 
+- [bug] JAVA-892: Fix NPE in speculative executions when metrics disabled.
+
+Merged from 2.0.10_fixes branch:
+
+- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default.
+- [improvement] JAVA-759: Expose "unsafe" paging state API.
+- [bug] JAVA-767: Fix getObject by name.
+- [bug] JAVA-768: Prevent race during pool initialization.
+
+
+### 2.0.10.1
+
+- [improvement] JAVA-756: Use Netty's pooled ByteBufAllocator by default.
+- [improvement] JAVA-759: Expose "unsafe" paging state API.
+- [bug] JAVA-767: Fix getObject by name.
+- [bug] JAVA-768: Prevent race during pool initialization.
+
+
+### 2.0.10
+
+- [new feature] JAVA-518: Add AddressTranslater for EC2 multi-region deployment.
+- [improvement] JAVA-533: Add connection heartbeat.
+- [improvement] JAVA-568: Reduce level of logs on missing rpc_address.
+- [improvement] JAVA-312, JAVA-681: Expose node token and range information.
+- [bug] JAVA-595: Fix cluster name mismatch check at startup.
+- [bug] JAVA-620: Fix guava dependency when using OSGI.
+- [bug] JAVA-678: Fix handling of DROP events when ks name is case-sensitive.
+- [improvement] JAVA-631: Use List<?> instead of List<Object> in QueryBuilder API.
+- [improvement] JAVA-654: Exclude Netty POM from META-INF in shaded JAR.
+- [bug] JAVA-655: Quote single quotes contained in table comments in asCQLQuery method.
+- [bug] JAVA-684: Empty TokenRange returned in a one token cluster.
+- [improvement] JAVA-687: Expose TokenRange#contains.
+- [new feature] JAVA-547: Expose values of BoundStatement.
+- [new feature] JAVA-584: Add getObject to BoundStatement and Row.
+- [improvement] JAVA-419: Improve connection pool resizing algorithm.
+- [bug] JAVA-599: Fix race condition between pool expansion and shutdown.
+- [improvement] JAVA-622: Upgrade Netty to 4.0.27.
+- [improvement] JAVA-562: Coalesce frames before flushing them to the connection.
+- [improvement] JAVA-583: Rename threads to indicate that they are for the driver.
+- [new feature] JAVA-550: Expose paging state.
+- [new feature] JAVA-646: Slow Query Logger.
+- [improvement] JAVA-698: Exclude some errors from measurements in LatencyAwarePolicy.
+- [bug] JAVA-641: Fix issue when executing a PreparedStatement from another cluster.
+- [improvement] JAVA-534: Log keyspace xxx does not exist at WARN level.
+- [improvement] JAVA-619: Allow Cluster subclasses to delegate to another instance.
+- [new feature] JAVA-669: Expose an API to check for schema agreement after a
+  schema-altering statement.
+- [improvement] JAVA-692: Make connection and pool creation fully async.
+- [improvement] JAVA-505: Optimize connection use after reconnection.
+- [improvement] JAVA-617: Remove "suspected" mechanism.
+- [improvement] reverts JAVA-425: Don't mark connection defunct on client timeout.
+- [new feature] JAVA-561: Speculative query executions.
+- [bug] JAVA-666: Release connection before completing the ResultSetFuture.
+- [new feature BETA] JAVA-723: Percentile-based variant of query logger and speculative
+  executions.
+- [bug] JAVA-734: Fix buffer leaks when compression is enabled.
+
+Merged from 2.0.9_fixes branch:
+
+- [bug] JAVA-614: Prevent race between cancellation and query completion.
+- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if
+  streamId was already released and reused.
+- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP.
+- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down.
+- [bug] JAVA-651: Fix edge cases where a connection was released twice.
+- [bug] JAVA-653: Fix edge cases in query cancellation.
+
+
+### 2.0.9.2
+
+- [bug] JAVA-651: Fix edge cases where a connection was released twice.
+- [bug] JAVA-653: Fix edge cases in query cancellation.
+
+
+### 2.0.9.1
+
+- [bug] JAVA-614: Prevent race between cancellation and query completion.
+- [bug] JAVA-632: Prevent cancel and timeout from cancelling unrelated ResponseHandler if
+  streamId was already released and reused.
+- [bug] JAVA-642: Fix issue when newly opened pool fails before we could mark the node UP.
+- [bug] JAVA-613: Fix unwanted LBP notifications when a contact host is down.
+
+
+### 2.0.9
+
+- [improvement] JAVA-538: Shade Netty dependency.
+- [improvement] JAVA-543: Target schema refreshes more precisely.
+- [bug] JAVA-546: Don't check rpc_address for control host.
+- [improvement] JAVA-409: Improve message of NoHostAvailableException.
+- [bug] JAVA-556: Rework connection reaper to avoid deadlock.
+- [bug] JAVA-557: Avoid deadlock when multiple connections to the same host get write
+  errors.
+- [improvement] JAVA-504: Make shuffle=true the default for TokenAwarePolicy.
+- [bug] JAVA-577: Fix bug when SUSPECT reconnection succeeds, but one of the pooled
+  connections fails while bringing the node back up.
+- [bug] JAVA-587: Prevent faulty control connection from ignoring reconnecting hosts.
+- temporarily revert JAVA-419 "Add idle timeout to the connection pool".
+- [bug] JAVA-593: Ensure updateCreatedPools does not add pools for suspected hosts.
+- [bug] JAVA-594: Ensure state change notifications for a given host are handled serially.
+- [bug] JAVA-597: Ensure control connection reconnects when control host is removed.
+
+
+### 2.0.8
+
+- [bug] JAVA-526: Fix token awareness for case-sensitive keyspaces and tables.
+- [bug] JAVA-515: Check maximum number of values passed to SimpleStatement.
+- [improvement] JAVA-532: Expose the driver version through the API.
+- [improvement] JAVA-522: Optimize session initialization when some hosts are not
+  responsive.
+
+
+### 2.0.7
+
+- [bug] JAVA-449: Handle null pool in PooledConnection.release.
+- [improvement] JAVA-425: Defunct connection on request timeout.
+- [improvement] JAVA-426: Try next host when we get a SERVER_ERROR.
+- [bug] JAVA-449, JAVA-460, JAVA-471: Handle race between query timeout and completion.
+- [bug] JAVA-496: Fix DCAwareRoundRobinPolicy datacenter auto-discovery.
+- [bug] JAVA-497: Ensure control connection does not trigger concurrent reconnects.
+- [improvement] JAVA-472: Keep trying to reconnect on authentication errors.
+- [improvement] JAVA-463: Expose close method on load balancing policy.
+- [improvement] JAVA-459: Allow load balancing policy to trigger refresh for a single host.
+- [bug] JAVA-493: Expose an API to cancel reconnection attempts.
+- [bug] JAVA-503: Fix NPE when a connection fails during pool construction.
+- [improvement] JAVA-423: Log datacenter name in DCAware policy's init when it is explicitly provided.
+- [improvement] JAVA-504: Shuffle the replicas in TokenAwarePolicy.newQueryPlan.
+- [improvement] JAVA-507: Make schema agreement wait tuneable.
+- [improvement] JAVA-494: Document how to inject the driver metrics into another registry.
+- [improvement] JAVA-419: Add idle timeout to the connection pool.
+- [bug] JAVA-516: LatencyAwarePolicy does not shut down executor on invocation of close.
+- [improvement] JAVA-451: Throw an exception when DCAwareRoundRobinPolicy is built with
+  an explicit but null or empty local datacenter.
+- [bug] JAVA-511: Fix check for local contact points in DCAware policy's init.
+- [improvement] JAVA-457: Make timeout on saturated pool customizable.
+- [improvement] JAVA-521: Downgrade Guava to 14.0.1.
+
+
+### 2.0.6
+
+- [bug] JAVA-397: Check cluster name when connecting to a new node.
+- [bug] JAVA-326: Add missing CAS delete support in QueryBuilder.
+- [bug] JAVA-363: Add collection and data length checks during serialization.
+- [improvement] JAVA-329: Surface number of retries in metrics.
+- [bug] JAVA-428: Do not use a host when no rpc_address found for it.
+- [improvement] JAVA-358: Add ResultSet.wasApplied() for conditional queries.
+- [bug] JAVA-349: Fix negative HostConnectionPool open count.
+- [improvement] JAVA-436: Log more connection details at trace and debug levels.
+- [bug] JAVA-445: Fix cluster shutdown.
+- [improvement] JAVA-439: Expose child policy in chainable load balancing policies.
+
+
+### 2.0.5
+
+- [bug] JAVA-407: Release connections on ResultSetFuture#cancel.
+- [bug] JAVA-393: Fix handling of SimpleStatement with values in query builder
+  batches.
+- [bug] JAVA-417: Ensure pool is properly closed in onDown.
+- [bug] JAVA-415: Fix tokenMap initialization at startup.
+- [bug] JAVA-418: Avoid deadlock on close.
+
+
+### 2.0.4
+
+- [improvement] JAVA-204: Better handling of dead connections.
+- [bug] JAVA-373: Fix potential NPE in ControlConnection.
+- [bug] JAVA-291: Throws NPE when passed null for a contact point.
+- [bug] JAVA-315: Avoid LoadBalancingPolicy onDown+onUp at startup.
+- [bug] JAVA-343: Avoid classloader leak in Tomcat.
+- [bug] JAVA-387: Avoid deadlock in onAdd/onUp.
+- [bug] JAVA-377, JAVA-391: Make metadata parsing more lenient.
+- [bug] JAVA-394: Ensure defunct connections are completely closed.
+- [bug] JAVA-342, JAVA-390: Fix memory and resource leak on closed Sessions.
+
+
+### 2.0.3
+
+- [new] The new AbstractSession makes mocking of Session easier.
+- [new] JAVA-309: Allow triggering a refresh of connected hosts.
+- [new] JAVA-265: New Session#getState method allows grabbing information on
+  which nodes a session is connected to.
+- [new] JAVA-327: Add QueryBuilder syntax for tuples in where clauses (syntax
+  introduced in Cassandra 2.0.6).
+- [improvement] JAVA-359: Properly validate arguments of PoolingOptions methods.
+- [bug] JAVA-368: Fix bogus rejection of BigInteger in 'execute with values'.
+- [bug] JAVA-367: Signal connection failures sooner to avoid missing them.
+- [bug] JAVA-337: Throw UnsupportedOperationException for protocol batch
+  setSerialCL.
+
+Merged from 1.0 branch:
+
+- [bug] JAVA-325: Fix periodic reconnection to down hosts.
+
+
+### 2.0.2
+
+- [api] The type of the map key returned by NoHostAvailable#getErrors has changed from
+  InetAddress to InetSocketAddress. The same applies to the Initializer#getContactPoints
+  return type and to AuthProvider#newAuthenticator.
+- [api] JAVA-296: The default load balancing policy is now DCAwareRoundRobinPolicy, and the local
+  datacenter is automatically picked based on the first connected node. Furthermore,
+  the TokenAwarePolicy is also used by default.
+- [new] JAVA-145: New optional AddressTranslater.
+- [bug] JAVA-321: Don't remove quotes on keyspace in the query builder.
+- [bug] JAVA-320: Fix potential NPE while the cluster undergoes schema changes.
+- [bug] JAVA-319: Fix thread-safety of page fetching.
+- [bug] JAVA-318: Fix potential NPE using fetchMoreResults.
+
+Merged from 1.0 branch:
+
+- [new] JAVA-179: Expose the name of the partitioner in use in the cluster metadata.
+- [new] Add new WhiteListPolicy to limit the nodes connected to a particular list.
+- [improvement] JAVA-289: Do not hop DC for LOCAL_* CL in DCAwareRoundRobinPolicy.
+- [bug] JAVA-313: Revert back to longs for dates in the query builder.
+- [bug] JAVA-314: Don't reconnect to nodes ignored by the load balancing policy.
+
+
+### 2.0.1
+
+- [improvement] JAVA-278: Handle the static columns introduced in Cassandra 2.0.6.
+- [improvement] JAVA-208: Add Cluster#newSession method to create Session without connecting
+  right away.
+- [bug] JAVA-279: Add missing iso8601 patterns for parsing dates.
+- [bug] Properly parse BytesType as the blob type.
+- [bug] JAVA-280: Potential NPE when parsing schema of pre-CQL tables of C* 1.2 nodes.
+
+Merged from 1.0 branch:
+
+- [bug] JAVA-275: LatencyAwarePolicy.Builder#withScale doesn't set the scale.
+- [new] JAVA-114: Add methods to check if a Cluster/Session instance has been closed already.
+
+
+### 2.0.0
+
+- [api] JAVA-269: Case-sensitive identifiers by default in Metadata.
+- [bug] JAVA-274: Fix potential NPE in Cluster#connect.
+
+Merged from 1.0 branch:
+
+- [bug] JAVA-263: Always return the PreparedStatement object that is cached internally.
+- [bug] JAVA-261: Fix race when multiple connect calls are done in parallel.
+- [bug] JAVA-270: Don't connect at all to nodes that are ignored by the load balancing
+  policy.
+
+
+### 2.0.0-rc3
+
+- [improvement] The protocol version 1 is now supported (features only supported by
+  version 2 of the protocol throw UnsupportedFeatureException).
+- [improvement] JAVA-195: Make most main objects interfaces to facilitate testing/mocking.
+- [improvement] Adds new getStatements and clear methods to BatchStatement.
+- [api] JAVA-247: Renamed shutdown to closeAsync and ShutdownFuture to CloseFuture. Cluster
+  and Session also now implement Closeable.
+- [bug] JAVA-232: Fix potential thread leaks when shutting down Metrics.
+- [bug] JAVA-231: Fix potential NPE in HostConnectionPool.
+- [bug] JAVA-244: Avoid NPE when node is in an unconfigured DC.
+- [bug] JAVA-258: Don't block for scheduled reconnections on Cluster#close.
+
+Merged from 1.0 branch:
+
+- [new] JAVA-224: Added Session#prepareAsync calls.
+- [new] JAVA-249: Added Cluster#getLoggedKeyspace.
+- [improvement] Avoid preparing a statement multiple times per host with multiple sessions.
+- [bug] JAVA-255: Make sure connections are returned to the right pools.
+- [bug] JAVA-264: Use date string in the query builder to work around CASSANDRA-6718.
+
+
+### 2.0.0-rc2
+
+- [new] JAVA-207: Add LOCAL_ONE consistency level support (requires using C* 2.0.2+).
+- [bug] JAVA-219: Fix parsing of counter types.
+- [bug] JAVA-218: Fix missing whitespace for IN clause in the query builder.
+- [bug] JAVA-221: Fix replicas computation for token aware balancing.
+
+Merged from 1.0 branch:
+
+- [bug] JAVA-213: Fix regression from JAVA-201.
+- [improvement] New getter to obtain a snapshot of the scores maintained by
+  LatencyAwarePolicy.
+
+
+### 2.0.0-rc1
+
+- [new] JAVA-199: Mark compression dependencies optional in maven.
+- [api] Renamed TableMetadata#getClusteringKey to TableMetadata#getClusteringColumns.
+
+Merged from 1.0 branch:
+
+- [new] JAVA-142: OSGi bundle.
+- [improvement] JAVA-205: Make collections returned by Row immutable.
+- [improvement] JAVA-203: Limit internal thread pool size.
+- [bug] JAVA-201: Don't retain unused PreparedStatements in memory.
+- [bug] Add missing clustering order info in TableMetadata.
+- [bug] JAVA-196: Allow bind markers for collections in the query builder.
+
+
+### 2.0.0-beta2
+
+- [api] BoundStatement#setX(String, X) methods now set all values (if there is
+  more than one) having the provided name, not just the first occurrence.
+- [api] The Authenticator interface now has an onAuthenticationSuccess method that
+  allows handling the potential last token sent by the server.
+- [new] The query builder doesn't serialize large values to strings anymore by
+  default, making use of the new ability to send values alongside the query string.
+- [new] JAVA-140: The query builder has been updated for new CQL features.
+- [bug] Fix exception when a conditional write times out on the C* side.
+- [bug] JAVA-182: Ensure a connection is created when Cluster metadata is asked for.
+- [bug] JAVA-187: Fix potential NPE during authentication.
+
+
+### 2.0.0-beta1
+
+- [api] The 2.0 version is an API-breaking upgrade of the driver. While most
+  of the breaking changes are minor, they are too numerous to be listed here,
+  and you are encouraged to look at the Upgrade_guide_to_2.0 file that describes
+  those changes in detail.
+- [new] LZ4 compression is supported for the protocol.
+- [new] JAVA-39: The driver does not depend on cassandra-all anymore.
+- [new] The new BatchStatement class allows executing other statements in batch.
+- [new] Large ResultSets are now paged (incrementally fetched) by default.
+- [new] SimpleStatement supports values for bind variables, to allow
+  prepare+execute behavior with one roundtrip.
+- [new] Query parameter defaults (consistency level, page size, ...) can be
+  configured globally.
+- [new] The new Cassandra 2.0 SERIAL and LOCAL_SERIAL consistency levels are
+  supported.
+- [new] JAVA-116: Cluster#shutdown now waits for ongoing queries to complete by default.
+- [new] Generic authentication through SASL is now exposed.
+- [bug] JAVA-88: TokenAwarePolicy now takes all replicas into account, instead of only the
+  first one.
+
+
+### 1.0.5
+
+- [new] JAVA-142: OSGi bundle.
+- [new] JAVA-207: Add support for ConsistencyLevel.LOCAL_ONE; note that this
+  requires Cassandra 1.2.12+.
+- [improvement] JAVA-205: Make collections returned by Row immutable.
+- [improvement] JAVA-203: Limit internal thread pool size.
+- [improvement] New getter to obtain a snapshot of the scores maintained by
+  LatencyAwarePolicy.
+- [improvement] JAVA-222: Avoid synchronization when getting the codec for collection
+  types.
+- [bug] JAVA-201, JAVA-213: Don't retain unused PreparedStatements in memory.
+- [bug] Add missing clustering order info in TableMetadata.
+- [bug] JAVA-196: Allow bind markers for collections in the query builder.
+
+
+### 1.0.4
+
+- [api] JAVA-163: The Cluster.Builder#poolingOptions and Cluster.Builder#socketOptions
+  methods are now deprecated. They are replaced by the new withPoolingOptions and
+  withSocketOptions methods.
+- [new] JAVA-129: A new LatencyAwarePolicy wrapping policy has been added, allowing
+  latency awareness to be added to a wrapped load balancing policy.
+- [new] JAVA-161: Cluster.Builder#deferInitialization: Allow deferring cluster initialization.
+- [new] JAVA-117: Add truncate statement in query builder.
+- [new] JAVA-106: Support empty IN in the query builder.
+- [bug] JAVA-166: Fix spurious "No current pool set; this should not happen" error
+  message.
+- [bug] JAVA-184: Fix potential overflow in RoundRobinPolicy and correctly error out if
+  a balancing policy throws.
+- [bug] Don't release the stream ID for timed-out queries (unless we do get back
+  the response).
+- [bug] Correctly escape identifiers and use fully qualified table names when
+  exporting the schema as a string.
+
+
+### 1.0.3
+
+- [api] The query builder now correctly throws an exception when given a value
+  of a type it doesn't know about.
+- [new] SocketOptions#setReadTimeout allows setting a timeout on how long we
+  wait for the answer of one node. See the javadoc for more details.
+- [new] New Session#prepare method that takes a Statement.
+- [bug] JAVA-143: Always take per-query CL, tracing, etc. into account for QueryBuilder
+  statements.
+- [bug] Temporary fixup for TimestampType when talking to C* 2.0 nodes.
+
+
+### 1.0.2
+
+- [api] Host#getMonitor and all Host.HealthMonitor methods have been
+  deprecated. The new Host#isUp method is now preferred to the method
+  in the monitor, and you should now register Host.StateListener against
+  the Cluster object directly (registering against a host HealthMonitor
+  was much more limited anyway).
+- [new] JAVA-92: New serialize/deserialize methods in DataType to serialize/deserialize
+  values to/from bytes.
+- [new] JAVA-128: New getIndexOf() method in ColumnDefinitions to find the index of
+  a given column name.
+- [bug] JAVA-131: Fix a bug where a thread could get blocked while setting the current
+  keyspace.
+- [bug] JAVA-136: Quote inet addresses in the query builder since CQL3 requires it.
+
+
+### 1.0.1
+
+- [api] JAVA-100: Function call handling in the query builder has been modified in a
+  backward-incompatible way. Function calls are not parsed from string values
+  anymore, as this wasn't safe. Instead, the new 'fcall' method should be used.
+- [api] Some typos in method names in PoolingOptions have been fixed in a
+  backward-incompatible way before the API gets widespread.
+- [bug] JAVA-123: Don't destroy composite partition key with BoundStatement and
+  TokenAwarePolicy.
+- [new] Support null values in the query builder.
+- [new] JAVA-5: SSL support (requires C* >= 1.2.1).
+- [new] JAVA-113: Allow generating unlogged batches in the query builder.
+- [improvement] Better error message when no hosts are available.
+- [improvement] Improve performance of the stress example application.
+
+
+### 1.0.0
+
+- [api] The AuthInfoProvider has been (temporarily) removed. Instead, the
+  Cluster builder has a new withCredentials() method to provide a username
+  and password for use with Cassandra's PasswordAuthenticator. Custom
+  authenticators will be re-introduced in a future version but are not
+  supported at the moment.
+- [api] The isMetricsEnabled() method in Configuration has been replaced by
+  getMetricsOptions(). An option to disable JMX reporting (on by default)
+  has been added.
+- [bug] JAVA-91: Don't make the default load balancing policy a static singleton since it
+  is stateful.
+
+
+### 1.0.0-RC1
+
+- [new] JAVA-79: Null values are now supported in BoundStatement (but you will need at
+  least Cassandra 1.2.3 for it to work). The API of BoundStatement has been
+  slightly changed so that not binding a variable is not an error anymore;
+  the variable is simply considered null by default. The isReady() method has
+  been removed.
+- [improvement] JAVA-75: The Cluster/Session shutdown methods now properly block until
+  the shutdown is complete. A version with a timeout has been added.
+- [bug] JAVA-44: Fix use of CQL3 functions in the query builder.
+- [bug] JAVA-77: Fix a case where multiple schema changes made too quickly wouldn't work
+  (only triggered when 0.0.0.0 was used for the rpc_address on the Cassandra
+  nodes).
+- [bug] JAVA-72: Fix IllegalStateException thrown due to a reconnection made on an I/O
+  thread.
+- [bug] JAVA-82: Correctly report errors during the authentication phase.
+
+
+### 1.0.0-beta2
+
+- [new] JAVA-51, JAVA-60, JAVA-58: Support blob constants, BigInteger, BigDecimal and counter batches in
+  the query builder.
+- [new] JAVA-61: Basic support for custom CQL3 types.
+- [new] JAVA-65: Add "execution infos" for a result set (this also moves the query
+  trace into the new ExecutionInfos object, so users of beta1 will have to
+  update).
+- [bug] JAVA-62: Fix failover bug in DCAwareRoundRobinPolicy.
+- [bug] JAVA-66: Fix use of bind markers for routing keys in the query builder.
+
+
+### 1.0.0-beta1
+
+- initial release
diff --git a/ci/create-user.sh b/ci/create-user.sh
new file mode 100644
index 00000000000..fb193df9a00
--- /dev/null
+++ b/ci/create-user.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################
+#
+# Prep
+#
+################################
+
+if [ "$1" == "-h" ]; then
+   echo "$0 [-h] "
+   echo " this script is used internally by other scripts in the same directory to create a user with the same uid and gid as the host user running the build"
+   exit 1
+fi
+
+# arguments
+username=$1
+uid=$2
+gid=$3
+BUILD_HOME=$4
+
+################################
+#
+# Main
+#
+################################
+
+# disable git directory ownership checks
+su ${username} -c "git config --global safe.directory '*'"
+
+if grep "^ID=" /etc/os-release | grep -q 'debian\|ubuntu' ; then
+    deluser docker
+    adduser --quiet --disabled-login --no-create-home --uid $uid --gecos ${username} ${username}
+    groupmod --non-unique -g $gid $username
+    gpasswd -a ${username} sudo >/dev/null
+else
+    adduser --no-create-home --uid $uid ${username}
+fi
+
+# sudo privileges
+echo "${username} ALL=(root) NOPASSWD:ALL" > /etc/sudoers.d/${username}
+chmod 0440 /etc/sudoers.d/${username}
+
+# proper permissions
+chown -R ${username}:${username} /home/docker
+chmod og+wx ${BUILD_HOME}
\ No newline at end of file
diff --git a/ci/run-tests.sh b/ci/run-tests.sh
new file mode 100755
index 00000000000..5268bdd7113
--- /dev/null
+++ b/ci/run-tests.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -x
+
+. ~/.jabba/jabba.sh
+. ~/env.txt
+cd $(dirname "$(readlink -f "$0")")/..
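+# A sketch of the intent of the steps below, inferred from the commands themselves:
+# build all modules with the default build JDK, switch to the JDK under test with
+# jabba, resolve the latest patch release of the requested Cassandra line from
+# downloads.apache.org, then run the integration tests against it.
+# SERVER_VERSION, TEST_JAVA_VERSION and TEST_JAVA_MAJOR_VERSION are assumed to be
+# exported by ~/env.txt, sourced above.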
+printenv | sort +mvn -B -V install -DskipTests -Dmaven.javadoc.skip=true +jabba use ${TEST_JAVA_VERSION} +# Find out the latest patch version of Cassandra +PATCH_SERVER_VERSION=$(curl -s https://downloads.apache.org/cassandra/ | grep -oP '(?<=href=\")[0-9]+\.[0-9]+\.[0-9]+(?=)' | sort -rV | uniq -w 3 | grep $SERVER_VERSION) +printenv | sort +mvn -B -V verify -T 1 -Ptest-jdk-${TEST_JAVA_MAJOR_VERSION} -DtestJavaHome=$(jabba which ${TEST_JAVA_VERSION}) -Dccm.version=${PATCH_SERVER_VERSION} -Dccm.dse=false -Dmaven.test.failure.ignore=true -Dmaven.javadoc.skip=true diff --git a/core-shaded/pom.xml b/core-shaded/pom.xml index 5e76d2018b0..84cb4b15398 100644 --- a/core-shaded/pom.xml +++ b/core-shaded/pom.xml @@ -1,13 +1,15 @@ 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-core-shaded - - DataStax Java driver for Apache Cassandra(R) - core with shaded deps - + Apache Cassandra Java Driver - core with shaded deps + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + - com.datastax.oss + org.apache.cassandra java-driver-core - ${project.version} - + + + src/main/resources + + + ${project.basedir}/.. + + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + maven-shade-plugin @@ -123,19 +160,50 @@ - com.datastax.oss:java-driver-core + org.apache.cassandra:java-driver-core io.netty:* + com.fasterxml.jackson.core:* + io.netty com.datastax.oss.driver.shaded.netty + + com.fasterxml.jackson + com.datastax.oss.driver.shaded.fasterxml.jackson + + + + + org.apache.cassandra:* + + + META-INF/MANIFEST.MF + META-INF/maven/** + + + + io.netty:* + + META-INF/** + + + + com.fasterxml.jackson.core:* + + META-INF/** + + + @@ -152,21 +220,12 @@ - com.datastax.oss + org.apache.cassandra java-driver-core-shaded - ${project.version} jar ${project.build.outputDirectory} - - - META-INF/maven/com.datastax.oss/java-driver-core/**, - META-INF/maven/io.netty/**, - @@ -179,9 +238,8 @@ - com.datastax.oss + org.apache.cassandra java-driver-core-shaded - ${project.version} jar sources ${project.build.directory}/shaded-sources @@ -191,34 +249,30 @@ + - maven-javadoc-plugin + com.google.code.maven-replacer-plugin + replacer + 1.5.3 - attach-shaded-javadocs + shade-graalvm-files + package - jar + replace - - ${project.build.directory}/shaded-sources - - com.datastax.oss.driver.internal:com.datastax.oss.driver.shaded - - - - - org.jctools - jctools-core - 2.1.2 - - - + + false + ${project.build.directory}/classes/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json,${project.build.directory}/shaded-sources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json + + + io.netty + com.datastax.oss.driver.shaded.netty + + + org.apache.felix @@ -233,49 +287,44 @@ + com.datastax.oss.driver.core com.datastax.oss.driver.core * - - !com.datastax.oss.driver.shaded.netty.*, - !jnr.*, - !net.jcip.annotations.*, - !edu.umd.cs.findbugs.annotations.*, - !com.google.protobuf.*, - !com.jcraft.jzlib.*, - !com.ning.compress.*, - !lzma.sdk.*, - !net.jpountz.xxhash.*, - !org.bouncycastle.*, - !org.conscrypt.*, - !org.apache.commons.logging.*, - !org.apache.log4j.*, - !org.apache.logging.log4j.*, - !org.eclipse.jetty.*, - !org.jboss.marshalling.*, - !sun.misc.*, - !sun.security.*, - * + !com.datastax.oss.driver.shaded.netty.*, !com.datastax.oss.driver.shaded.fasterxml.jackson.*, + !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, + !org.graalvm.*, !com.oracle.svm.*, + jnr.*;resolution:=optional, 
com.esri.core.geometry.*;resolution:=optional,org.reactivestreams.*;resolution:=optional, org.apache.tinkerpop.*;resolution:=optional, org.javatuples.*;resolution:=optional, reactor.blockhound.*;resolution:=optional, + !com.google.protobuf.*, !com.jcraft.jzlib.*, !com.ning.compress.*, !lzma.sdk.*, !net.jpountz.xxhash.*, !org.bouncycastle.*, !org.conscrypt.*, !org.apache.commons.logging.*, !org.apache.log4j.*, !org.apache.logging.log4j.*, !org.eclipse.jetty.*, !org.jboss.marshalling.*, !sun.misc.*, !sun.security.*, !com.barchart.udt.*, !com.fasterxml.aalto.*, !com.sun.nio.sctp.*, !gnu.io.*, !org.xml.sax.*, !org.w3c.dom.*, !com.aayushatharva.brotli4j.*, !com.github.luben.zstd.*, * - - com.datastax.oss.driver.api.core.*, - com.datastax.oss.driver.internal.core.*, - com.datastax.oss.driver.shaded.netty.*, - + com.datastax.oss.driver.api.core.*, com.datastax.oss.driver.internal.core.*, com.datastax.dse.driver.api.core.*, com.datastax.dse.driver.internal.core.*, com.datastax.oss.driver.shaded.netty.*, com.datastax.oss.driver.shaded.fasterxml.jackson.*, true diff --git a/core-shaded/src/assembly/shaded-jar.xml b/core-shaded/src/assembly/shaded-jar.xml index 3a735f36d2a..449eb77bd1a 100644 --- a/core-shaded/src/assembly/shaded-jar.xml +++ b/core-shaded/src/assembly/shaded-jar.xml @@ -1,12 +1,15 @@ + - + shaded-jar jar @@ -41,4 +42,4 @@ pom.xml - \ No newline at end of file + diff --git a/core/console.scala b/core/console.scala index 0ae13620ff8..491add7edea 100644 --- a/core/console.scala +++ b/core/console.scala @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /* * Allows quick manual tests from the Scala console: * @@ -36,4 +55,4 @@ println("********************************************") def fire(event: AnyRef)(implicit session: CqlSession): Unit = { session.getContext.asInstanceOf[InternalDriverContext].getEventBus().fire(event) -} \ No newline at end of file +} diff --git a/core/pom.xml b/core/pom.xml index c436191ece9..8758d20d78a 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-core bundle - - DataStax Java driver for Apache Cassandra(R) - core - + Apache Cassandra Java Driver - core + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + com.datastax.oss @@ -39,8 +49,8 @@ netty-handler - com.datastax.oss - java-driver-shaded-guava + org.apache.cassandra + java-driver-guava-shaded com.typesafe @@ -53,10 +63,6 @@ These dependencies are recommended but not mandatory, the driver will fall back to pure-Java implementations if they are not available at runtime. 
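+        Note also that, being flagged optional, these dependencies are never pulled in
+        transitively: applications that want them must declare them explicitly.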
--> - - com.github.jnr - jnr-ffi - com.github.jnr jnr-posix @@ -67,7 +73,7 @@ true - org.lz4 + at.yawk.lz4 lz4-java true @@ -83,13 +89,57 @@ org.hdrhistogram HdrHistogram + + com.esri.geometry + esri-geometry-api + true + + + org.apache.tinkerpop + gremlin-core + true + + + org.apache.tinkerpop + tinkergraph-gremlin + true + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + org.reactivestreams + reactive-streams + com.github.stephenc.jcip jcip-annotations + provided com.github.spotbugs spotbugs-annotations + provided + + + org.graalvm.sdk + graal-sdk + provided + + + org.graalvm.nativeimage + svm + provided + + + io.projectreactor.tools + blockhound + provided ch.qos.logback @@ -116,8 +166,32 @@ mockito-core test + + io.reactivex.rxjava2 + rxjava + test + + + org.reactivestreams + reactive-streams-tck + test + + + org.awaitility + awaitility + test + + + org.testng + testng + test + + + com.github.tomakehurst + wiremock + test + - @@ -134,10 +208,42 @@ false + + ${project.basedir}/.. + + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + + + src/test/resources + + project.properties + + true + + + src/test/resources + + project.properties + + false + + maven-jar-plugin + + + + com.datastax.oss.driver.core + + + test-jar @@ -155,13 +261,37 @@ maven-surefire-plugin + ${testing.jvm}/bin/java + ${mockitoopens.argline} + 1 listener com.datastax.oss.driver.DriverRunListener + + + junit + false + + + suitename + Reactive Streams TCK + + + + org.apache.maven.surefire + surefire-junit47 + ${surefire.version} + + + org.apache.maven.surefire + surefire-testng + ${surefire.version} + + org.apache.felix @@ -180,24 +310,47 @@ (so reflection-based loading of policies works) --> * - - !net.jcip.annotations.*, - !edu.umd.cs.findbugs.annotations.*, - !jnr.*, - * + !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, + !org.graalvm.*, !com.oracle.svm.*, + jnr.*;resolution:=optional, com.esri.core.geometry.*;resolution:=optional, org.reactivestreams.*;resolution:=optional, org.apache.tinkerpop.*;resolution:=optional, org.javatuples.*;resolution:=optional, reactor.blockhound.*;resolution:=optional, * - - com.datastax.oss.driver.*.core.* - + com.datastax.oss.driver.*.core.*, com.datastax.dse.driver.*.core.* + + maven-dependency-plugin + + + generate-dependency-list + + list + + generate-resources + + runtime + true + com.datastax.cassandra,com.datastax.dse + ${project.build.outputDirectory}/com/datastax/dse/driver/internal/deps.txt + + + + diff --git a/core/revapi.json b/core/revapi.json index c5e5069d7fa..8c707659c13 100644 --- a/core/revapi.json +++ b/core/revapi.json @@ -1,5 +1,3 @@ -// Configures Revapi (https://revapi.org/getting-started.html) to check API compatibility between -// successive driver versions. { "revapi": { "java": { @@ -7,8 +5,8 @@ "packages": { "regex": true, "exclude": [ - "com\\.datastax\\.oss\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", "org\\.assertj(\\..+)?" 
] @@ -4794,6 +4792,2626 @@ "new": "method java.util.Spliterator com.datastax.oss.driver.api.core.PagingIterable::spliterator() @ com.datastax.oss.driver.api.core.cql.ResultSet", "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", "justification": "JAVA-2247: PagingIterable implementations should implement spliterator()" + }, + { + "code": "java.method.addedToInterface", + "new": "method java.lang.String com.datastax.oss.driver.api.core.cql.Row::toString()", + "justification": "False positive -- all objects implicitly have toString()" + }, + { + "code": "java.method.addedToInterface", + "new": "method java.lang.String com.datastax.oss.driver.api.core.data.TupleValue::toString()", + "justification": "False positive -- all objects implicitly have toString()" + }, + { + "code": "java.method.addedToInterface", + "new": "method java.lang.String com.datastax.oss.driver.api.core.data.UdtValue::toString()", + "justification": "False positive -- all objects implicitly have toString()" + }, + { + "regex": true, + "code": "java.annotation.added", + "old": "field com\\.datastax\\.oss\\.driver\\.api\\.core\\.Version.V.*", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "Marking constants as non-null doesn't break existing code" + }, + { + "code": "java.annotation.removed", + "old": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", + "new": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "JAVA-2505: Annotate Node.getHostId() as nullable" + }, + { + "code": "java.annotation.added", + "old": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", + "new": "method java.util.UUID com.datastax.oss.driver.api.core.metadata.Node::getHostId()", + "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", + "justification": "JAVA-2505: Annotate Node.getHostId() as nullable" + }, + { + "code": "java.annotation.added", + "old": "parameter void com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory::(===javax.net.ssl.SSLContext===)", + "new": "parameter void com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory::(===javax.net.ssl.SSLContext===)", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "JAVA-2434: added @NonNull to ProgrammaticSslEngineFactory(SSLContext) constructor" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.PlainTextAuthenticator", + "new": "class com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.PlainTextAuthenticator", + "superClass": "com.datastax.dse.driver.api.core.auth.BaseDseAuthenticator", + "justification": "New parent doesn't add constraints for implementors" + }, + { + "code": "java.method.exception.runtimeAdded", + "old": "method com.datastax.oss.driver.api.core.auth.Authenticator com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase::newAuthenticator(com.datastax.oss.driver.api.core.metadata.EndPoint, java.lang.String)", + "new": "method com.datastax.oss.driver.api.core.auth.Authenticator com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase::newAuthenticator(com.datastax.oss.driver.api.core.metadata.EndPoint, java.lang.String) throws com.datastax.oss.driver.api.core.auth.AuthenticationException", + "exception": "com.datastax.oss.driver.api.core.auth.AuthenticationException", + 
"justification": "New exception is unchecked" + }, + { + "code": "java.class.superTypeTypeParametersChanged", + "old": "class com.datastax.dse.driver.api.core.DseSessionBuilderBase", + "new": "class com.datastax.dse.driver.api.core.DseSessionBuilderBase, SessionT>", + "oldSuperType": "com.datastax.oss.driver.api.core.session.SessionBuilder", + "newSuperType": "com.datastax.oss.driver.api.core.session.SessionBuilder, SessionT>", + "justification": "JAVA-2411: Type parameters were wrong but it is unlikely that implementors would notice that in subclasses" + }, + { + "code": "java.method.removed", + "old": "method org.apache.tinkerpop.gremlin.process.remote.traversal.RemoteTraversal org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submit(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.removed", + "old": "method java.util.Iterator> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submit(org.apache.tinkerpop.gremlin.process.traversal.Traversal) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.noLongerDefault", + "old": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", + "new": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.nowAbstract", + "old": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", + "new": "method java.util.concurrent.CompletableFuture> org.apache.tinkerpop.gremlin.process.remote.RemoteConnection::submitAsync(org.apache.tinkerpop.gremlin.process.traversal.Bytecode) throws org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.field.removedWithConstant", + "old": "field org.apache.tinkerpop.gremlin.process.traversal.TraversalSource.GREMLIN_REMOTE", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.field.removedWithConstant", + "old": "field org.apache.tinkerpop.gremlin.process.traversal.TraversalSource.GREMLIN_REMOTE_CONNECTION_CLASS", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter java.util.List> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(===java.util.List>===)", + "new": "parameter java.util.Set> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(===java.util.Set>===)", + "justification": 
"JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.returnTypeChanged", + "old": "method java.util.List> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(java.util.List>)", + "new": "method java.util.Set> org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies::sortStrategies(java.util.Set>)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.numberOfParametersChanged", + "old": "method void org.apache.tinkerpop.gremlin.process.traversal.Traverser.Admin::incrLoops(java.lang.String)", + "new": "method void org.apache.tinkerpop.gremlin.process.traversal.Traverser.Admin::incrLoops()", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.addedToInterface", + "new": "method void org.apache.tinkerpop.gremlin.process.traversal.Traverser.Admin::initialiseLoops(java.lang.String, java.lang.String)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.addedToInterface", + "new": "method int org.apache.tinkerpop.gremlin.process.traversal.Traverser::loops(java.lang.String)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max()", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::max(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + 
"code": "java.method.returnTypeTypeParametersChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min()", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::min(org.apache.tinkerpop.gremlin.process.traversal.Scope)", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal> org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::valueMap(java.lang.String[])", + "new": "method org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal> org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal::valueMap(java.lang.String[])", + "justification": "JAVA-2235: GraphBinary support - TinkerPop upgrade from 3.3 to 3.4" + }, + { + "code": "java.class.externalClassExposedInAPI", + "new": "class org.apache.tinkerpop.gremlin.process.traversal.util.ImmutableExplanation", + "justification": "Upgrade to Tinkerpop 3.4.4" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", + "new": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", + "superClass": "org.apache.tinkerpop.gremlin.process.traversal.util.AbstractExplanation", + "justification": "Upgrade to Tinkerpop 3.4.4" + }, + { + "code": "java.class.defaultSerializationChanged", + "old": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", + "new": "class org.apache.tinkerpop.gremlin.process.traversal.util.TraversalExplanation", + 
"justification": "Upgrade to Tinkerpop 3.4.4" + }, + { + "code": "java.annotation.added", + "old": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===com.datastax.oss.driver.api.core.CqlIdentifier===)", + "new": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===com.datastax.oss.driver.api.core.CqlIdentifier===)", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "Add missing `@NonNull` annotation to UserDefinedType.firstIndexOf" + }, + { + "code": "java.annotation.added", + "old": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===java.lang.String===)", + "new": "parameter int com.datastax.oss.driver.api.core.type.UserDefinedType::firstIndexOf(===java.lang.String===)", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "Add missing `@NonNull` annotation to UserDefinedType.firstIndexOf" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "new": "class com.fasterxml.jackson.databind.introspect.AnnotatedConstructor.Serialization", + "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "new": "class com.fasterxml.jackson.databind.introspect.AnnotatedField.Serialization", + "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "new": "class com.fasterxml.jackson.databind.introspect.AnnotatedMethod.Serialization", + "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "new": "class com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer", + "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "new": "class com.fasterxml.jackson.databind.util.PrimitiveArrayBuilder.Node", + "justification": "Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.annotation.JsonTypeInfo.Id.CUSTOM", + "new": "field com.fasterxml.jackson.annotation.JsonTypeInfo.Id.CUSTOM", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.core.Base64Variant.serialVersionUID", + "new": "field com.fasterxml.jackson.core.Base64Variant.serialVersionUID", + "serialVersionUID": "1", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.core.JsonGenerationException", + "new": "class com.fasterxml.jackson.core.JsonGenerationException", + "superClass": "com.fasterxml.jackson.core.JacksonException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.core.JsonParseException", + "new": "class com.fasterxml.jackson.core.JsonParseException", + "superClass": "com.fasterxml.jackson.core.JacksonException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.core.JsonProcessingException", + "new": "class com.fasterxml.jackson.core.JsonProcessingException", + "superClass": "com.fasterxml.jackson.core.JacksonException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "old": "class com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer.TableInfo", + "new": "class com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer.TableInfo", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "old": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.Bucket", + "new": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.Bucket", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "old": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.TableInfo", + "new": "class com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer.TableInfo", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method java.lang.String[] com.fasterxml.jackson.databind.AnnotationIntrospector::findPropertiesToIgnore(com.fasterxml.jackson.databind.introspect.Annotated)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.defaultSerializationChanged", + "old": "class com.fasterxml.jackson.databind.AnnotationIntrospector", + "new": "class com.fasterxml.jackson.databind.AnnotationIntrospector", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.DeserializationConfig.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.DeserializationConfig.serialVersionUID", + "serialVersionUID": "2", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.returnTypeChanged", + "old": "method void com.fasterxml.jackson.databind.DeserializationConfig::initialize(com.fasterxml.jackson.core.JsonParser)", + "new": "method com.fasterxml.jackson.core.JsonParser com.fasterxml.jackson.databind.DeserializationConfig::initialize(com.fasterxml.jackson.core.JsonParser)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.generics.formalTypeParameterRemoved", + "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.generics.formalTypeParameterRemoved", + "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForBuilder(com.fasterxml.jackson.databind.JavaType)", + "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.generics.formalTypeParameterRemoved", + "old": "method T com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.DeserializationConfig::introspectForCreation(com.fasterxml.jackson.databind.JavaType)", + "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.DeserializationContext.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.DeserializationContext.serialVersionUID", + "serialVersionUID": "1", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method boolean com.fasterxml.jackson.databind.DeserializationContext::isEnabled(com.fasterxml.jackson.core.StreamReadCapability)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method boolean com.fasterxml.jackson.databind.JavaType::isRecordType()", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.databind.JsonMappingException", + "new": "class com.fasterxml.jackson.databind.JsonMappingException", + "superClass": "com.fasterxml.jackson.core.JacksonException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS", + "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES", + "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_VALUES", + "new": "field com.fasterxml.jackson.databind.MapperFeature.ACCEPT_CASE_INSENSITIVE_VALUES", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_COERCION_OF_SCALARS", + "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_COERCION_OF_SCALARS", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_EXPLICIT_PROPERTY_RENAMING", + "new": "field com.fasterxml.jackson.databind.MapperFeature.ALLOW_EXPLICIT_PROPERTY_RENAMING", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.BLOCK_UNSAFE_POLYMORPHIC_BASE_TYPES", + "new": "field com.fasterxml.jackson.databind.MapperFeature.BLOCK_UNSAFE_POLYMORPHIC_BASE_TYPES", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS", + "new": "field com.fasterxml.jackson.databind.MapperFeature.CAN_OVERRIDE_ACCESS_MODIFIERS", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.DEFAULT_VIEW_INCLUSION", + "new": "field com.fasterxml.jackson.databind.MapperFeature.DEFAULT_VIEW_INCLUSION", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_DUPLICATE_MODULE_REGISTRATIONS", + "new": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_DUPLICATE_MODULE_REGISTRATIONS", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_MERGE_FOR_UNMERGEABLE", + "new": "field com.fasterxml.jackson.databind.MapperFeature.IGNORE_MERGE_FOR_UNMERGEABLE", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS", + "new": "field com.fasterxml.jackson.databind.MapperFeature.OVERRIDE_PUBLIC_ACCESS_MODIFIERS", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.SORT_PROPERTIES_ALPHABETICALLY", + "new": "field com.fasterxml.jackson.databind.MapperFeature.SORT_PROPERTIES_ALPHABETICALLY", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_BASE_TYPE_AS_DEFAULT_IMPL", + "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_BASE_TYPE_AS_DEFAULT_IMPL", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_STATIC_TYPING", + "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_STATIC_TYPING", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_STD_BEAN_NAMING", + "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_STD_BEAN_NAMING", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.MapperFeature.USE_WRAPPER_NAME_AS_PROPERTY_NAME", + "new": "field com.fasterxml.jackson.databind.MapperFeature.USE_WRAPPER_NAME_AS_PROPERTY_NAME", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.ObjectMapper.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.ObjectMapper.serialVersionUID", + "serialVersionUID": "2", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method java.lang.Object com.fasterxml.jackson.databind.ObjectMapper::_unwrapAndDeserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.DeserializationConfig, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.JsonDeserializer) throws java.io.IOException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::_writeValueAndClose(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.exception.runtimeAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::treeToValue(com.fasterxml.jackson.core.TreeNode, java.lang.Class) throws com.fasterxml.jackson.core.JsonProcessingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::treeToValue(com.fasterxml.jackson.core.TreeNode, java.lang.Class) throws java.lang.IllegalArgumentException, com.fasterxml.jackson.core.JsonProcessingException", + "exception": "java.lang.IllegalArgumentException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method java.lang.Object com.fasterxml.jackson.databind.ObjectReader::_unwrapAndDeserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.JsonDeserializer) throws java.io.IOException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::_configAndWriteValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.returnTypeChanged", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::_configureGenerator(com.fasterxml.jackson.core.JsonGenerator)", + "new": "method com.fasterxml.jackson.core.JsonGenerator com.fasterxml.jackson.databind.ObjectWriter::_configureGenerator(com.fasterxml.jackson.core.JsonGenerator)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::_writeValueAndClose(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDChanged", + "new": "field com.fasterxml.jackson.databind.PropertyNamingStrategy.serialVersionUID", + "oldSerialVersionUID": "-5237220944964015475", + "newSerialVersionUID": "2", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method T com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.generics.formalTypeParameterRemoved", + "old": "method T com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.SerializationConfig::introspect(com.fasterxml.jackson.databind.JavaType)", + "typeParameter": "T extends com.fasterxml.jackson.databind.BeanDescription", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.cfg.BaseSettings.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.cfg.BaseSettings.serialVersionUID", + "serialVersionUID": "1", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. 
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.numberOfParametersChanged", + "old": "method void com.fasterxml.jackson.databind.cfg.BaseSettings::<init>(com.fasterxml.jackson.databind.introspect.ClassIntrospector, com.fasterxml.jackson.databind.AnnotationIntrospector, com.fasterxml.jackson.databind.PropertyNamingStrategy, com.fasterxml.jackson.databind.type.TypeFactory, com.fasterxml.jackson.databind.jsontype.TypeResolverBuilder<?>, java.text.DateFormat, com.fasterxml.jackson.databind.cfg.HandlerInstantiator, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.core.Base64Variant)", + "new": "method void com.fasterxml.jackson.databind.cfg.BaseSettings::<init>(com.fasterxml.jackson.databind.introspect.ClassIntrospector, com.fasterxml.jackson.databind.AnnotationIntrospector, com.fasterxml.jackson.databind.PropertyNamingStrategy, com.fasterxml.jackson.databind.type.TypeFactory, com.fasterxml.jackson.databind.jsontype.TypeResolverBuilder<?>, java.text.DateFormat, com.fasterxml.jackson.databind.cfg.HandlerInstantiator, java.util.Locale, java.util.TimeZone, com.fasterxml.jackson.core.Base64Variant, com.fasterxml.jackson.databind.jsontype.PolymorphicTypeValidator, com.fasterxml.jackson.databind.introspect.AccessorNamingStrategy.Provider)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method com.fasterxml.jackson.databind.introspect.AccessorNamingStrategy.Provider com.fasterxml.jackson.databind.cfg.MapperConfig<T extends com.fasterxml.jackson.databind.cfg.MapperConfig<T>>::getAccessorNaming()", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method com.fasterxml.jackson.annotation.JsonIncludeProperties.Value com.fasterxml.jackson.databind.cfg.MapperConfig<T extends com.fasterxml.jackson.databind.cfg.MapperConfig<T>>::getDefaultPropertyInclusions(java.lang.Class<?>, com.fasterxml.jackson.databind.introspect.AnnotatedClass)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.deser.DefaultDeserializationContext.serialVersionUID", + "serialVersionUID": "1", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method com.fasterxml.jackson.databind.deser.DefaultDeserializationContext com.fasterxml.jackson.databind.deser.DefaultDeserializationContext::createDummyInstance(com.fasterxml.jackson.databind.DeserializationConfig)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", + "new": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", + "superClass": "com.fasterxml.jackson.core.JacksonException", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0.
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method com.fasterxml.jackson.databind.introspect.AnnotatedParameter com.fasterxml.jackson.databind.deser.ValueInstantiator::getIncompleteParameter()", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method void com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::<init>(boolean, java.util.Collection<com.fasterxml.jackson.databind.deser.SettableBeanProperty>)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.numberOfParametersChanged", + "old": "method com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::construct(java.util.Collection<com.fasterxml.jackson.databind.deser.SettableBeanProperty>, boolean)", + "new": "method com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::construct(com.fasterxml.jackson.databind.cfg.MapperConfig<?>, java.util.Collection<com.fasterxml.jackson.databind.deser.SettableBeanProperty>, java.util.Map<java.lang.String, java.util.List<com.fasterxml.jackson.databind.PropertyName>>, boolean)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method void com.fasterxml.jackson.databind.deser.impl.BeanPropertyMap::replace(com.fasterxml.jackson.databind.deser.SettableBeanProperty)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method com.fasterxml.jackson.databind.BeanDescription com.fasterxml.jackson.databind.introspect.ClassIntrospector::forDeserializationWithBuilder(com.fasterxml.jackson.databind.DeserializationConfig, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.introspect.ClassIntrospector.MixInResolver, com.fasterxml.jackson.databind.BeanDescription)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.numberOfParametersChanged", + "old": "method void com.fasterxml.jackson.databind.ser.BeanSerializer::<init>(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, java.util.Set<java.lang.String>)", + "new": "method void com.fasterxml.jackson.databind.ser.BeanSerializer::<init>(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, java.util.Set<java.lang.String>, java.util.Set<java.lang.String>)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.removed", + "old": "method com.fasterxml.jackson.databind.ser.impl.PropertySerializerMap com.fasterxml.jackson.databind.ser.impl.PropertySerializerMap::emptyMap()", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0.
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.visibilityReduced", + "old": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::<init>(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, com.fasterxml.jackson.databind.ser.BeanPropertyWriter[], com.fasterxml.jackson.databind.ser.BeanPropertyWriter[])", + "new": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::<init>(com.fasterxml.jackson.databind.ser.std.BeanSerializerBase, com.fasterxml.jackson.databind.ser.BeanPropertyWriter[], com.fasterxml.jackson.databind.ser.BeanPropertyWriter[])", + "oldVisibility": "public", + "newVisibility": "protected", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method com.fasterxml.jackson.databind.ser.std.BeanSerializerBase com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::withByNameInclusion(java.util.Set<java.lang.String>, java.util.Set<java.lang.String>)", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method com.fasterxml.jackson.databind.ser.std.BeanSerializerBase com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::withProperties(com.fasterxml.jackson.databind.ser.BeanPropertyWriter[], com.fasterxml.jackson.databind.ser.BeanPropertyWriter[])", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.typeChanged", + "old": "field com.fasterxml.jackson.databind.type.TypeFactory._typeCache", + "new": "field com.fasterxml.jackson.databind.type.TypeFactory._typeCache", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0. Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.type.TypeFactory.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.type.TypeFactory.serialVersionUID", + "serialVersionUID": "1", + "justification": "JAVA-2904: Jackson upgraded to 2.12.0.
Caused by the exposure of ObjectMapper as a parameter in ExtraTypeCodecs.json()" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.process.remote.RemoteConnection", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.P", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.Path", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.Edge", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.Property", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.Vertex", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.VertexProperty", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.process.remote.RemoteConnection", + "new": "missing-class org.apache.tinkerpop.gremlin.process.remote.RemoteConnection", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.P", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.P", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.Path", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.Path", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", + "new": "missing-class org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.structure.Edge", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.Edge", + 
"justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.structure.Property", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.Property", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.structure.Vertex", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.Vertex", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class org.apache.tinkerpop.gremlin.structure.VertexProperty", + "new": "missing-class org.apache.tinkerpop.gremlin.structure.VertexProperty", + "justification": "JAVA-2907: switched Tinkerpop dependency to optional" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.core.JsonGenerationException", + "new": "class com.fasterxml.jackson.core.JsonGenerationException", + "superClass": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.serialVersionUIDChanged", + "old": "field com.fasterxml.jackson.core.JsonLocation.serialVersionUID", + "new": "field com.fasterxml.jackson.core.JsonLocation.serialVersionUID", + "oldSerialVersionUID": "1", + "newSerialVersionUID": "2", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.removed", + "old": "method java.lang.StringBuilder com.fasterxml.jackson.core.JsonLocation::_appendSourceDesc(java.lang.StringBuilder)", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.visibilityReduced", + "old": "method void com.fasterxml.jackson.core.exc.StreamReadException::(com.fasterxml.jackson.core.JsonParser, java.lang.String)", + "new": "method void com.fasterxml.jackson.core.exc.StreamReadException::(com.fasterxml.jackson.core.JsonParser, java.lang.String)", + "oldVisibility": "public", + "newVisibility": "protected", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.visibilityReduced", + "old": "method void com.fasterxml.jackson.core.exc.StreamReadException::(com.fasterxml.jackson.core.JsonParser, java.lang.String, com.fasterxml.jackson.core.JsonLocation)", + "new": "method void com.fasterxml.jackson.core.exc.StreamReadException::(com.fasterxml.jackson.core.JsonParser, java.lang.String, com.fasterxml.jackson.core.JsonLocation)", + "oldVisibility": "public", + "newVisibility": "protected", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.visibilityReduced", + "old": "method void com.fasterxml.jackson.core.exc.StreamReadException::(com.fasterxml.jackson.core.JsonParser, java.lang.String, java.lang.Throwable)", + "new": "method void com.fasterxml.jackson.core.exc.StreamReadException::(com.fasterxml.jackson.core.JsonParser, java.lang.String, java.lang.Throwable)", + "oldVisibility": "public", + "newVisibility": "protected", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.nowFinal", + "old": "field com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer._intern", + "new": "field com.fasterxml.jackson.core.sym.ByteQuadsCanonicalizer._intern", + "justification": 
"Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.removed", + "old": "method void com.fasterxml.jackson.core.sym.CharsToNameCanonicalizer::reportTooManyCollisions(int)", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method java.util.List> com.fasterxml.jackson.databind.BeanDescription::getConstructorsWithMode()", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.abstractMethodAdded", + "new": "method java.util.List> com.fasterxml.jackson.databind.BeanDescription::getFactoryMethodsWithMode()", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter com.fasterxml.jackson.databind.DeserializationConfig com.fasterxml.jackson.databind.DeserializationConfig::_withMapperFeatures(===int===)", + "new": "parameter com.fasterxml.jackson.databind.DeserializationConfig com.fasterxml.jackson.databind.DeserializationConfig::_withMapperFeatures(===long===)", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method com.fasterxml.jackson.databind.util.TokenBuffer com.fasterxml.jackson.databind.DeserializationContext::bufferForInputBuffering()", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", + "exception": "com.fasterxml.jackson.core.JacksonException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, T) throws java.io.IOException", + "new": "method T com.fasterxml.jackson.databind.JsonDeserializer::deserialize(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, T) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", + "exception": "com.fasterxml.jackson.core.JacksonException", + "justification": "Upgrade deps around 
JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer<T>::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer) throws java.io.IOException", + "new": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer<T>::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", + "exception": "com.fasterxml.jackson.core.JacksonException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer<T>::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer, T) throws java.io.IOException", + "new": "method java.lang.Object com.fasterxml.jackson.databind.JsonDeserializer<T>::deserializeWithType(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.jsontype.TypeDeserializer, T) throws java.io.IOException, com.fasterxml.jackson.core.JacksonException", + "exception": "com.fasterxml.jackson.core.JacksonException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.databind.JsonMappingException", + "new": "class com.fasterxml.jackson.databind.JsonMappingException", + "superClass": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer<java.lang.Object> com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.JsonMappingException", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer<java.lang.Object> com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer<java.lang.Object> com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.JsonMappingException", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer<java.lang.Object> com.fasterxml.jackson.databind.ObjectMapper::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext, com.fasterxml.jackson.databind.JavaType) throws com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977
to address outstanding CVEs" + }, + { + "code": "java.method.removed", + "old": "method com.fasterxml.jackson.core.JsonToken com.fasterxml.jackson.databind.ObjectMapper::_initForReading(com.fasterxml.jackson.core.JsonParser) throws java.io.IOException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T extends com.fasterxml.jackson.core.TreeNode> T com.fasterxml.jackson.databind.ObjectMapper::readTree(com.fasterxml.jackson.core.JsonParser) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method <T extends com.fasterxml.jackson.core.TreeNode> T com.fasterxml.jackson.databind.ObjectMapper::readTree(com.fasterxml.jackson.core.JsonParser) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.JsonNode com.fasterxml.jackson.databind.ObjectMapper::readTree(java.io.File) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method com.fasterxml.jackson.databind.JsonNode com.fasterxml.jackson.databind.ObjectMapper::readTree(java.io.File) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade
deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", +
"justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException,
com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws
java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], int, int, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", +
"justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(byte[], java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser,
com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T
com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException,
com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(com.fasterxml.jackson.core.JsonParser, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File,
com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.core.type.TypeReference<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException",
+ "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method <T> T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.File, java.lang.Class<T>) throws
java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, 
com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws 
java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.InputStream, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, 
com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": 
"java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.io.Reader, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T 
com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around 
JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" 
+ }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonParseException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method T com.fasterxml.jackson.databind.ObjectMapper::readValue(java.net.URL, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamReadException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.ResolvedType) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.core.type.TypeReference) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, com.fasterxml.jackson.databind.JavaType) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": 
"java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method com.fasterxml.jackson.databind.MappingIterator com.fasterxml.jackson.databind.ObjectMapper::readValues(com.fasterxml.jackson.core.JsonParser, java.lang.Class) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.core.TreeNode) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.core.TreeNode) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.JsonNode) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeTree(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.JsonNode) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void 
com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(com.fasterxml.jackson.core.JsonGenerator, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + 
"old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void 
com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectMapper::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer 
com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findRootDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.JsonMappingException", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.ObjectReader::_findTreeDeserializer(com.fasterxml.jackson.databind.DeserializationContext) throws com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.JsonParseException", + "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.exc.StreamReadException", + "exception": "com.fasterxml.jackson.core.exc.StreamReadException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.JsonParseException", + "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUndetectableSource(java.lang.Object) throws com.fasterxml.jackson.core.exc.StreamReadException", + "exception": "com.fasterxml.jackson.core.JsonParseException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, 
com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws java.io.IOException", + "exception": "java.io.IOException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.ObjectReader::_reportUnkownFormat(com.fasterxml.jackson.databind.deser.DataFormatReaders, com.fasterxml.jackson.databind.deser.DataFormatReaders.Match) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.DataOutput, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + 
"old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.File, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void 
com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.OutputStream, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.exc.StreamWriteException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException, com.fasterxml.jackson.databind.JsonMappingException", + "new": "method void com.fasterxml.jackson.databind.ObjectWriter::writeValue(java.io.Writer, java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.exc.StreamWriteException, com.fasterxml.jackson.databind.DatabindException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field 
com.fasterxml.jackson.databind.SerializationConfig.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.SerializationConfig.serialVersionUID", + "serialVersionUID": "1", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter com.fasterxml.jackson.databind.SerializationConfig com.fasterxml.jackson.databind.SerializationConfig::_withMapperFeatures(===int===)", + "new": "parameter com.fasterxml.jackson.databind.SerializationConfig com.fasterxml.jackson.databind.SerializationConfig::_withMapperFeatures(===long===)", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.EAGER_SERIALIZER_FETCH", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.EAGER_SERIALIZER_FETCH", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.USE_EQUALITY_FOR_OBJECT_ID", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.USE_EQUALITY_FOR_OBJECT_ID", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_BIGDECIMAL_AS_PLAIN", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_BIGDECIMAL_AS_PLAIN", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_CHAR_ARRAYS_AS_JSON_ARRAYS", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_CHAR_ARRAYS_AS_JSON_ARRAYS", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DATE_TIMESTAMPS_AS_NANOSECONDS", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DATE_TIMESTAMPS_AS_NANOSECONDS", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_DURATIONS_AS_TIMESTAMPS", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_EMPTY_JSON_ARRAYS", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_EMPTY_JSON_ARRAYS", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_INDEX", + "new": "field 
com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_INDEX", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_TO_STRING", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUMS_USING_TO_STRING", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUM_KEYS_USING_INDEX", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_ENUM_KEYS_USING_INDEX", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_NULL_MAP_VALUES", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_NULL_MAP_VALUES", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.enumConstantOrderChanged", + "old": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED", + "new": "field com.fasterxml.jackson.databind.SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.finalMethodAddedToNonFinalClass", + "new": "method com.fasterxml.jackson.databind.util.TokenBuffer com.fasterxml.jackson.databind.SerializerProvider::bufferForValueConversion()", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.removed", + "old": "method java.lang.Class com.fasterxml.jackson.databind.SerializerProvider::getSerializationView()", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.typeChanged", + "old": "field com.fasterxml.jackson.databind.cfg.MapperConfig>._mapperFeatures", + "new": "field com.fasterxml.jackson.databind.cfg.MapperConfig>._mapperFeatures", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.field.serialVersionUIDUnchanged", + "old": "field com.fasterxml.jackson.databind.cfg.MapperConfig>.serialVersionUID", + "new": "field com.fasterxml.jackson.databind.cfg.MapperConfig>.serialVersionUID", + "serialVersionUID": "2", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.BaseSettings, ===int===)", + "new": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.BaseSettings, ===long===)", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.MapperConfig, ===int===)", + "new": "parameter void com.fasterxml.jackson.databind.cfg.MapperConfig>::(com.fasterxml.jackson.databind.cfg.MapperConfig, ===long===)", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void 
com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::_fixAccess(java.util.Collection)", + "new": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::_fixAccess(java.util.Collection) throws com.fasterxml.jackson.databind.JsonMappingException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addBackReferenceProperty(java.lang.String, com.fasterxml.jackson.databind.deser.SettableBeanProperty)", + "new": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addBackReferenceProperty(java.lang.String, com.fasterxml.jackson.databind.deser.SettableBeanProperty) throws com.fasterxml.jackson.databind.JsonMappingException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addInjectable(com.fasterxml.jackson.databind.PropertyName, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.util.Annotations, com.fasterxml.jackson.databind.introspect.AnnotatedMember, java.lang.Object)", + "new": "method void com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::addInjectable(com.fasterxml.jackson.databind.PropertyName, com.fasterxml.jackson.databind.JavaType, com.fasterxml.jackson.databind.util.Annotations, com.fasterxml.jackson.databind.introspect.AnnotatedMember, java.lang.Object) throws com.fasterxml.jackson.databind.JsonMappingException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedAdded", + "old": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::build()", + "new": "method com.fasterxml.jackson.databind.JsonDeserializer com.fasterxml.jackson.databind.deser.BeanDeserializerBuilder::build() throws com.fasterxml.jackson.databind.JsonMappingException", + "exception": "com.fasterxml.jackson.databind.JsonMappingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", + "new": "class com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", + "superClass": "com.fasterxml.jackson.databind.DatabindException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.deser.impl.PropertyValue::assign(java.lang.Object) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.deser.impl.PropertyValue::assign(java.lang.Object) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void 
com.fasterxml.jackson.databind.node.BinaryNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.node.BinaryNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException @ com.fasterxml.jackson.databind.node.NumericNode", + "new": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException @ com.fasterxml.jackson.databind.node.NumericNode", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException @ com.fasterxml.jackson.databind.node.ValueNode", + "new": "method void com.fasterxml.jackson.databind.node.BaseJsonNode::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException @ com.fasterxml.jackson.databind.node.ValueNode", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::serializeFieldsFiltered(java.lang.Object, com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonGenerationException", + "new": "method void com.fasterxml.jackson.databind.ser.std.BeanSerializerBase::serializeFieldsFiltered(java.lang.Object, com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonGenerationException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": "java.method.exception.checkedRemoved", + "old": "method void com.fasterxml.jackson.databind.type.TypeBase::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException, com.fasterxml.jackson.core.JsonProcessingException", + "new": "method void com.fasterxml.jackson.databind.type.TypeBase::serialize(com.fasterxml.jackson.core.JsonGenerator, com.fasterxml.jackson.databind.SerializerProvider) throws java.io.IOException", + "exception": "com.fasterxml.jackson.core.JsonProcessingException", + "justification": "Upgrade deps around JAVA-2977 to address outstanding CVEs" + }, + { + "code": 
"java.class.nonFinalClassInheritsFromNewClass", + "old": "class com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException", + "new": "class com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException", + "superClass": "com.datastax.oss.driver.api.core.DriverException", + "justification": "Make CodecNotFoundException to extend DriverException as all other driver exceptions do" + }, + { + "code": "java.class.removed", + "old": "class com.datastax.oss.driver.api.core.data.CqlVector", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getCqlVector(com.datastax.oss.driver.api.core.CqlIdentifier)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getCqlVector(int)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getCqlVector(java.lang.String)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>::setCqlVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setCqlVector(int, com.datastax.oss.driver.api.core.data.CqlVector)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setCqlVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.class.removed", + "old": "class com.datastax.oss.driver.api.core.type.CqlVectorType", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.returnTypeChanged", + "old": "method com.datastax.oss.driver.api.core.type.CqlVectorType com.datastax.oss.driver.api.core.type.DataTypes::vectorOf(com.datastax.oss.driver.api.core.type.DataType, int)", + "new": "method com.datastax.oss.driver.api.core.type.VectorType com.datastax.oss.driver.api.core.type.DataTypes::vectorOf(com.datastax.oss.driver.api.core.type.DataType, int)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.CqlVectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.VectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, 
com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.method.removed", + "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "justification": "Refactoring in JAVA-3061" + }, + { + "code": "java.class.removed", + "old": "class com.datastax.oss.driver.api.core.data.CqlVector.Builder", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.removed", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector.Builder com.datastax.oss.driver.api.core.data.CqlVector::builder()", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.removed", + "old": "method java.lang.Iterable com.datastax.oss.driver.api.core.data.CqlVector::getValues()", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "class com.datastax.oss.driver.api.core.data.CqlVector", + "new": "class com.datastax.oss.driver.api.core.data.CqlVector", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.CqlVectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(===com.datastax.oss.driver.api.core.type.VectorType===, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.CqlVectorType, 
com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", + "new": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "justification": "Refactorings in PR 1666" + }, + { + "code": "java.method.returnTypeChangedCovariantly", + "old": "method java.lang.Throwable java.lang.Throwable::fillInStackTrace() @ com.fasterxml.jackson.databind.deser.UnresolvedForwardReference", + "new": "method com.fasterxml.jackson.databind.deser.UnresolvedForwardReference com.fasterxml.jackson.databind.deser.UnresolvedForwardReference::fillInStackTrace()", + "justification": "Upgrade jackson-databind to 2.13.4.1 to address CVEs, API change cause: https://github.com/FasterXML/jackson-databind/issues/3419" + }, + { + "code": "java.annotation.added", + "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", + "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>", + "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchStatement", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ 
com.datastax.oss.driver.api.core.cql.BatchableStatement>", + "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BatchableStatement>", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", + "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.BoundStatement", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", + "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int) @ com.datastax.oss.driver.api.core.cql.SimpleStatement", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int)", + "new": "method SelfT com.datastax.oss.driver.api.core.cql.Statement>::setNowInSeconds(int)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::add(com.datastax.oss.driver.api.core.cql.BatchableStatement)", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::add(com.datastax.oss.driver.api.core.cql.BatchableStatement)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(com.datastax.oss.driver.api.core.cql.BatchableStatement[])", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(com.datastax.oss.driver.api.core.cql.BatchableStatement[])", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(java.lang.Iterable>)", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::addAll(java.lang.Iterable>)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::clear()", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement 
com.datastax.oss.driver.api.core.cql.BatchStatement::clear()", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setBatchType(com.datastax.oss.driver.api.core.cql.BatchType)", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setBatchType(com.datastax.oss.driver.api.core.cql.BatchType)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(java.lang.String)", + "new": "method com.datastax.oss.driver.api.core.cql.BatchStatement com.datastax.oss.driver.api.core.cql.BatchStatement::setKeyspace(java.lang.String)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "JAVA-2161: Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setQuery(java.lang.String)", + "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setQuery(java.lang.String)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", + "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(com.datastax.oss.driver.api.core.CqlIdentifier)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(java.lang.String)", + "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setKeyspace(java.lang.String)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement 
com.datastax.oss.driver.api.core.cql.SimpleStatement::setPositionalValues(java.util.List)", + "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setPositionalValues(java.util.List)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValuesWithIds(java.util.Map)", + "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValuesWithIds(java.util.Map)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.annotation.added", + "old": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValues(java.util.Map)", + "new": "method com.datastax.oss.driver.api.core.cql.SimpleStatement com.datastax.oss.driver.api.core.cql.SimpleStatement::setNamedValues(java.util.Map)", + "annotation": "@edu.umd.cs.findbugs.annotations.CheckReturnValue", + "justification": "Annotate mutating methods with @CheckReturnValue" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::from(java.lang.String, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeChanged", + "old": "method T com.datastax.oss.driver.api.core.data.CqlVector::get(int)", + "new": "method T com.datastax.oss.driver.api.core.data.CqlVector::get(int)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": 
"java.method.returnTypeTypeParametersChanged", + "old": "method java.util.Iterator com.datastax.oss.driver.api.core.data.CqlVector::iterator()", + "new": "method java.util.Iterator com.datastax.oss.driver.api.core.data.CqlVector::iterator()", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===V[]===)", + "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===V[]===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(V[])", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===java.util.List===)", + "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(===java.util.List===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::newInstance(java.util.List)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeChanged", + "old": "parameter T com.datastax.oss.driver.api.core.data.CqlVector::set(int, ===T===)", + "new": "parameter T com.datastax.oss.driver.api.core.data.CqlVector::set(int, ===T===)", + "justification": "JAVA-3143: 
Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeChanged", + "old": "method T com.datastax.oss.driver.api.core.data.CqlVector::set(int, T)", + "new": "method T com.datastax.oss.driver.api.core.data.CqlVector::set(int, T)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method java.util.Spliterator java.lang.Iterable::spliterator() @ com.datastax.oss.driver.api.core.data.CqlVector", + "new": "method java.util.Spliterator java.lang.Iterable::spliterator() @ com.datastax.oss.driver.api.core.data.CqlVector", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method java.util.stream.Stream com.datastax.oss.driver.api.core.data.CqlVector::stream()", + "new": "method java.util.stream.Stream com.datastax.oss.driver.api.core.data.CqlVector::stream()", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::subVector(int, int)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.CqlVector::subVector(int, int)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.class.noLongerImplementsInterface", + "old": "class com.datastax.oss.driver.api.core.data.CqlVector", + "new": "class com.datastax.oss.driver.api.core.data.CqlVector", + "interface": "java.lang.Iterable", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "class com.datastax.oss.driver.api.core.data.CqlVector", + "new": "class com.datastax.oss.driver.api.core.data.CqlVector", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.class.superTypeTypeParametersChanged", + "old": "class com.datastax.oss.driver.api.core.data.CqlVector", + "new": "class com.datastax.oss.driver.api.core.data.CqlVector", + "oldSuperType": "java.lang.Iterable", + "newSuperType": "java.lang.Iterable", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===java.lang.Class===)", + "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": 
"java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableById::getVector(com.datastax.oss.driver.api.core.CqlIdentifier, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, ===java.lang.Class===)", + "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, ===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByIndex::getVector(int, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, ===java.lang.Class===)", + "new": "parameter com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, ===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", + "new": "method 
com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.data.CqlVector com.datastax.oss.driver.api.core.data.GettableByName::getVector(java.lang.String, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", + "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", + "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", + "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableById>::setVector(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", + "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", + "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, 
com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", + "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByIndex>::setVector(int, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", + "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, ===com.datastax.oss.driver.api.core.data.CqlVector===, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", + "new": "parameter SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, ===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", + "new": "method SelfT com.datastax.oss.driver.api.core.data.SettableByName>::setVector(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector, java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> 
com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(com.datastax.oss.driver.api.core.type.VectorType, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "new": "parameter com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, ===com.datastax.oss.driver.api.core.type.codec.TypeCodec===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "new": "method com.datastax.oss.driver.api.core.type.codec.TypeCodec> com.datastax.oss.driver.api.core.type.codec.TypeCodecs::vectorOf(int, com.datastax.oss.driver.api.core.type.codec.TypeCodec)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", + "new": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===com.datastax.oss.driver.api.core.type.reflect.GenericType===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method 
com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===java.lang.Class===)", + "new": "parameter com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(===java.lang.Class===)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.method.returnTypeTypeParametersChanged", + "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.generics.formalTypeParameterChanged", + "old": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", + "new": "method com.datastax.oss.driver.api.core.type.reflect.GenericType> com.datastax.oss.driver.api.core.type.reflect.GenericType::vectorOf(java.lang.Class)", + "justification": "JAVA-3143: Extend driver vector support to arbitrary subtypes and fix handling of variable length types (OSS C* 5.0)" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "old": "class com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverExecutionProfile.Base", + "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "old": "class com.fasterxml.jackson.databind.type.TypeParser.MyTokenizer", + "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" + }, + { + "code": "java.class.nonPublicPartOfAPI", + "old": "class org.apache.tinkerpop.shaded.jackson.databind.type.TypeParser.MyTokenizer", + "justification": "CASSJAVA-102: Fix spurious complaints about optional dependencies" + }, + { + "code": "java.class.externalClassExposedInAPI", + "justification": "CASSJAVA-102: Migrate revapi config 
into dedicated config files, ported from pom.xml" + }, + { + "code": "java.method.varargOverloadsOnlyDifferInVarargParameter", + "justification": "CASSJAVA-102: Migrate revapi config into dedicated config files, ported from pom.xml" + }, + { + "code": "java.method.addedToInterface", + "new": "method java.util.Optional com.datastax.oss.driver.api.core.context.DriverContext::getRequestIdGenerator()", + "justification": "CASSJAVA-97: Let users inject an ID for each request and write to the custom payload" } ] } diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java new file mode 100644 index 00000000000..dc420970427 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/DseProtocolVersion.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core; + +import com.datastax.dse.protocol.internal.DseProtocolConstants; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.ProtocolVersion; + +/** + * A DSE-specific protocol version. + * + *

Legacy DSE versions did not have a specific version, but instead reused a Cassandra protocol + * version: DSE 5.0 is supported via {@link DefaultProtocolVersion#V4}, and DSE 4.7 and 4.8 via + * {@link DefaultProtocolVersion#V3}. + * + *
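+ * <p>Should you need to force a specific version, a minimal sketch (the config-based approach
+ * and the version choice here are illustrative, not the only way):
+ *
+ * <pre>
+ *     CqlSession session =
+ *         CqlSession.builder()
+ *             .withConfigLoader(
+ *                 DriverConfigLoader.programmaticBuilder()
+ *                     .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V2")
+ *                     .build())
+ *             .build();
+ * </pre>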

DSE 4.6 and earlier are not supported by this version of the driver, use the 1.x series. + */ +public enum DseProtocolVersion implements ProtocolVersion { + + /** Version 1, supported by DSE 5.1.0 and above. */ + DSE_V1(DseProtocolConstants.Version.DSE_V1, false), + + /** Version 2, supported by DSE 6 and above. */ + DSE_V2(DseProtocolConstants.Version.DSE_V2, false), + ; + + private final int code; + private final boolean beta; + + DseProtocolVersion(int code, boolean beta) { + this.code = code; + this.beta = beta; + } + + @Override + public int getCode() { + return code; + } + + @Override + public boolean isBeta() { + return beta; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java new file mode 100644 index 00000000000..8251aaf767c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/DseSession.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.MavenCoordinates; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * @deprecated All DSE functionality is now available directly on {@link CqlSession}. This type is + * preserved for backward compatibility, but you should now use {@link CqlSession} instead. + */ +@Deprecated +public interface DseSession extends CqlSession { + + /** + * @deprecated the DSE driver is no longer published as a separate artifact. This field is + * preserved for backward compatibility, but it returns the same value as {@link + * CqlSession#OSS_DRIVER_COORDINATES}. + */ + @Deprecated @NonNull MavenCoordinates DSE_DRIVER_COORDINATES = CqlSession.OSS_DRIVER_COORDINATES; + + /** + * Returns a builder to create a new instance. + * + *

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static DseSessionBuilder builder() { + return new DseSessionBuilder(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java new file mode 100644 index 00000000000..01e5f9f9125 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/DseSessionBuilder.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.NotThreadSafe; + +/** + * @deprecated DSE functionality is now exposed directly on {@link CqlSession}. This class is + * preserved for backward compatibility, but {@link CqlSession#builder()} should be used + * instead. + */ +@NotThreadSafe +@Deprecated +public class DseSessionBuilder extends SessionBuilder { + + @NonNull + @Override + protected DseSession wrap(@NonNull CqlSession defaultSession) { + return new com.datastax.dse.driver.internal.core.session.DefaultDseSession(defaultSession); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java new file mode 100644 index 00000000000..abd68b530b6 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/auth/BaseDseAuthenticator.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.oss.driver.api.core.auth.SyncAuthenticator; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import net.jcip.annotations.ThreadSafe; + +/** + * Base class for {@link SyncAuthenticator} implementations that want to make use of the + * authentication scheme negotiation in DseAuthenticator. + */ +@ThreadSafe +public abstract class BaseDseAuthenticator implements SyncAuthenticator { + + private static final String DSE_AUTHENTICATOR = + "com.datastax.bdp.cassandra.auth.DseAuthenticator"; + + private final String serverAuthenticator; + + protected BaseDseAuthenticator(@NonNull String serverAuthenticator) { + this.serverAuthenticator = serverAuthenticator; + } + + /** + * Return a byte buffer containing the required SASL mechanism. + * + *

<p>This should be one of:
+ *
+ * <ul>
+ *   <li>PLAIN
+ *   <li>GSSAPI
+ * </ul>
+ * + * This must be either a {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} buffer, or a new + * instance every time. + */ + @NonNull + protected abstract ByteBuffer getMechanism(); + + /** + * Return a byte buffer containing the expected successful server challenge. + * + *

<p>This should be one of:
+ *
+ * <ul>
+ *   <li>PLAIN-START
+ *   <li>GSSAPI-START
+ * </ul>
+ * + * This must be either a {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} buffer, or a new + * instance every time. + */ + @NonNull + protected abstract ByteBuffer getInitialServerChallenge(); + + @Nullable + @Override + public ByteBuffer initialResponseSync() { + // DseAuthenticator communicates back the mechanism in response to server authenticate message. + // older authenticators simply expect the auth response with credentials. + if (isDseAuthenticator()) { + return getMechanism(); + } else { + return evaluateChallengeSync(getInitialServerChallenge()); + } + } + + @Override + public void onAuthenticationSuccessSync(@Nullable ByteBuffer token) {} + + private boolean isDseAuthenticator() { + return serverAuthenticator.equals(DSE_AUTHENTICATOR); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java new file mode 100644 index 00000000000..48a0e5b0ef3 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.java @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.oss.driver.api.core.auth.AuthProvider; +import com.datastax.oss.driver.api.core.auth.AuthenticationException; +import com.datastax.oss.driver.api.core.auth.Authenticator; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.protocol.internal.util.Bytes; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import javax.security.auth.Subject; +import javax.security.auth.login.AppConfigurationEntry; +import javax.security.auth.login.Configuration; +import javax.security.auth.login.LoginContext; +import javax.security.auth.login.LoginException; +import javax.security.sasl.Sasl; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; +import net.jcip.annotations.Immutable; +import net.jcip.annotations.NotThreadSafe; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public abstract class DseGssApiAuthProviderBase implements AuthProvider { + + /** The default SASL service name used by this auth provider. 
*/ + public static final String DEFAULT_SASL_SERVICE_NAME = "dse"; + + /** The name of the system property to use to specify the SASL service name. */ + public static final String SASL_SERVICE_NAME_PROPERTY = "dse.sasl.service"; + + /** + * Legacy system property for SASL protocol name. Clients should migrate to + * SASL_SERVICE_NAME_PROPERTY above. + */ + private static final String LEGACY_SASL_PROTOCOL_PROPERTY = "dse.sasl.protocol"; + + private static final Logger LOG = LoggerFactory.getLogger(DseGssApiAuthProviderBase.class); + + private final String logPrefix; + + /** + * @param logPrefix a string that will get prepended to the logs (this is used for discrimination + * when you have multiple driver instances executing in the same JVM). Config-based + * implementations fill this with {@link Session#getName()}. + */ + protected DseGssApiAuthProviderBase(@NonNull String logPrefix) { + this.logPrefix = Objects.requireNonNull(logPrefix); + } + + @NonNull + protected abstract GssApiOptions getOptions( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator); + + @NonNull + @Override + public Authenticator newAuthenticator( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) + throws AuthenticationException { + return new GssApiAuthenticator( + getOptions(endPoint, serverAuthenticator), endPoint, serverAuthenticator); + } + + @Override + public void onMissingChallenge(@NonNull EndPoint endPoint) { + LOG.warn( + "[{}] {} did not send an authentication challenge; " + + "This is suspicious because the driver expects authentication", + logPrefix, + endPoint); + } + + @Override + public void close() { + // nothing to do + } + + /** + * The options to initialize a new authenticator. + * + *

Use {@link #builder()} to create an instance. + */ + @Immutable + public static class GssApiOptions { + + @NonNull + public static Builder builder() { + return new Builder(); + } + + private final Configuration loginConfiguration; + private final Subject subject; + private final String saslProtocol; + private final String authorizationId; + private final Map saslProperties; + + private GssApiOptions( + @Nullable Configuration loginConfiguration, + @Nullable Subject subject, + @Nullable String saslProtocol, + @Nullable String authorizationId, + @NonNull Map saslProperties) { + this.loginConfiguration = loginConfiguration; + this.subject = subject; + this.saslProtocol = saslProtocol; + this.authorizationId = authorizationId; + this.saslProperties = saslProperties; + } + + @Nullable + public Configuration getLoginConfiguration() { + return loginConfiguration; + } + + @Nullable + public Subject getSubject() { + return subject; + } + + @Nullable + public String getSaslProtocol() { + return saslProtocol; + } + + @Nullable + public String getAuthorizationId() { + return authorizationId; + } + + @NonNull + public Map getSaslProperties() { + return saslProperties; + } + + @NotThreadSafe + public static class Builder { + + private Configuration loginConfiguration; + private Subject subject; + private String saslProtocol; + private String authorizationId; + private final Map saslProperties = new HashMap<>(); + + public Builder() { + saslProperties.put(Sasl.SERVER_AUTH, "true"); + saslProperties.put(Sasl.QOP, "auth"); + } + + /** + * Sets a login configuration that will be used to create a {@link LoginContext}. + * + *

You MUST call either a withLoginConfiguration method or {@link #withSubject(Subject)}; + * if both are called, the subject takes precedence, and the login configuration will be + * ignored. + * + * @see #withLoginConfiguration(Map) + */ + @NonNull + public Builder withLoginConfiguration(@Nullable Configuration loginConfiguration) { + this.loginConfiguration = loginConfiguration; + return this; + } + /** + * Sets a login configuration that will be used to create a {@link LoginContext}. + * + *

This is an alternative to {@link #withLoginConfiguration(Configuration)}, that builds + * the configuration from {@code Krb5LoginModule} with the given options. + * + *

You MUST call either a withLoginConfiguration method or {@link #withSubject(Subject)}; + * if both are called, the subject takes precedence, and the login configuration will be + * ignored. + */ + @NonNull + public Builder withLoginConfiguration(@Nullable Map loginConfiguration) { + this.loginConfiguration = fetchLoginConfiguration(loginConfiguration); + return this; + } + + /** + * Sets a previously authenticated subject to reuse. + * + *

You MUST call either this method or {@link #withLoginConfiguration(Configuration)}; if + * both are called, the subject takes precedence, and the login configuration will be ignored. + */ + @NonNull + public Builder withSubject(@Nullable Subject subject) { + this.subject = subject; + return this; + } + + /** + * Sets the SASL protocol name to use; should match the username of the Kerberos service + * principal used by the DSE server. + */ + @NonNull + public Builder withSaslProtocol(@Nullable String saslProtocol) { + this.saslProtocol = saslProtocol; + return this; + } + + /** Sets the authorization ID (allows proxy authentication). */ + @NonNull + public Builder withAuthorizationId(@Nullable String authorizationId) { + this.authorizationId = authorizationId; + return this; + } + + /** + * Add a SASL property to use when creating the SASL client. + * + *

<p>Note that this builder pre-initializes these two default properties:
+ *
+ * <pre>
+ * javax.security.sasl.server.authentication = true
+ * javax.security.sasl.qop = auth
+ * </pre>
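+ *
+ * <p>For example, a minimal sketch (the property value is illustrative):
+ *
+ * <pre>
+ *     builder.addSaslProperty("javax.security.sasl.qop", "auth-conf");
+ * </pre>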
+ */ + @NonNull + public Builder addSaslProperty(@NonNull String name, @NonNull String value) { + this.saslProperties.put(Objects.requireNonNull(name), Objects.requireNonNull(value)); + return this; + } + + @NonNull + public GssApiOptions build() { + return new GssApiOptions( + loginConfiguration, + subject, + saslProtocol, + authorizationId, + ImmutableMap.copyOf(saslProperties)); + } + + public static Configuration fetchLoginConfiguration(Map options) { + return new Configuration() { + + @Override + public AppConfigurationEntry[] getAppConfigurationEntry(String name) { + return new AppConfigurationEntry[] { + new AppConfigurationEntry( + "com.sun.security.auth.module.Krb5LoginModule", + AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, + options) + }; + } + }; + } + } + } + + protected static class GssApiAuthenticator extends BaseDseAuthenticator { + + private static final ByteBuffer MECHANISM = + ByteBuffer.wrap("GSSAPI".getBytes(Charsets.UTF_8)).asReadOnlyBuffer(); + private static final ByteBuffer SERVER_INITIAL_CHALLENGE = + ByteBuffer.wrap("GSSAPI-START".getBytes(Charsets.UTF_8)).asReadOnlyBuffer(); + private static final ByteBuffer EMPTY_BYTE_ARRAY = + ByteBuffer.wrap(new byte[0]).asReadOnlyBuffer(); + private static final String JAAS_CONFIG_ENTRY = "DseClient"; + private static final String[] SUPPORTED_MECHANISMS = new String[] {"GSSAPI"}; + + private Subject subject; + private SaslClient saslClient; + private EndPoint endPoint; + + protected GssApiAuthenticator( + GssApiOptions options, EndPoint endPoint, String serverAuthenticator) { + super(serverAuthenticator); + + try { + if (options.getSubject() != null) { + this.subject = options.getSubject(); + } else { + Configuration loginConfiguration = options.getLoginConfiguration(); + if (loginConfiguration == null) { + throw new IllegalArgumentException("Must provide one of subject or loginConfiguration"); + } + LoginContext login = new LoginContext(JAAS_CONFIG_ENTRY, null, null, loginConfiguration); + login.login(); + this.subject = login.getSubject(); + } + String protocol = options.getSaslProtocol(); + if (protocol == null) { + protocol = + System.getProperty( + SASL_SERVICE_NAME_PROPERTY, + System.getProperty(LEGACY_SASL_PROTOCOL_PROPERTY, DEFAULT_SASL_SERVICE_NAME)); + } + this.saslClient = + Sasl.createSaslClient( + SUPPORTED_MECHANISMS, + options.getAuthorizationId(), + protocol, + ((InetSocketAddress) endPoint.resolve()).getAddress().getCanonicalHostName(), + options.getSaslProperties(), + null); + } catch (LoginException | SaslException e) { + throw new AuthenticationException(endPoint, e.getMessage()); + } + this.endPoint = endPoint; + } + + @NonNull + @Override + protected ByteBuffer getMechanism() { + return MECHANISM; + } + + @NonNull + @Override + protected ByteBuffer getInitialServerChallenge() { + return SERVER_INITIAL_CHALLENGE; + } + + @Nullable + @Override + public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge) { + + byte[] challengeBytes; + if (SERVER_INITIAL_CHALLENGE.equals(challenge)) { + if (!saslClient.hasInitialResponse()) { + return EMPTY_BYTE_ARRAY; + } + challengeBytes = new byte[0]; + } else { + // The native protocol spec says the incoming challenge can be null depending on the + // implementation. But saslClient.evaluateChallenge clearly documents that the byte array + // can't be null, which probably means that a SASL authenticator never sends back null. 
+ if (challenge == null) { + throw new AuthenticationException(this.endPoint, "Unexpected null challenge from server"); + } + challengeBytes = Bytes.getArray(challenge); + } + try { + + return ByteBuffer.wrap( + Subject.doAs( + subject, + new PrivilegedExceptionAction() { + @Override + public byte[] run() throws SaslException { + return saslClient.evaluateChallenge(challengeBytes); + } + })); + } catch (PrivilegedActionException e) { + throw new AuthenticationException(this.endPoint, e.getMessage(), e.getException()); + } + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java new file mode 100644 index 00000000000..7c5ee23bd6c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderBase.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.ThreadSafe; + +/** + * @deprecated The driver's default plain text providers now support both Apache Cassandra and DSE. + * This type was preserved for backward compatibility, but implementors should now extend {@link + * PlainTextAuthProviderBase} instead. + */ +@ThreadSafe +@Deprecated +public abstract class DsePlainTextAuthProviderBase extends PlainTextAuthProviderBase { + + protected DsePlainTextAuthProviderBase(@NonNull String logPrefix) { + super(logPrefix); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java new file mode 100644 index 00000000000..64ee5265b5a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.oss.driver.api.core.auth.AuthProvider; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * {@link AuthProvider} that provides GSSAPI authenticator instances for clients to connect to DSE + * clusters secured with {@code DseAuthenticator}, in a programmatic way. + * + *

To use this provider, the corresponding GssApiOptions must be passed into it + * directly, for example: + * + *

+ *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
+ *         DseGssApiAuthProviderBase.GssApiOptions.builder();
+ *     Map<String, String> loginConfig =
+ *         ImmutableMap.of(
+ *             "principal",
+ *             "user principal here, e.g. cassandra@DATASTAX.COM",
+ *             "useKeyTab",
+ *             "true",
+ *             "refreshKrb5Config",
+ *             "true",
+ *             "keyTab",
+ *             "Path to keytab file here");
+ *
+ *     builder.withLoginConfiguration(loginConfig);
+ *
+ *     CqlSession session =
+ *         CqlSession.builder()
+ *             .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build()))
+ *             .build();
+ * 
+ * + * or alternatively + * + *
+ *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
+ *         DseGssApiAuthProviderBase.GssApiOptions.builder().withSubject(subject);
+ *     CqlSession session =
+ *         CqlSession.builder()
+ *             .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build()))
+ *             .build();
+ * 
+ * + *

<b>Kerberos Authentication</b>

+ * + * Keytab and ticket cache settings are specified using a standard JAAS configuration file. The + * location of the file can be set using the java.security.auth.login.config system + * property or by adding a login.config.url.n entry in the java.security + * properties file. Alternatively a login-configuration, or subject can be provided to the provider + * via the GssApiOptions (see above). + * + *

See the following documents for further details: + * + *

<ol>
+ *   <li>JAAS Login Configuration File;
+ *   <li>Krb5LoginModule options;
+ *   <li>JAAS Authentication Tutorial, for more on JAAS in general.
+ * </ol>
+ * + *

<b>Authentication using ticket cache</b>

+ * + * Run kinit to obtain a ticket and populate the cache before connecting. JAAS config: + * + *
+ * DseClient {
+ *   com.sun.security.auth.module.Krb5LoginModule required
+ *     useTicketCache=true
+ *     renewTGT=true;
+ * };
+ * 
+ * + *

<b>Authentication using a keytab file</b>

+ * + * To enable authentication using a keytab file, specify its location on disk. If your keytab + * contains more than one principal key, you should also specify which one to select. This + * information can also be specified in the driver config, under the login-configuration section. + * + *
+ * DseClient {
+ *     com.sun.security.auth.module.Krb5LoginModule required
+ *       useKeyTab=true
+ *       keyTab="/path/to/file.keytab"
+ *       principal="user@MYDOMAIN.COM";
+ * };
+ * 
+ * + *

<b>Specifying SASL protocol name</b>

+ * + * The SASL protocol name used by this auth provider defaults to "{@value #DEFAULT_SASL_SERVICE_NAME}". + * + *

Important: the SASL protocol name should match the username of the Kerberos + * service principal used by the DSE server. This information is specified in the dse.yaml file by + * the {@code service_principal} option under the kerberos_options + * section, and may vary from one DSE installation to another – especially if you installed + * DSE with an automated package installer. + * + *

For example, if your dse.yaml file contains the following: + * + *

{@code
+ * kerberos_options:
+ *     ...
+ *     service_principal: cassandra/my.host.com@MY.REALM.COM
+ * }
+ * + * The correct SASL protocol name to use when authenticating against this DSE server is "{@code + * cassandra}". + * + *

Should you need to change the SASL protocol name, specify it in the GssApiOptions using the + * method below: + * + *

+ *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
+ *         DseGssApiAuthProviderBase.GssApiOptions.builder();
+ *     builder.withSaslProtocol("alternate");
+ *     DseGssApiAuthProviderBase.GssApiOptions options = builder.build();
+ * 
+ * + *

Should internal SASL properties, such as qop, need to be set, this can be accomplished by + * setting them in the GssApiOptions: + * + *

+ *     DseGssApiAuthProviderBase.GssApiOptions.Builder builder =
+ *         DseGssApiAuthProviderBase.GssApiOptions.builder();
+ *     builder.addSaslProperty("javax.security.sasl.qop", "auth-conf");
+ *     DseGssApiAuthProviderBase.GssApiOptions options = builder.build();
+ * 
+ * + * @see Authenticating + * a DSE cluster with Kerberos + */ +public class ProgrammaticDseGssApiAuthProvider extends DseGssApiAuthProviderBase { + private final GssApiOptions options; + + public ProgrammaticDseGssApiAuthProvider(GssApiOptions options) { + super("Programmatic-Kerberos"); + this.options = options; + } + + @NonNull + @Override + protected GssApiOptions getOptions( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { + return options; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java new file mode 100644 index 00000000000..a3624ba736d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Map; + +public class ProxyAuthentication { + private static final String PROXY_EXECUTE = "ProxyExecute"; + + /** + * Adds proxy authentication information to a CQL statement. + * + *
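+ * <p>A minimal usage sketch (the role name and query are hypothetical; semantics are described
+ * below):
+ *
+ * <pre>
+ *     SimpleStatement statement = SimpleStatement.newInstance("SELECT * FROM ks.tbl");
+ *     session.execute(ProxyAuthentication.executeAs("alice", statement));
+ * </pre>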

This allows executing a statement as another role than the one the session is currently + * authenticated as. + * + * @param userOrRole the role to use for execution. If the statement was already configured with + * another role, it will get replaced by this one. + * @param statement the statement to modify. + * @return a statement that will run the same CQL query as {@code statement}, but acting as the + * provided role. Note: with the driver's default implementations, this will always be a copy; + * but if you use a custom implementation, it might return the same instance (depending on the + * behavior of {@link Statement#setCustomPayload(Map) statement.setCustomPayload()}). + * @see Setting + * up roles for applications (DSE 6.0 admin guide) + */ + @NonNull + public static > StatementT executeAs( + @NonNull String userOrRole, @NonNull StatementT statement) { + return statement.setCustomPayload( + addProxyExecuteEntry(statement.getCustomPayload(), userOrRole)); + } + + /** + * Adds proxy authentication information to a graph statement. + * + * @see #executeAs(String, Statement) + */ + @NonNull + public static > StatementT executeAs( + @NonNull String userOrRole, @NonNull StatementT statement) { + return statement.setCustomPayload( + addProxyExecuteEntry(statement.getCustomPayload(), userOrRole)); + } + + private static Map addProxyExecuteEntry( + Map currentPayload, @NonNull String userOrRole) { + NullAllowingImmutableMap.Builder builder = + NullAllowingImmutableMap.builder(); + builder.put(PROXY_EXECUTE, ByteBuffer.wrap(userOrRole.getBytes(Charsets.UTF_8))); + if (!currentPayload.isEmpty()) { + for (Map.Entry entry : currentPayload.entrySet()) { + String key = entry.getKey(); + if (!key.equals(PROXY_EXECUTE)) { + builder.put(key, entry.getValue()); + } + } + } + return builder.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java new file mode 100644 index 00000000000..2694b51ffca --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverConfigLoader.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.config; + +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.File; +import java.net.URL; + +/** + * @deprecated This class only exists for backward compatibility. All of its methods delegate to + * their counterparts on {@link DriverConfigLoader}, which you should call directly instead. 
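+ * <p>For example, a minimal sketch of the recommended replacement (the resource name is
+ * hypothetical):
+ *
+ * <pre>
+ *     CqlSession session =
+ *         CqlSession.builder()
+ *             .withConfigLoader(DriverConfigLoader.fromClasspath("custom"))
+ *             .build();
+ * </pre>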
+ */ +@Deprecated +public class DseDriverConfigLoader { + + /** + * @deprecated This method only exists for backward compatibility. It delegates to {@link + * DriverConfigLoader#fromClasspath(String)}, which you should call directly instead. + */ + @Deprecated + @NonNull + public static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { + return DriverConfigLoader.fromClasspath(resourceBaseName); + } + + /** + * @deprecated This method only exists for backward compatibility. It delegates to {@link + * DriverConfigLoader#fromFile(File)}, which you should call directly instead. + */ + @Deprecated + @NonNull + public static DriverConfigLoader fromFile(@NonNull File file) { + return DriverConfigLoader.fromFile(file); + } + + /** + * @deprecated This method only exists for backward compatibility. It delegates to {@link + * DriverConfigLoader#fromUrl(URL)}, which you should call directly instead. + */ + @Deprecated + @NonNull + public static DriverConfigLoader fromUrl(@NonNull URL url) { + return DriverConfigLoader.fromUrl(url); + } + + /** + * @deprecated This method only exists for backward compatibility. It delegates to {@link + * DriverConfigLoader#programmaticBuilder()}, which you should call directly instead. + */ + @Deprecated + @NonNull + public static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { + return DriverConfigLoader.programmaticBuilder(); + } + + private DseDriverConfigLoader() { + throw new AssertionError("Not meant to be instantiated"); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java new file mode 100644 index 00000000000..4d10501f6d2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/config/DseDriverOption.java @@ -0,0 +1,334 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.config; + +import com.datastax.oss.driver.api.core.config.DriverOption; +import edu.umd.cs.findbugs.annotations.NonNull; + +public enum DseDriverOption implements DriverOption { + /** + * The name of the application using the session. + * + *
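+ * <p>For example, a minimal application.conf sketch setting this option (the value is
+ * hypothetical):
+ *
+ * <pre>
+ *     datastax-java-driver {
+ *       basic.application.name = "my-analytics-service"
+ *     }
+ * </pre>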

Value type: {@link String} + */ + APPLICATION_NAME("basic.application.name"), + /** + * The version of the application using the session. + * + *

Value type: {@link String} + */ + APPLICATION_VERSION("basic.application.version"), + + /** + * Proxy authentication for GSSAPI authentication: allows logging in as another user or role. + * + *

Value type: {@link String} + */ + AUTH_PROVIDER_AUTHORIZATION_ID("advanced.auth-provider.authorization-id"), + /** + * Service name for GSSAPI authentication. + * + *

Value type: {@link String} + */ + AUTH_PROVIDER_SERVICE("advanced.auth-provider.service"), + /** + * Login configuration for GSSAPI authentication. + * + *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> + */ + AUTH_PROVIDER_LOGIN_CONFIGURATION("advanced.auth-provider.login-configuration"), + /** + * Internal SASL properties, if any, such as QOP, for GSSAPI authentication. + * + *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> + */ + AUTH_PROVIDER_SASL_PROPERTIES("advanced.auth-provider.sasl-properties"), + + /** + * The page size for continuous paging. + * + *

Value type: int + */ + CONTINUOUS_PAGING_PAGE_SIZE("advanced.continuous-paging.page-size"), + /** + * Whether {@link #CONTINUOUS_PAGING_PAGE_SIZE} should be interpreted in number of rows or bytes. + * + *

Value type: boolean + */ + CONTINUOUS_PAGING_PAGE_SIZE_BYTES("advanced.continuous-paging.page-size-in-bytes"), + /** + * The maximum number of continuous pages to return. + * + *

Value type: int + */ + CONTINUOUS_PAGING_MAX_PAGES("advanced.continuous-paging.max-pages"), + /** + * The maximum number of continuous pages per second. + * + *

Value type: int + */ + CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND("advanced.continuous-paging.max-pages-per-second"), + /** + * The maximum number of continuous pages that can be stored in the local queue. + * + *

Value type: int + */ + CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES("advanced.continuous-paging.max-enqueued-pages"), + /** + * How long to wait for the coordinator to send the first continuous page. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE("advanced.continuous-paging.timeout.first-page"), + /** + * How long to wait for the coordinator to send subsequent continuous pages. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES("advanced.continuous-paging.timeout.other-pages"), + + /** + * The largest latency that we expect to record for continuous requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST( + "advanced.metrics.session.continuous-cql-requests.highest-latency"), + /** + * The number of significant decimal digits that internal structures will maintain for + * continuous requests. + * + *

Value-type: int + */ + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS( + "advanced.metrics.session.continuous-cql-requests.significant-digits"), + /** + * The interval at which percentile data is refreshed for continuous requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL( + "advanced.metrics.session.continuous-cql-requests.refresh-interval"), + + /** + * The read consistency level to use for graph statements. + * + *

Value type: {@link String} + */ + GRAPH_READ_CONSISTENCY_LEVEL("basic.graph.read-consistency-level"), + /** + * The write consistency level to use for graph statements. + * + *

Value type: {@link String} + */ + GRAPH_WRITE_CONSISTENCY_LEVEL("basic.graph.write-consistency-level"), + /** + * The traversal source to use for graph statements. + * + *

Value type: {@link String} + */ + GRAPH_TRAVERSAL_SOURCE("basic.graph.traversal-source"), + /** + * The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra + * native protocol. + * + *

Value type: {@link String} + */ + GRAPH_SUB_PROTOCOL("advanced.graph.sub-protocol"), + /** + * Whether a script statement represents a system query. + * + *

Value type: boolean + */ + GRAPH_IS_SYSTEM_QUERY("basic.graph.is-system-query"), + /** + * The name of the graph targeted by graph statements. + * + *

Value type: {@link String} + */ + GRAPH_NAME("basic.graph.name"), + /** + * How long the driver waits for a graph request to complete. + * + *

Value-type: {@link java.time.Duration Duration} + */ + GRAPH_TIMEOUT("basic.graph.timeout"), + + /** + * Whether to send events for Insights monitoring. + * + *

Value type: boolean + */ + MONITOR_REPORTING_ENABLED("advanced.monitor-reporting.enabled"), + + /** + * Whether to enable paging for Graph queries. + * + *

Value type: {@link String} + */ + GRAPH_PAGING_ENABLED("advanced.graph.paging-enabled"), + + /** + * The page size for Graph continuous paging. + * + *

Value type: int + */ + GRAPH_CONTINUOUS_PAGING_PAGE_SIZE("advanced.graph.paging-options.page-size"), + + /** + * The maximum number of Graph continuous pages to return. + * + *

Value type: int + */ + GRAPH_CONTINUOUS_PAGING_MAX_PAGES("advanced.graph.paging-options.max-pages"), + /** + * The maximum number of Graph continuous pages per second. + * + *

Value type: int + */ + GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND( + "advanced.graph.paging-options.max-pages-per-second"), + /** + * The maximum number of Graph continuous pages that can be stored in the local queue. + * + *

Value type: int + */ + GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES("advanced.graph.paging-options.max-enqueued-pages"), + /** + * The largest latency that we expect to record for graph requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_SESSION_GRAPH_REQUESTS_HIGHEST("advanced.metrics.session.graph-requests.highest-latency"), + /** + * The number of significant decimal digits that internal structures will maintain for graph + * requests. + * + *

Value-type: int + */ + METRICS_SESSION_GRAPH_REQUESTS_DIGITS( + "advanced.metrics.session.graph-requests.significant-digits"), + /** + * The interval at which percentile data is refreshed for graph requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_SESSION_GRAPH_REQUESTS_INTERVAL( + "advanced.metrics.session.graph-requests.refresh-interval"), + /** + * The largest latency that we expect to record for graph requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_NODE_GRAPH_MESSAGES_HIGHEST("advanced.metrics.node.graph-messages.highest-latency"), + /** + * The number of significant decimal digits that internal structures will maintain for graph + * requests. + * + *

Value-type: int + */ + METRICS_NODE_GRAPH_MESSAGES_DIGITS("advanced.metrics.node.graph-messages.significant-digits"), + /** + * The interval at which percentile data is refreshed for graph requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_NODE_GRAPH_MESSAGES_INTERVAL("advanced.metrics.node.graph-messages.refresh-interval"), + + /** + * The shortest latency that we expect to record for continuous requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST( + "advanced.metrics.session.continuous-cql-requests.lowest-latency"), + /** + * Optional service-level objectives to meet, as a list of latencies to track. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO( + "advanced.metrics.session.continuous-cql-requests.slo"), + + /** + * The shortest latency that we expect to record for graph requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_SESSION_GRAPH_REQUESTS_LOWEST("advanced.metrics.session.graph-requests.lowest-latency"), + /** + * Optional service-level objectives to meet, as a list of latencies to track. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_SESSION_GRAPH_REQUESTS_SLO("advanced.metrics.session.graph-requests.slo"), + + /** + * The shortest latency that we expect to record for graph requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_NODE_GRAPH_MESSAGES_LOWEST("advanced.metrics.node.graph-messages.lowest-latency"), + /** + * Optional service-level objectives to meet, as a list of latencies to track. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_NODE_GRAPH_MESSAGES_SLO("advanced.metrics.node.graph-messages.slo"), + /** + * Optional list of percentiles to publish for graph-requests metric. Produces an additional time + * series for each requested percentile. This percentile is computed locally, and so can't be + * aggregated with percentiles computed across other dimensions (e.g. in a different instance). + * + *

Value type: {@link java.util.List List}<{@link Double}> + */ + METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES( + "advanced.metrics.session.graph-requests.publish-percentiles"), + /** + * Optional list of percentiles to publish for node graph-messages metric. Produces an additional + * time series for each requested percentile. This percentile is computed locally, and so can't be + * aggregated with percentiles computed across other dimensions (e.g. in a different instance). + * + *

Value type: {@link java.util.List List}<{@link Double}> + */ + METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES( + "advanced.metrics.node.graph-messages.publish-percentiles"), + /** + * Optional list of percentiles to publish for continuous paging requests metric. Produces an + * additional time series for each requested percentile. This percentile is computed locally, and + * so can't be aggregated with percentiles computed across other dimensions (e.g. in a different + * instance). + * + *

Value type: {@link java.util.List List}<{@link Double}> + */ + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES( + "advanced.metrics.session.continuous-cql-requests.publish-percentiles"), + ; + + private final String path; + + DseDriverOption(String path) { + this.path = path; + } + + @NonNull + @Override + public String getPath() { + return path; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java new file mode 100644 index 00000000000..a9491ec2414 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousAsyncResultSet.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.continuous; + +import com.datastax.oss.driver.api.core.AsyncPagingIterable; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.Statement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.concurrent.CancellationException; + +/** + * The result of an {@linkplain ContinuousSession#executeContinuouslyAsync(Statement) asynchronous + * continuous paging query}. + * + *

DSE replies to a continuous query with a stream of response frames. There is one instance of + * this class for each frame. + */ +public interface ContinuousAsyncResultSet + extends AsyncPagingIterable { + + /** Returns the current page's number. Pages are numbered starting from 1. */ + int pageNumber(); + + /** + * Cancels the continuous query. + * + *

There might still be rows available in the {@linkplain #currentPage() current page} after + * the cancellation; these rows can be retrieved normally. + * + *

Also, there might be more pages available in the driver's local page cache after the + * cancellation; these extra pages will be discarded. + * + *

Therefore, if you plan to resume the iteration later, the correct procedure is as follows: + * + *

<ol>
+ *   <li>Cancel the operation by invoking this method, or by cancelling the {@linkplain
+ *       #fetchNextPage() next page's future};
+ *   <li>Keep iterating on the current page until it doesn't return any more rows;
+ *   <li>Retrieve the paging state with {@link #getExecutionInfo()
+ *       getExecutionInfo().getPagingState()};
+ *   <li>{@linkplain Statement#setPagingState(ByteBuffer) Re-inject the paging state} in the
+ *       statement;
+ *   <li>Resume the operation by invoking {@link
+ *       ContinuousSession#executeContinuouslyAsync(Statement) executeContinuouslyAsync} again,
+ *       as shown in the sketch below.
+ * </ol>
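+ * <p>A minimal sketch of this procedure (error handling omitted, names hypothetical):
+ *
+ * <pre>
+ *     pagingResult.cancel();
+ *     for (Row row : pagingResult.currentPage()) {
+ *       // drain the rows that were already received
+ *     }
+ *     ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState();
+ *     SimpleStatement resumed = statement.setPagingState(pagingState);
+ *     session.executeContinuouslyAsync(resumed); // picks up where the query left off
+ * </pre>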
+ * + * After a cancellation, futures returned by {@link #fetchNextPage()} that are not yet complete + * will always complete exceptionally by throwing a {@link CancellationException}, even if + * they were obtained before the cancellation. + */ + void cancel(); + + /** + * {@inheritDoc} + * + *

Note: because the driver does not support query traces for continuous queries, {@link + * ExecutionInfo#getTracingId()} will always be {@code null}. + */ + @NonNull + @Override + ExecutionInfo getExecutionInfo(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java new file mode 100644 index 00000000000..a333801a59a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousResultSet.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.continuous; + +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Statement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.List; + +/** + * The result of a {@linkplain ContinuousSession#executeContinuously(Statement) synchronous + * continuous paging query}. + * + *

It uses {@linkplain ContinuousAsyncResultSet asynchronous calls} internally, but blocks on the + * results in order to provide a synchronous API to its clients. If the query is paged, only the + * first page will be fetched initially, and iteration will trigger background fetches of the next + * pages when necessary. + * + *

Note that this object can only be iterated once: rows are "consumed" as they are read; + * subsequent calls to {@code iterator()} will return the same iterator instance. + * + *

Implementations of this type are not thread-safe. They can only be iterated by the + * thread that invoked {@code session.executeContinuously}. + */ +public interface ContinuousResultSet extends ResultSet { + + /** + * Cancels the continuous query. + * + *

There might still be rows available in the current page after the cancellation; the + * iteration will only stop when such rows are fully iterated upon. + * + *

Also, there might be more pages available in the driver's local page cache after the + * cancellation; these extra pages will be discarded. + * + *

Therefore, if you plan to resume the iteration later, the correct procedure is as follows: + * + *

<ol>
+ *   <li>Cancel the operation by invoking this method;
+ *   <li>Keep iterating on this object until it doesn't return any more rows;
+ *   <li>Retrieve the paging state with {@link #getExecutionInfo()
+ *       getExecutionInfo().getPagingState()};
+ *   <li>{@linkplain Statement#setPagingState(ByteBuffer) Re-inject the paging state} in the
+ *       statement;
+ *   <li>Resume the operation by invoking {@link ContinuousSession#executeContinuously(Statement)
+ *       executeContinuously} again.
+ * </ol>
+ */ + void cancel(); + + /** + * {@inheritDoc} + * + *

Note: because the driver does not support query traces for continuous queries, {@link + * ExecutionInfo#getTracingId()} will always be {@code null}. + */ + @NonNull + @Override + default ExecutionInfo getExecutionInfo() { + List infos = getExecutionInfos(); + return infos.get(infos.size() - 1); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java new file mode 100644 index 00000000000..1c647b33b92 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.continuous; + +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestSyncProcessor; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.session.Session; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import java.util.concurrent.CompletionStage; + +/** + * A session that has the ability to execute continuous paging queries. + * + *

Continuous paging is a new method of streaming bulk amounts of records from DataStax
+ * Enterprise (DSE) to the Java Driver, available since DSE 5.1. It is mainly intended to be
+ * leveraged by DSE Analytics and Apache Spark™, or by any similar analytics tool that needs to
+ * read large portions of a table in a single operation, as quickly and reliably as possible.
+ *

Continuous paging provides the best performance improvement over regular paging when the
+ * following conditions are met:
+ *

    + *
  1. The statement must target a single partition or a token range owned by one single replica; + * in practice, this means that the statement must have either a {@linkplain + * Statement#setRoutingKey(ByteBuffer) routing key} or a {@linkplain + * Statement#setRoutingToken(Token) routing token} set; + *
  2. The coordinator must be a replica; in practice, this is usually achieved by using + * token-aware routing (if you are using the driver's default {@link LoadBalancingPolicy}, + * then this condition is met); + *
  3. The consistency level must be {@link DefaultConsistencyLevel#ONE ONE} (or {@link + * DefaultConsistencyLevel#LOCAL_ONE LOCAL_ONE}). + *
+ * + *
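+ *
+ * <p>For example, a statement satisfying conditions 1 and 3 above could be built as follows (a
+ * minimal sketch; the keyspace, table and key values are placeholders):
+ *
+ * <pre>{@code
+ * SimpleStatement statement =
+ *     SimpleStatement.newInstance("SELECT * FROM ks.my_table WHERE pk = ?", key)
+ *         .setRoutingKeyspace("ks")
+ *         .setRoutingKey(serializedKey) // or setRoutingToken(token)
+ *         .setConsistencyLevel(DefaultConsistencyLevel.LOCAL_ONE);
+ * }</pre>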

If the above conditions are met, the coordinator will be able to optimize the read path and
+ * serve results from local data, thus significantly improving response times; if, however, these
+ * conditions cannot be met, continuous paging will still work, but response times won't be
+ * significantly better than those of regular paging.
+ *
+ * @see Continuous paging options in cassandra.yaml configuration file
+ * @see DSE Continuous Paging Tuning and Support Guide
+ */
+public interface ContinuousSession extends Session {
+
+  /**
+   * Executes the provided query with continuous paging synchronously.
+   *

This method takes care of chaining the successive results into a convenient iterable, + * provided that you always access the result from the same thread. For more flexibility, consider + * using the {@linkplain #executeContinuouslyAsync(Statement) asynchronous variant} of this method + * instead. + * + *
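+   *
+   * <p>For example (a minimal sketch, assuming {@code statement} is already defined):
+   *
+   * <pre>{@code
+   * ContinuousResultSet rows = session.executeContinuously(statement);
+   * for (Row row : rows) {
+   *   // next pages are fetched transparently as the iteration progresses
+   * }
+   * }</pre>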

See {@link ContinuousSession} for more explanations about continuous paging. + * + *

This feature is only available with DataStax Enterprise. Executing continuous queries
+   * against an Apache Cassandra® cluster will result in a runtime error.
+   *
+   * @param statement the query to execute.
+   * @return a synchronous iterable on the results.
+   */
+  @NonNull
+  default ContinuousResultSet executeContinuously(@NonNull Statement<?> statement) {
+    return Objects.requireNonNull(
+        execute(statement, ContinuousCqlRequestSyncProcessor.CONTINUOUS_RESULT_SYNC));
+  }
+
+  /**
+   * Executes the provided query with continuous paging asynchronously.
+   *

The server will push all requested pages asynchronously, according to the options defined in
+   * the current execution profile. The client should consume all pages as quickly as possible, to
+   * avoid blocking the server for too long. The server will adjust the rate to match the client's
+   * speed, but it will give up if the client does not consume any pages for a period of time equal
+   * to the read request timeout.
+   *
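+   * <p>For example, pages could be consumed as they arrive (a minimal sketch; {@code process}
+   * stands for application code, and {@code consume} is assumed to live in an enclosing class):
+   *
+   * <pre>{@code
+   * void consume(ContinuousAsyncResultSet pagedResult) {
+   *   for (Row row : pagedResult.currentPage()) {
+   *     process(row);
+   *   }
+   *   if (pagedResult.hasMorePages()) {
+   *     pagedResult.fetchNextPage().thenAccept(this::consume);
+   *   }
+   * }
+   * session.executeContinuouslyAsync(statement).thenAccept(this::consume);
+   * }</pre>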

See {@link ContinuousSession} for more explanations about continuous paging. + * + *

This feature is only available with DataStax Enterprise. Executing continuous queries
+   * against an Apache Cassandra® cluster will result in a runtime error.
+   *
+   * @param statement the query to execute.
+   * @return a future to the first asynchronous result.
+   */
+  @NonNull
+  default CompletionStage<ContinuousAsyncResultSet> executeContinuouslyAsync(
+      @NonNull Statement<?> statement) {
+    return Objects.requireNonNull(
+        execute(statement, ContinuousCqlRequestAsyncProcessor.CONTINUOUS_RESULT_ASYNC));
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java
new file mode 100644
index 00000000000..6b645ad05bf
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveResultSet.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.cql.continuous.reactive;
+
+import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet;
+import com.datastax.oss.driver.api.core.cql.Statement;
+
+/**
+ * A marker interface for publishers returned by {@link ContinuousReactiveSession}.
+ *
+ * @see ContinuousReactiveSession#executeContinuouslyReactive(String)
+ * @see ContinuousReactiveSession#executeContinuouslyReactive(Statement)
+ */
+public interface ContinuousReactiveResultSet extends ReactiveResultSet {}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java
new file mode 100644
index 00000000000..d00013731cb
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.api.core.cql.continuous.reactive; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.session.Session; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import org.reactivestreams.Publisher; + +/** + * A {@link Session} that offers utility methods to issue queries using reactive-style programming + * and continuous paging, combined together. + * + *

Methods in this interface all return {@link ContinuousReactiveResultSet} instances. All + * publishers support multiple subscriptions in a unicast fashion: each subscriber triggers an + * independent request execution and gets its own copy of the results. + * + *
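+ *
+ * <p>For example, with a Reactive Streams implementation such as Project Reactor (an assumption;
+ * any {@code org.reactivestreams}-compatible library works, and the table name is a placeholder):
+ *
+ * <pre>{@code
+ * Flux.from(session.executeContinuouslyReactive("SELECT * FROM ks.my_table"))
+ *     .doOnNext(this::process) // process stands for application code
+ *     .blockLast();
+ * }</pre>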

Also, note that the publishers may emit items to their subscribers on an internal driver IO + * thread. Subscriber implementors are encouraged to abide by Reactive Streams + * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside + * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down + * the driver and impact performance. Instead, they should asynchronously dispatch received signals + * to their processing logic. + * + * @see ReactiveRow + */ +public interface ContinuousReactiveSession extends Session { + + /** + * Returns a {@link Publisher} that, once subscribed to, executes the given query continuously and + * emits all the results. + * + *

See {@link ContinuousSession} for more explanations about continuous paging. + * + *

This feature is only available with DataStax Enterprise. Executing continuous queries + * against an Apache Cassandra® cluster will result in a runtime error. + * + * @param query the query to execute. + * @return The {@link Publisher} that will publish the returned results. + */ + @NonNull + default ContinuousReactiveResultSet executeContinuouslyReactive(@NonNull String query) { + return executeContinuouslyReactive(SimpleStatement.newInstance(query)); + } + + /** + * Returns a {@link Publisher} that, once subscribed to, executes the given query continuously and + * emits all the results. + * + *

See {@link ContinuousSession} for more explanations about continuous paging. + * + *

This feature is only available with DataStax Enterprise. Executing continuous queries + * against an Apache Cassandra® cluster will result in a runtime error. + * + * @param statement the statement to execute. + * @return The {@link Publisher} that will publish the returned results. + */ + @NonNull + default ContinuousReactiveResultSet executeContinuouslyReactive(@NonNull Statement statement) { + return Objects.requireNonNull( + execute(statement, ContinuousCqlRequestReactiveProcessor.CONTINUOUS_REACTIVE_RESULT_SET)); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java new file mode 100644 index 00000000000..55a898cd3ee --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveQueryMetadata.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.reactive; + +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.reactivestreams.Publisher; + +/** + * Interface implemented by all the reactive result set publishers provided by the driver, and + * notably by {@link ReactiveResultSet}. + */ +public interface ReactiveQueryMetadata { + + /** + * Returns metadata about the {@linkplain ColumnDefinitions columns} contained in this result set. + * + *

This publisher emits exactly one item as soon as the first response arrives, then completes.
+   * If the query execution fails within the first request-response cycle, then this publisher will
+   * fail with the same error; however, if the error happens after the first response, then this
+   * publisher will already be complete and will not acknowledge that error in any way.
+   *

By default, publishers returned by this method do not support multiple subscriptions. + * + * @see ReactiveRow#getColumnDefinitions() + */ + @NonNull + Publisher getColumnDefinitions(); + + /** + * Returns {@linkplain ExecutionInfo information about the execution} of all requests that have + * been performed so far to assemble this result set. + * + *

If the query is not paged, this publisher will emit exactly one item as soon as the response + * arrives, then complete. If the query is paged, it will emit multiple items, one per page; then + * it will complete when the last page arrives. If the query execution fails, then this publisher + * will fail with the same error. + * + *

By default, publishers returned by this method do not support multiple subscriptions. + * + * @see ReactiveRow#getExecutionInfo() + */ + @NonNull + Publisher getExecutionInfos(); + + /** + * If the query that produced this result was a conditional update, indicates whether it was + * successfully applied. + * + *

This publisher emits exactly one item as soon as the first response arrives, then completes.
+   * If the query execution fails within the first request-response cycle, then this publisher will
+   * fail with the same error; however, if the error happens after the first response, then this
+   * publisher will already be complete and will not acknowledge that error in any way.
+   *

By default, publishers returned by this method do not support multiple subscriptions. + * + *

For consistency, this publisher always emits {@code true} for non-conditional queries
+   * (although there is no reason to call the method in that case). This is also the case for
+   * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF
+   * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column.
+   *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this + * method to always return {@code true} for batches containing conditional queries. + * + * @see ReactiveRow#wasApplied() + */ + @NonNull + Publisher wasApplied(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java new file mode 100644 index 00000000000..0e44dab8cab --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.reactive; + +import com.datastax.oss.driver.api.core.cql.Statement; +import org.reactivestreams.Publisher; + +/** + * A {@link Publisher} of {@link ReactiveRow}s returned by a {@link ReactiveSession}. + * + *

By default, all implementations returned by the driver are cold, unicast, single-subscriber
+ * only publishers. In other words, they do not support multiple subscriptions; consider caching
+ * the results produced by such publishers if they need to be consumed by more than one downstream
+ * subscriber.
+ *

Also, note that reactive result sets may emit items to their subscribers on an internal driver + * IO thread. Subscriber implementors are encouraged to abide by Reactive Streams + * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside + * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down + * the driver and impact performance. Instead, they should asynchronously dispatch received signals + * to their processing logic. + * + *
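+ *
+ * <p>For example, with a Reactive Streams implementation such as Project Reactor (an assumption;
+ * any implementation can subscribe to this publisher, and {@code session} is assumed to be a
+ * {@link ReactiveSession}):
+ *
+ * <pre>{@code
+ * ReactiveResultSet rs = session.executeReactive("SELECT release_version FROM system.local");
+ * Flux.from(rs).map(row -> row.getString("release_version")).subscribe(System.out::println);
+ * }</pre>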

This interface exists mainly to expose useful information about {@linkplain + * #getExecutionInfos() request execution} and {@linkplain #getColumnDefinitions() query metadata}. + * This is particularly convenient for queries that do not return rows; for queries that do return + * rows, it is also possible, and oftentimes easier, to access that same information {@linkplain + * ReactiveRow at row level}. + * + * @see ReactiveSession#executeReactive(String) + * @see ReactiveSession#executeReactive(Statement) + * @see ReactiveRow + */ +public interface ReactiveResultSet extends Publisher, ReactiveQueryMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java new file mode 100644 index 00000000000..c3b94689580 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.reactive; + +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.type.DataTypes; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * A row produced by a {@linkplain ReactiveResultSet reactive result set}. + * + *

This is essentially an extension of the driver's {@link Row} object that also exposes useful + * information about {@linkplain #getExecutionInfo() request execution} and {@linkplain + * #getColumnDefinitions() query metadata} (note however that this information is also exposed at + * result set level for convenience). + * + * @see ReactiveSession + * @see ReactiveResultSet + */ +public interface ReactiveRow extends Row { + + /** + * Returns the column definitions contained in this row. + * + *

This object is the same for all rows pertaining to the same result set. + * + * @return the column definitions contained in this row. + * @see ReactiveResultSet#getColumnDefinitions() + */ + @NonNull + @Override + ColumnDefinitions getColumnDefinitions(); + + /** + * The execution information for the paged request that produced this result. + * + *

This object is the same for two rows pertaining to the same page, but differs for rows + * pertaining to different pages. + * + * @return the execution information for the paged request that produced this result. + * @see ReactiveResultSet#getExecutionInfos() + */ + @NonNull + ExecutionInfo getExecutionInfo(); + + /** + * If the query that produced this result was a conditional update, indicates whether it was + * successfully applied. + * + *

This is equivalent to calling: + * + *

{@code
+   * ReactiveRow row = ...
+   * boolean wasApplied = row.getBoolean("[applied]");
+   * }
+ * + *

For consistency, this method always returns {@code true} for non-conditional queries + * (although there is no reason to call the method in that case). This is also the case for + * conditional DDL statements ({@code CREATE KEYSPACE... IF NOT EXISTS}, {@code CREATE TABLE... IF + * NOT EXISTS}), for which Cassandra doesn't return an {@code [applied]} column. + * + *

Note that, for versions of Cassandra strictly lower than 2.1.0-rc2, a server-side bug (CASSANDRA-7337) causes this + * method to always return {@code true} for batches containing conditional queries. + * + *

This method always returns the same value for all results in the result set.
+   *
+   * @return {@code true} for non-conditional queries and for conditional queries that were
+   *     successfully applied, {@code false} otherwise.
+   */
+  default boolean wasApplied() {
+    return !getColumnDefinitions().contains("[applied]")
+        || !getColumnDefinitions().get("[applied]").getType().equals(DataTypes.BOOLEAN)
+        || getBoolean("[applied]");
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java
new file mode 100644
index 00000000000..2fd8ffe41c2
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.cql.reactive;
+
+import com.datastax.dse.driver.internal.core.cql.reactive.CqlRequestReactiveProcessor;
+import com.datastax.oss.driver.api.core.cql.SimpleStatement;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.session.Session;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Map;
+import java.util.Objects;
+import org.reactivestreams.Publisher;
+
+/**
+ * A {@link Session} that offers utility methods to issue queries using reactive-style programming.
+ *

Methods in this interface all return {@link ReactiveResultSet} instances. See the javadocs of
+ * this interface for important remarks and caveats regarding the subscription to and consumption
+ * of reactive result sets.
+ *
+ * @see ReactiveResultSet
+ * @see ReactiveRow
+ */
+public interface ReactiveSession extends Session {
+
+  /**
+   * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all
+   * the results.
+   *

This is an alias for {@link #executeReactive(Statement)
+   * executeReactive(SimpleStatement.newInstance(query))}.
+   *
+   * @param query the query to execute.
+   * @return The {@link Publisher} that will publish the returned results.
+   * @see SimpleStatement#newInstance(String)
+   */
+  @NonNull
+  default ReactiveResultSet executeReactive(@NonNull String query) {
+    return executeReactive(SimpleStatement.newInstance(query));
+  }
+
+  /**
+   * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all
+   * the results.
+   *

This is an alias for {@link #executeReactive(Statement)
+   * executeReactive(SimpleStatement.newInstance(query, values))}.
+   *
+   * @param query the query to execute.
+   * @param values the values for placeholders in the query string. Individual values can be {@code
+   *     null}, but the vararg array itself can't.
+   * @return The {@link Publisher} that will publish the returned results.
+   * @see SimpleStatement#newInstance(String,Object...)
+   */
+  @NonNull
+  default ReactiveResultSet executeReactive(@NonNull String query, @NonNull Object... values) {
+    return executeReactive(SimpleStatement.newInstance(query, values));
+  }
+
+  /**
+   * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all
+   * the results.
+   *

This is an alias for {@link #executeReactive(Statement)
+   * executeReactive(SimpleStatement.newInstance(query, values))}.
+   *
+   * @param query the query to execute.
+   * @param values the values for named placeholders in the query string. Individual values can be
+   *     {@code null}, but the map itself can't.
+   * @return The {@link Publisher} that will publish the returned results.
+   * @see SimpleStatement#newInstance(String,Map)
+   */
+  @NonNull
+  default ReactiveResultSet executeReactive(
+      @NonNull String query, @NonNull Map<String, Object> values) {
+    return executeReactive(SimpleStatement.newInstance(query, values));
+  }
+
+  /**
+   * Returns a {@link Publisher} that, once subscribed to, executes the given query and emits all
+   * the results.
+   *
+   * @param statement the statement to execute.
+   * @return The {@link Publisher} that will publish the returned results.
+   */
+  @NonNull
+  default ReactiveResultSet executeReactive(@NonNull Statement<?> statement) {
+    return Objects.requireNonNull(
+        execute(statement, CqlRequestReactiveProcessor.REACTIVE_RESULT_SET));
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java
new file mode 100644
index 00000000000..01a5f514aba
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/cql/reactive/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Types related to CQL execution using reactive-style programming.
+ *

Note that this is located in a {@code dse} package for historical reasons; reactive queries + * can now be used with open-source Cassandra as well. + */ +package com.datastax.dse.driver.api.core.cql.reactive; diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java new file mode 100644 index 00000000000..66a5708832e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Geometry.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.geometry; + +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; + +/** + * The driver-side representation for a DSE geospatial type. + * + *

+ *     Row row = dseSession.execute("SELECT coords FROM points_of_interest WHERE name = 'Eiffel Tower'").one();
+ *     Point coords = row.get("coords", Point.class);
+ * 
+ * + * The default implementations returned by the driver are immutable and serializable. If you write + * your own implementations, they should at least be thread-safe; serializability is not mandatory, + * but recommended for use with some 3rd-party tools like Apache Spark ™. + */ +public interface Geometry { + + /** + * Returns a Well-known Text (WKT) + * representation of this geospatial type. + */ + @NonNull + String asWellKnownText(); + + /** + * Returns a Well-known + * Binary (WKB) representation of this geospatial type. + * + *

Note that, due to DSE implementation details, the resulting byte buffer always uses + * little-endian order, regardless of the platform's native order. + */ + @NonNull + ByteBuffer asWellKnownBinary(); + + /** Returns a JSON representation of this geospatial type. */ + @NonNull + String asGeoJson(); + + /** + * Tests whether this geospatial type instance contains another instance. + * + * @param other the other instance. + * @return whether {@code this} contains {@code other}. + */ + boolean contains(@NonNull Geometry other); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java new file mode 100644 index 00000000000..7f77b3202a2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/LineString.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.geometry; + +import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; +import com.esri.core.geometry.ogc.OGCLineString; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.List; + +/** + * The driver-side representation for DSE's {@code LineString}. + * + *

This is a curve in a two-dimensional XY-plane, represented by a set of points (with linear + * interpolation between them). + * + *
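+ *
+ * <p>For example (a minimal sketch):
+ *
+ * <pre>{@code
+ * LineString line =
+ *     LineString.fromPoints(
+ *         Point.fromCoordinates(30, 10),
+ *         Point.fromCoordinates(10, 30),
+ *         Point.fromCoordinates(40, 40));
+ * String wkt = line.asWellKnownText();
+ * }</pre>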

The default implementation returned by the driver is immutable. + */ +public interface LineString extends Geometry { + /** + * Creates a line string from its Well-known Text (WKT) representation. + * + * @param source the Well-known Text representation to parse. + * @return the line string represented by the WKT. + * @throws IllegalArgumentException if the string does not contain a valid Well-known Text + * representation. + */ + @NonNull + static LineString fromWellKnownText(@NonNull String source) { + return new DefaultLineString(DefaultGeometry.fromOgcWellKnownText(source, OGCLineString.class)); + } + + /** + * Creates a line string from its Well-known Binary + * (WKB) representation. + * + * @param source the Well-known Binary representation to parse. + * @return the line string represented by the WKB. + * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid + * Well-known Binary representation. + */ + @NonNull + static LineString fromWellKnownBinary(@NonNull ByteBuffer source) { + return new DefaultLineString( + DefaultGeometry.fromOgcWellKnownBinary(source, OGCLineString.class)); + } + + /** + * Creates a line string from a GeoJSON + * LineString representation. + * + * @param source the GeoJSON + * LineString representation to parse. + * @return the line string represented by the GeoJSON LineString. + * @throws IllegalArgumentException if the string does not contain a valid GeoJSON LineString + * representation. + */ + @NonNull + static LineString fromGeoJson(@NonNull String source) { + return new DefaultLineString(DefaultGeometry.fromOgcGeoJson(source, OGCLineString.class)); + } + + /** Creates a line string from two or more points. */ + @NonNull + static LineString fromPoints(@NonNull Point p1, @NonNull Point p2, @NonNull Point... pn) { + return new DefaultLineString(p1, p2, pn); + } + + @NonNull + List getPoints(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java new file mode 100644 index 00000000000..b064b3fb222 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Point.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.geometry; + +import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; +import com.esri.core.geometry.ogc.OGCPoint; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; + +/** + * The driver-side representation of DSE's {@code Point}. + * + *

This is a zero-dimensional object that represents a specific (X,Y) location in a
+ * two-dimensional XY-plane. In the case of Geographic Coordinate Systems, the X coordinate is the
+ * longitude and the Y coordinate is the latitude.
+ *
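+ * <p>For example (a minimal sketch; the WKT shown in the comment is indicative):
+ *
+ * <pre>{@code
+ * Point point = Point.fromCoordinates(2.2945, 48.8584); // longitude (X), latitude (Y)
+ * String wkt = point.asWellKnownText(); // e.g. "POINT (2.2945 48.8584)"
+ * }</pre>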

The default implementation returned by the driver is immutable. + */ +public interface Point extends Geometry { + + /** + * Creates a point from its Well-known + * Text (WKT) representation. + * + * @param source the Well-known Text representation to parse. + * @return the point represented by the WKT. + * @throws IllegalArgumentException if the string does not contain a valid Well-known Text + * representation. + */ + @NonNull + static Point fromWellKnownText(@NonNull String source) { + return new DefaultPoint(DefaultGeometry.fromOgcWellKnownText(source, OGCPoint.class)); + } + + /** + * Creates a point from its Well-known Binary + * (WKB) representation. + * + * @param source the Well-known Binary representation to parse. + * @return the point represented by the WKB. + * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid + * Well-known Binary representation. + */ + @NonNull + static Point fromWellKnownBinary(@NonNull ByteBuffer source) { + return new DefaultPoint(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPoint.class)); + } + + /** + * Creates a point from a GeoJSON + * Point representation. + * + * @param source the GeoJSON Point + * representation to parse. + * @return the point represented by the GeoJSON Point. + * @throws IllegalArgumentException if the string does not contain a valid GeoJSON Point representation. + */ + @NonNull + static Point fromGeoJson(@NonNull String source) { + return new DefaultPoint(DefaultGeometry.fromOgcGeoJson(source, OGCPoint.class)); + } + + /** + * Creates a new point. + * + * @param x The X coordinate of this point (or its longitude in Geographic Coordinate Systems). + * @param y The Y coordinate of this point (or its latitude in Geographic Coordinate Systems). + * @return the point represented by coordinates. + */ + @NonNull + static Point fromCoordinates(double x, double y) { + return new DefaultPoint(x, y); + } + + /** + * Returns the X coordinate of this 2D point (or its longitude in Geographic Coordinate Systems). + */ + double X(); + + /** + * Returns the Y coordinate of this 2D point (or its latitude in Geographic Coordinate Systems). + */ + double Y(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java new file mode 100644 index 00000000000..d793704defa --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/data/geometry/Polygon.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.data.geometry; + +import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; +import com.esri.core.geometry.ogc.OGCPolygon; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.List; + +/** + * The driver-side representation of DSE's {@code Polygon}. + * + *

This is a planar surface in a two-dimensional XY-plane, represented by one exterior boundary + * and 0 or more interior boundaries. + * + *

The default implementation returned by the driver is immutable. + */ +public interface Polygon extends Geometry { + /** + * Creates a polygon from its Well-known + * Text (WKT) representation. + * + * @param source the Well-known Text representation to parse. + * @return the polygon represented by the WKT. + * @throws IllegalArgumentException if the string does not contain a valid Well-known Text + * representation. + */ + @NonNull + static Polygon fromWellKnownText(@NonNull String source) { + return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownText(source, OGCPolygon.class)); + } + + /** + * Creates a polygon from its Well-known Binary + * (WKB) representation. + * + * @param source the Well-known Binary representation to parse. + * @return the polygon represented by the WKB. + * @throws IllegalArgumentException if the provided {@link ByteBuffer} does not contain a valid + * Well-known Binary representation. + */ + @NonNull + static Polygon fromWellKnownBinary(@NonNull ByteBuffer source) { + return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPolygon.class)); + } + + /** + * Creates a polygon from a GeoJSON + * Polygon representation. + * + * @param source the GeoJSON Polygon + * representation to parse. + * @return the polygon represented by the GeoJSON Polygon. + * @throws IllegalArgumentException if the string does not contain a valid GeoJSON Polygon representation. + */ + @NonNull + static Polygon fromGeoJson(@NonNull String source) { + return new DefaultPolygon(DefaultGeometry.fromOgcGeoJson(source, OGCPolygon.class)); + } + + /** Creates a polygon from a series of 3 or more points. */ + @NonNull + static Polygon fromPoints( + @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn) { + return new DefaultPolygon(p1, p2, p3, pn); + } + + /** + * Returns a polygon builder. + * + *

This is intended for complex polygons with multiple rings (i.e. holes inside the polygon). + * For simple cases, consider {@link #fromPoints(Point, Point, Point, Point...)} instead. + */ + @NonNull + static Builder builder() { + return new DefaultPolygon.Builder(); + } + + /** Returns the external ring of the polygon. */ + @NonNull + List getExteriorRing(); + + /** + * Returns the internal rings of the polygon, i.e. any holes inside of it (or islands inside of + * the holes). + */ + @NonNull + List> getInteriorRings(); + + /** Provides a simple DSL to build a polygon. */ + interface Builder { + /** + * Adds a new ring for this polygon. + * + *

There can be one or more outer rings and zero or more inner rings. If a polygon has an + * inner ring, the inner ring looks like a hole. If the hole contains another outer ring, that + * outer ring looks like an island. + * + *
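+     *
+     * <p>For example, a square with a square hole (a minimal sketch):
+     *
+     * <pre>{@code
+     * Polygon polygon =
+     *     Polygon.builder()
+     *         .addRing(
+     *             Point.fromCoordinates(0, 0),
+     *             Point.fromCoordinates(0, 10),
+     *             Point.fromCoordinates(10, 10),
+     *             Point.fromCoordinates(10, 0))
+     *         .addRing(
+     *             Point.fromCoordinates(4, 4),
+     *             Point.fromCoordinates(4, 6),
+     *             Point.fromCoordinates(6, 6),
+     *             Point.fromCoordinates(6, 4))
+     *         .build();
+     * }</pre>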

There must be one "main" outer ring that contains all the others. + */ + @NonNull + Builder addRing(@NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn); + + @NonNull + Polygon build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java new file mode 100644 index 00000000000..3dd48915dba --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRange.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.time; + +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.base.Strings; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; +import java.text.ParseException; +import java.time.ZonedDateTime; +import java.util.Objects; +import java.util.Optional; + +/** + * A date range, as defined by the server type {@code + * org.apache.cassandra.db.marshal.DateRangeType}, corresponding to the Apache Solr type {@code + * DateRangeField}. + * + *

A date range can be either {@linkplain DateRange#DateRange(DateRangeBound) single-bounded}, in + * which case it represents a unique instant (e.g. "{@code 2001-01-01}"), or {@linkplain + * #DateRange(DateRangeBound, DateRangeBound) double-bounded}, in which case it represents an + * interval of time (e.g. "{@code [2001-01-01 TO 2002]}"). + * + *

Date range {@linkplain DateRangeBound bounds} are always inclusive; they must be either valid + * dates, or the special value {@link DateRangeBound#UNBOUNDED UNBOUNDED}, represented by a "{@code + * *}", e.g. "{@code [2001 TO *]}". + * + *

Instances can be more easily created with the {@link #parse(String)} method. + * + *
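+ *
+ * <p>For example (a minimal sketch; both calls declare {@link java.text.ParseException}):
+ *
+ * <pre>{@code
+ * DateRange singleBounded = DateRange.parse("2001-01-01");
+ * DateRange doubleBounded = DateRange.parse("[2001-01-01 TO 2002]");
+ * }</pre>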

This class is immutable and thread-safe. + * + * @since DSE 5.1 + */ +public class DateRange implements Serializable { + + /** + * Parses the given string as a date range. + * + *

The given input must be compliant with Apache Solr type {@code + * DateRangeField} syntax; it can either be a {@linkplain #DateRange(DateRangeBound) + * single-bounded range}, or a {@linkplain #DateRange(DateRangeBound, DateRangeBound) + * double-bounded range}. + * + * @throws ParseException if the given string could not be parsed into a valid range. + * @see DateRangeBound#parseLowerBound(String) + * @see DateRangeBound#parseUpperBound(String) + */ + @NonNull + public static DateRange parse(@NonNull String source) throws ParseException { + if (Strings.isNullOrEmpty(source)) { + throw new ParseException("Date range is null or empty", 0); + } + + if (source.charAt(0) == '[') { + if (source.charAt(source.length() - 1) != ']') { + throw new ParseException( + "If date range starts with '[' it must end with ']'; got " + source, + source.length() - 1); + } + int middle = source.indexOf(" TO "); + if (middle < 0) { + throw new ParseException( + "If date range starts with '[' it must contain ' TO '; got " + source, 0); + } + String lowerBoundString = source.substring(1, middle); + int upperBoundStart = middle + 4; + String upperBoundString = source.substring(upperBoundStart, source.length() - 1); + DateRangeBound lowerBound; + try { + lowerBound = DateRangeBound.parseLowerBound(lowerBoundString); + } catch (Exception e) { + throw newParseException("Cannot parse date range lower bound: " + source, 1, e); + } + DateRangeBound upperBound; + try { + upperBound = DateRangeBound.parseUpperBound(upperBoundString); + } catch (Exception e) { + throw newParseException( + "Cannot parse date range upper bound: " + source, upperBoundStart, e); + } + return new DateRange(lowerBound, upperBound); + } else { + try { + return new DateRange(DateRangeBound.parseLowerBound(source)); + } catch (Exception e) { + throw newParseException("Cannot parse single date range bound: " + source, 0, e); + } + } + } + + @NonNull private final DateRangeBound lowerBound; + @Nullable private final DateRangeBound upperBound; + + /** + * Creates a "single bounded" instance, i.e., a date range whose upper and lower bounds are + * identical. + * + * @throws NullPointerException if {@code singleBound} is null. + */ + public DateRange(@NonNull DateRangeBound singleBound) { + this.lowerBound = Preconditions.checkNotNull(singleBound, "singleBound cannot be null"); + this.upperBound = null; + } + + /** + * Creates an instance composed of two distinct bounds. + * + * @throws NullPointerException if {@code lowerBound} or {@code upperBound} is null. + * @throws IllegalArgumentException if both {@code lowerBound} and {@code upperBound} are not + * unbounded and {@code lowerBound} is greater than {@code upperBound}. + */ + public DateRange(@NonNull DateRangeBound lowerBound, @NonNull DateRangeBound upperBound) { + Preconditions.checkNotNull(lowerBound, "lowerBound cannot be null"); + Preconditions.checkNotNull(upperBound, "upperBound cannot be null"); + if (!lowerBound.isUnbounded() + && !upperBound.isUnbounded() + && lowerBound.getTimestamp().compareTo(upperBound.getTimestamp()) >= 0) { + throw new IllegalArgumentException( + String.format( + "Lower bound of a date range should be before upper bound, got: [%s TO %s]", + lowerBound, upperBound)); + } + this.lowerBound = lowerBound; + this.upperBound = upperBound; + } + + /** Returns the lower bound of this range (inclusive). 
*/
+  @NonNull
+  public DateRangeBound getLowerBound() {
+    return lowerBound;
+  }
+
+  /**
+   * Returns the upper bound of this range (inclusive), or empty if the range is {@linkplain
+   * #isSingleBounded() single-bounded}.
+   */
+  @NonNull
+  public Optional<DateRangeBound> getUpperBound() {
+    return Optional.ofNullable(upperBound);
+  }
+
+  /**
+   * Returns whether this range is single-bounded, i.e. if the upper and lower bounds are identical.
+   */
+  public boolean isSingleBounded() {
+    return upperBound == null;
+  }
+
+  /**
+   * Returns the string representation of this range, in a format compatible with Apache Solr
+   * DateRangeField syntax.
+   *
+   * @see DateRangeBound#toString()
+   */
+  @NonNull
+  @Override
+  public String toString() {
+    if (isSingleBounded()) {
+      return lowerBound.toString();
+    } else {
+      return String.format("[%s TO %s]", lowerBound, upperBound);
+    }
+  }
+
+  @Override
+  public boolean equals(@Nullable Object other) {
+    if (other == this) {
+      return true;
+    } else if (other instanceof DateRange) {
+      DateRange that = (DateRange) other;
+      return Objects.equals(this.lowerBound, that.lowerBound)
+          && Objects.equals(this.upperBound, that.upperBound);
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(lowerBound, upperBound);
+  }
+
+  private static ParseException newParseException(String message, int offset, Exception cause) {
+    ParseException parseException = new ParseException(message, offset);
+    parseException.initCause(cause);
+    return parseException;
+  }
+
+  /**
+   * This object gets replaced by an internal proxy for serialization.
+   *
+   * @serialData the lower bound timestamp and precision, followed by the upper bound timestamp and
+   *     precision, or two {@code null}s if the range is single-bounded.
+   */
+  private Object writeReplace() {
+    return new SerializationProxy(this);
+  }
+
+  private static class SerializationProxy implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+
+    private final ZonedDateTime lowerBoundTimestamp;
+    private final DateRangePrecision lowerBoundPrecision;
+    private final ZonedDateTime upperBoundTimestamp;
+    private final DateRangePrecision upperBoundPrecision;
+
+    SerializationProxy(DateRange input) {
+      this.lowerBoundTimestamp = input.lowerBound.getTimestamp();
+      this.lowerBoundPrecision = input.lowerBound.getPrecision();
+      if (input.upperBound != null) {
+        this.upperBoundTimestamp = input.upperBound.getTimestamp();
+        this.upperBoundPrecision = input.upperBound.getPrecision();
+      } else {
+        this.upperBoundTimestamp = null;
+        this.upperBoundPrecision = null;
+      }
+    }
+
+    private Object readResolve() {
+      if (upperBoundTimestamp == null ^ upperBoundPrecision == null) {
+        // Should not happen, but protect against corrupted streams
+        throw new IllegalArgumentException(
+            "Invalid serialized form, upper bound timestamp and precision "
+                + "should be either both null or both non-null");
+      }
+
+      if (upperBoundTimestamp == null) {
+        return new DateRange(DateRangeBound.lowerBound(lowerBoundTimestamp, lowerBoundPrecision));
+      } else {
+        return new DateRange(
+            DateRangeBound.lowerBound(lowerBoundTimestamp, lowerBoundPrecision),
+            DateRangeBound.upperBound(upperBoundTimestamp, upperBoundPrecision));
+      }
+    }
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java
new file mode 100644
index 00000000000..1621b8bf742
--- /dev/null
+++ 
b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangeBound.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.time; + +import com.datastax.dse.driver.internal.core.search.DateRangeUtil; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.text.ParseException; +import java.time.ZonedDateTime; +import java.util.Calendar; +import java.util.Objects; + +/** + * A date range bound. + * + *

It is composed of a {@link ZonedDateTime} field and a corresponding {@link + * DateRangePrecision}. + * + *

Date range bounds are inclusive. The special value {@link #UNBOUNDED} denotes an unbounded
+ * (infinite) bound, represented by a {@code *} sign.
+ *

This class is immutable and thread-safe. + */ +public class DateRangeBound { + + /** + * The unbounded {@link DateRangeBound} instance. It is syntactically represented by a {@code *} + * (star) sign. + */ + public static final DateRangeBound UNBOUNDED = new DateRangeBound(); + + /** + * Parses the given input as a lower date range bound. + * + *

The input should be a Lucene-compliant + * string. + * + *

The returned bound will have its {@linkplain DateRangePrecision precision} inferred from the + * input, and its timestamp will be {@linkplain DateRangePrecision#roundDown(ZonedDateTime) + * rounded down} to that precision. + * + *
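+   *
+   * <p>For example (a minimal sketch):
+   *
+   * <pre>{@code
+   * DateRangeBound bound = DateRangeBound.parseLowerBound("2001-01");
+   * bound.getPrecision(); // MONTH
+   * bound.getTimestamp(); // 2001-01-01T00:00Z, rounded down to that precision
+   * }</pre>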

Note that, in order to align with the server's parsing behavior, dates will always be parsed
+   * in the UTC time zone.
+   *
+   * @throws NullPointerException if {@code source} is {@code null}.
+   * @throws ParseException if the given input cannot be parsed.
+   */
+  @NonNull
+  public static DateRangeBound parseLowerBound(@NonNull String source) throws ParseException {
+    Preconditions.checkNotNull(source);
+    Calendar calendar = DateRangeUtil.parseCalendar(source);
+    DateRangePrecision precision = DateRangeUtil.getPrecision(calendar);
+    return (precision == null)
+        ? UNBOUNDED
+        : lowerBound(DateRangeUtil.toZonedDateTime(calendar), precision);
+  }
+
+  /**
+   * Parses the given input as an upper date range bound.
+   *

The input should be a Lucene-compliant + * string. + * + *

The returned bound will have its {@linkplain DateRangePrecision precision} inferred from the
+   * input, and its timestamp will be {@linkplain DateRangePrecision#roundUp(ZonedDateTime) rounded
+   * up} to that precision.
+   *

Note that, in order to align with the server's behavior (e.g. when using date range literals
+   * in CQL query strings), dates must always be in the UTC time zone: an optional trailing {@code
+   * Z} is allowed, but no other time zone ID (not even {@code UTC}, {@code GMT} or {@code +00:00})
+   * is permitted.
+   *
+   * @throws NullPointerException if {@code source} is {@code null}.
+   * @throws ParseException if the given input cannot be parsed.
+   */
+  @NonNull
+  public static DateRangeBound parseUpperBound(@NonNull String source) throws ParseException {
+    Preconditions.checkNotNull(source);
+    Calendar calendar = DateRangeUtil.parseCalendar(source);
+    DateRangePrecision precision = DateRangeUtil.getPrecision(calendar);
+    return (precision == null)
+        ? UNBOUNDED
+        : upperBound(DateRangeUtil.toZonedDateTime(calendar), precision);
+  }
+
+  /**
+   * Creates a date range lower bound from the given date and precision. Temporal fields smaller
+   * than the precision will be rounded down.
+   */
+  public static DateRangeBound lowerBound(ZonedDateTime timestamp, DateRangePrecision precision) {
+    return new DateRangeBound(precision.roundDown(timestamp), precision);
+  }
+
+  /**
+   * Creates a date range upper bound from the given date and precision. Temporal fields smaller
+   * than the precision will be rounded up.
+   */
+  public static DateRangeBound upperBound(ZonedDateTime timestamp, DateRangePrecision precision) {
+    return new DateRangeBound(precision.roundUp(timestamp), precision);
+  }
+
+  @Nullable private final ZonedDateTime timestamp;
+  @Nullable private final DateRangePrecision precision;
+
+  private DateRangeBound(@NonNull ZonedDateTime timestamp, @NonNull DateRangePrecision precision) {
+    Preconditions.checkNotNull(timestamp);
+    Preconditions.checkNotNull(precision);
+    this.timestamp = timestamp;
+    this.precision = precision;
+  }
+
+  // constructor used for the special UNBOUNDED value
+  private DateRangeBound() {
+    this.timestamp = null;
+    this.precision = null;
+  }
+
+  /** Whether this bound is unbounded (i.e. denotes the special {@code *} value). */
+  public boolean isUnbounded() {
+    return this.timestamp == null && this.precision == null;
+  }
+
+  /**
+   * Returns the timestamp of this bound.
+   *
+   * @throws IllegalStateException if this bound is {@linkplain #isUnbounded() unbounded}.
+   */
+  @NonNull
+  public ZonedDateTime getTimestamp() {
+    if (isUnbounded()) {
+      throw new IllegalStateException(
+          "Can't call this method on UNBOUNDED, use isUnbounded() to check first");
+    }
+    assert timestamp != null;
+    return timestamp;
+  }
+
+  /**
+   * Returns the precision of this bound.
+   *
+   * @throws IllegalStateException if this bound is {@linkplain #isUnbounded() unbounded}.
+   */
+  @NonNull
+  public DateRangePrecision getPrecision() {
+    if (isUnbounded()) {
+      throw new IllegalStateException(
+          "Can't call this method on UNBOUNDED, use isUnbounded() to check first");
+    }
+    assert precision != null;
+    return precision;
+  }
+
+  /**
+   * Returns this bound as a Lucene-compliant string.
+   *

Unbounded bounds always return "{@code *}"; all other bounds are formatted in one of the + * common ISO-8601 datetime formats, depending on their precision. + * + *

Note that Lucene expects timestamps in UTC only. Timezone presence is always optional, and + * if present, it must be expressed with the symbol "Z" exclusively. Therefore this method does + * not include any timezone information in the returned string, except for bounds with {@linkplain + * DateRangePrecision#MILLISECOND millisecond} precision, where the symbol "Z" is always appended + * to the resulting string. + */ + @NonNull + @Override + public String toString() { + if (isUnbounded()) { + return "*"; + } else { + assert timestamp != null && precision != null; + return precision.format(timestamp); + } + } + + @Override + public boolean equals(@Nullable Object other) { + if (other == this) { + return true; + } else if (other instanceof DateRangeBound) { + DateRangeBound that = (DateRangeBound) other; + return Objects.equals(this.timestamp, that.timestamp) + && Objects.equals(this.precision, that.precision); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, precision); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java new file mode 100644 index 00000000000..ce811466c38 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecision.java @@ -0,0 +1,197 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.time; + +import com.datastax.dse.driver.internal.core.search.DateRangeUtil; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.temporal.ChronoField; +import java.time.temporal.ChronoUnit; +import java.util.Locale; +import java.util.Map; + +/** The precision of a {@link DateRangeBound}. 
*/ +public enum DateRangePrecision { + MILLISECOND( + 0x06, + ChronoUnit.MILLIS, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu-MM-dd'T'HH:mm:ss.SSS") + .optionalStart() + .appendZoneId() + .optionalEnd() + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)), + SECOND( + 0x05, + ChronoUnit.SECONDS, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu-MM-dd'T'HH:mm:ss") + .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)), + MINUTE( + 0x04, + ChronoUnit.MINUTES, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu-MM-dd'T'HH:mm") + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)), + HOUR( + 0x03, + ChronoUnit.HOURS, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu-MM-dd'T'HH") + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)), + DAY( + 0x02, + ChronoUnit.DAYS, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu-MM-dd") + .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)), + MONTH( + 0x01, + ChronoUnit.MONTHS, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu-MM") + .parseDefaulting(ChronoField.DAY_OF_MONTH, 1) + .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)), + YEAR( + 0x00, + ChronoUnit.YEARS, + new DateTimeFormatterBuilder() + .parseCaseSensitive() + .parseStrict() + .appendPattern("uuuu") + .parseDefaulting(ChronoField.MONTH_OF_YEAR, 1) + .parseDefaulting(ChronoField.DAY_OF_MONTH, 1) + .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .parseDefaulting(ChronoField.MILLI_OF_SECOND, 0) + .toFormatter() + .withZone(ZoneOffset.UTC) + .withLocale(Locale.ROOT)); + + private final byte encoding; + private final ChronoUnit roundingUnit; + // The formatter is only used for formatting (parsing is done with DateRangeUtil.parseCalendar to + // be exactly the same as DSE's). + // If that ever were to change, note that DateTimeFormatters with a time zone have a parsing bug + // in Java 8: the formatter's zone will always be used, even if the input string specifies one + // explicitly. 
+  // See https://stackoverflow.com/questions/41999421
+  private final DateTimeFormatter formatter;
+
+  DateRangePrecision(int encoding, ChronoUnit roundingUnit, DateTimeFormatter formatter) {
+    this.encoding = (byte) encoding;
+    this.roundingUnit = roundingUnit;
+    this.formatter = formatter;
+  }
+
+  private static final Map<Byte, DateRangePrecision> ENCODINGS;
+
+  static {
+    ImmutableMap.Builder<Byte, DateRangePrecision> builder = ImmutableMap.builder();
+    for (DateRangePrecision precision : values()) {
+      builder.put(precision.encoding, precision);
+    }
+    ENCODINGS = builder.build();
+  }
+
+  public static DateRangePrecision fromEncoding(byte encoding) {
+    DateRangePrecision precision = ENCODINGS.get(encoding);
+    if (precision == null) {
+      throw new IllegalArgumentException("Invalid precision encoding: " + encoding);
+    }
+    return precision;
+  }
+
+  /** The code used to represent the precision when a date range is encoded to binary. */
+  public byte getEncoding() {
+    return encoding;
+  }
+
+  /**
+   * Rounds up the given timestamp to this precision.
+   *

Temporal fields smaller than this precision will be rounded up; other fields will be left + * untouched. + */ + @NonNull + public ZonedDateTime roundUp(@NonNull ZonedDateTime timestamp) { + Preconditions.checkNotNull(timestamp); + return DateRangeUtil.roundUp(timestamp, roundingUnit); + } + + /** + * Rounds down the given timestamp to this precision. + * + *
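+   * <p>For example (illustrative; results shown in comments):
+   *
+   * <pre>{@code
+   * ZonedDateTime timestamp = ZonedDateTime.parse("2017-05-15T10:30:00.000Z");
+   * DateRangePrecision.MONTH.roundDown(timestamp); // 2017-05-01T00:00:00.000Z
+   * DateRangePrecision.MONTH.roundUp(timestamp);   // 2017-05-31T23:59:59.999Z
+   * }</pre>
+   *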

Temporal fields smaller than this precision will be rounded down; other fields will be left + * untouched. + */ + @NonNull + public ZonedDateTime roundDown(@NonNull ZonedDateTime timestamp) { + Preconditions.checkNotNull(timestamp); + return DateRangeUtil.roundDown(timestamp, roundingUnit); + } + + /** Formats the given timestamp according to this precision. */ + public String format(ZonedDateTime timestamp) { + return formatter.format(timestamp); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java new file mode 100644 index 00000000000..995de53959b --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.GraphExecutionInfoConverter; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Iterator; +import java.util.concurrent.CompletionStage; + +/** + * The result of an asynchronous graph query. + * + *

The default implementation returned by the driver is not thread-safe: the iterable + * returned by {@link #currentPage()} should only be iterated by a single thread. However, if + * subsequent pages are requested via {@link #fetchNextPage()}, it's safe to process those new + * instances in other threads (as long as each individual page of results is not accessed + * concurrently). + * + * @see GraphResultSet + */ +public interface AsyncGraphResultSet { + + /** The execution information for this page of results. */ + @NonNull + default ExecutionInfo getRequestExecutionInfo() { + return GraphExecutionInfoConverter.convert(getExecutionInfo()); + } + + /** + * The execution information for this page of results. + * + * @deprecated Use {@link #getRequestExecutionInfo()} instead. + */ + @Deprecated + @NonNull + com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo(); + + /** How many rows are left before the current page is exhausted. */ + int remaining(); + + /** + * The nodes in the current page. To keep iterating beyond that, use {@link #hasMorePages()} and + * {@link #fetchNextPage()}. + * + *
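+   * <p>For example (an illustrative paging loop):
+   *
+   * <pre>{@code
+   * void processAllPages(AsyncGraphResultSet resultSet) {
+   *   for (GraphNode node : resultSet.currentPage()) {
+   *     // process the node
+   *   }
+   *   if (resultSet.hasMorePages()) {
+   *     resultSet.fetchNextPage().thenAccept(this::processAllPages);
+   *   }
+   * }
+   * }</pre>
+   *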

Note that this method always returns the same object, and that that object can only be
+   * iterated once: nodes are "consumed" as they are read.
+   */
+  @NonNull
+  Iterable<GraphNode> currentPage();
+
+  /**
+   * Returns the next node, or {@code null} if the result set is exhausted.
+   *

This is convenient for queries that are known to return exactly one node.
+   */
+  @Nullable
+  default GraphNode one() {
+    Iterator<GraphNode> iterator = currentPage().iterator();
+    return iterator.hasNext() ? iterator.next() : null;
+  }
+
+  /**
+   * Whether there are more pages of results. If so, call {@link #fetchNextPage()} to fetch the next
+   * one asynchronously.
+   */
+  boolean hasMorePages();
+
+  /**
+   * Fetch the next page of results asynchronously.
+   *
+   * @throws IllegalStateException if there are no more pages. Use {@link #hasMorePages()} to check
+   *     if you can call this method.
+   */
+  @NonNull
+  CompletionStage<AsyncGraphResultSet> fetchNextPage() throws IllegalStateException;
+
+  /**
+   * Cancels the query and asks the server to stop sending results.
+   *

At this time, graph queries are not paginated and the server sends all the results at once; + * therefore this method has no effect. + */ + void cancel(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java new file mode 100644 index 00000000000..2169dc5f053 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultBatchGraphStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +/** + * A graph statement that groups multiple mutating traversals together, to be executed in the + * same transaction. + * + *

It is reserved for graph mutations, and does not return any result. + * + *

All the mutations grouped in the batch will either all succeed, or they will all be discarded + * and return an error. + * + *

The default implementation returned by the driver is immutable and thread-safe. Each mutation + * operation returns a copy. If you chain many of those operations, it is recommended to use {@link + * #builder()} instead for better memory usage. + * + *

Typically used like so: + * + *

{@code
+ * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
+ *
+ * BatchGraphStatement statement =
+ *     BatchGraphStatement.builder()
+ *         .addTraversal(
+ *                 g.addV("person").property("name", "batch1").property("age", 1))
+ *         .addTraversal(
+ *                 g.addV("person").property("name", "batch2").property("age", 2))
+ *         .build();
+ *
+ * GraphResultSet graphResultSet = dseSession.execute(statement);
+ * }
+ *
+ * @see DseGraph#g
+ */
+public interface BatchGraphStatement
+    extends GraphStatement<BatchGraphStatement>, Iterable<GraphTraversal> {
+
+  /**
+   * Create a new, empty instance.
+   *

Traversals can be added with {@link #addTraversal(GraphTraversal)}. + */ + @NonNull + static BatchGraphStatement newInstance() { + return new DefaultBatchGraphStatement( + ImmutableList.of(), + null, + null, + null, + Statement.NO_DEFAULT_TIMESTAMP, + null, + null, + Collections.emptyMap(), + null, + null, + null, + null, + null, + null); + } + + /** Create a new instance from the given list of traversals. */ + @NonNull + static BatchGraphStatement newInstance(@NonNull Iterable traversals) { + return new DefaultBatchGraphStatement( + traversals, + null, + null, + null, + Statement.NO_DEFAULT_TIMESTAMP, + null, + null, + Collections.emptyMap(), + null, + null, + null, + null, + null, + null); + } + + /** Create a new instance from the given list of traversals. */ + @NonNull + static BatchGraphStatement newInstance(@NonNull GraphTraversal... traversals) { + return newInstance(ImmutableList.copyOf(traversals)); + } + + /** + * Create a builder helper object to start creating a new instance. + * + *

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static BatchGraphStatementBuilder builder() { + return new BatchGraphStatementBuilder(); + } + + /** + * Create a builder helper object to start creating a new instance with an existing statement as a + * template. The traversals and options set on the template will be copied for the new statement + * at the moment this method is called. + * + *
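+   * <p>For example (illustrative; {@code g} is {@link DseGraph#g}):
+   *
+   * <pre>{@code
+   * BatchGraphStatement template =
+   *     BatchGraphStatement.builder()
+   *         .addTraversal(g.addV("person").property("name", "batch1"))
+   *         .build();
+   *
+   * // Start from the template and append one more traversal:
+   * BatchGraphStatement extended =
+   *     BatchGraphStatement.builder(template)
+   *         .addTraversal(g.addV("person").property("name", "batch2"))
+   *         .build();
+   * }</pre>
+   *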

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static BatchGraphStatementBuilder builder(@NonNull BatchGraphStatement template) { + return new BatchGraphStatementBuilder(template); + } + + /** + * Add a traversal to this statement. If many traversals need to be added, use a {@link + * #builder()}, or the {@link #addTraversals(Iterable)} method instead to avoid intermediary + * copies. + */ + @NonNull + BatchGraphStatement addTraversal(@NonNull GraphTraversal traversal); + + /** + * Adds several traversals to this statement. If this method is to be called many times, consider + * using a {@link #builder()} instead to avoid intermediary copies. + */ + @NonNull + BatchGraphStatement addTraversals(@NonNull Iterable traversals); + + /** Get the number of traversals already added to this statement. */ + int size(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java new file mode 100644 index 00000000000..ac1b85bdc71 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/BatchGraphStatementBuilder.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultBatchGraphStatement; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.NotThreadSafe; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +/** + * A builder to create a batch graph statement. + * + *

This class is mutable and not thread-safe. + */ +@NotThreadSafe +public class BatchGraphStatementBuilder + extends GraphStatementBuilderBase { + + private ImmutableList.Builder traversalsBuilder = ImmutableList.builder(); + private int traversalsCount; + + public BatchGraphStatementBuilder() { + // nothing to do + } + + public BatchGraphStatementBuilder(BatchGraphStatement template) { + super(template); + traversalsBuilder.addAll(template); + traversalsCount = template.size(); + } + + /** Add a traversal to this builder to include in the generated {@link BatchGraphStatement}. */ + @NonNull + public BatchGraphStatementBuilder addTraversal(@NonNull GraphTraversal traversal) { + traversalsBuilder.add(traversal); + traversalsCount += 1; + return this; + } + + /** + * Add several traversals to this builder to include in the generated {@link BatchGraphStatement}. + */ + @NonNull + public BatchGraphStatementBuilder addTraversals(@NonNull Iterable traversals) { + for (GraphTraversal traversal : traversals) { + traversalsBuilder.add(traversal); + traversalsCount += 1; + } + return this; + } + + /** + * Add several traversals to this builder to include in the generated {@link BatchGraphStatement}. + */ + @NonNull + public BatchGraphStatementBuilder addTraversals(@NonNull GraphTraversal... traversals) { + for (GraphTraversal traversal : traversals) { + traversalsBuilder.add(traversal); + traversalsCount += 1; + } + return this; + } + + /** Clears all the traversals previously added to this builder. */ + @NonNull + public BatchGraphStatementBuilder clearTraversals() { + traversalsBuilder = ImmutableList.builder(); + traversalsCount = 0; + return this; + } + + /** Returns the number of traversals added to this statement so far. */ + public int getTraversalsCount() { + return traversalsCount; + } + + @NonNull + @Override + public BatchGraphStatement build() { + return new DefaultBatchGraphStatement( + traversalsBuilder.build(), + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + buildCustomPayload(), + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java new file mode 100644 index 00000000000..dd1dbe95bc8 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraph.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultDseRemoteConnectionBuilder; +import com.datastax.oss.driver.api.core.CqlSession; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; + +/** + * General purpose utility class for interaction with DSE Graph via the DataStax Enterprise Java + * driver. + */ +public class DseGraph { + + /** + * IMPORTANT: As of Tinkerpop 3.3.5, you should no longer use this shortcut if you intend + * to connect the traversal to DSE Graph using a {@linkplain + * org.apache.tinkerpop.gremlin.process.remote.RemoteConnection remote connection}, for example + * via the {@link #remoteConnectionBuilder} method declared below. Instead of: + * + *

{@code
+   * DseSession session = ...;
+   * RemoteConnection remoteConnection = DseGraph.remoteConnectionBuilder(session).build();
+   * GraphTraversalSource g = DseGraph.g.withRemote(remoteConnection);
+   * }
+ * + * You should now use {@link AnonymousTraversalSource#traversal()}, and adopt the following idiom: + * + *
{@code
+   * DseSession session = ...;
+   * RemoteConnection remoteConnection = DseGraph.remoteConnectionBuilder(session).build();
+   * GraphTraversalSource g = AnonymousTraversalSource.traversal().withRemote(remoteConnection);
+   * }
+ * + * A general-purpose shortcut for a non-connected TinkerPop {@link GraphTraversalSource} + * based on an immutable empty graph. This is really just a shortcut to {@code + * EmptyGraph.instance().traversal();}. + * + *

It can be used to create {@link FluentGraphStatement} instances (recommended); for ease of + * use you may statically import this variable. + * + *
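+   * <p>For example (illustrative):
+   *
+   * <pre>{@code
+   * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
+   *
+   * FluentGraphStatement statement = FluentGraphStatement.newInstance(g.V().hasLabel("person"));
+   * }</pre>
+   *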

Calling {@code g.getGraph()} will return a local immutable empty graph which is in no way
+   * connected to the DSE Graph server; it does not allow modifying a DSE Graph directly. To act on
+   * data stored in DSE Graph you must use {@linkplain
+   * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal traversal}s such as
+   * {@code DseGraph.g.V()}, {@code DseGraph.g.addV/addE()}.
+   */
+  public static final GraphTraversalSource g = EmptyGraph.instance().traversal();
+
+  /**
+   * Returns a builder to create {@link
+   * org.apache.tinkerpop.gremlin.process.remote.RemoteConnection} implementations that seamlessly
+   * connect to DSE Graph using the given {@link CqlSession}.
+   */
+  public static DseGraphRemoteConnectionBuilder remoteConnectionBuilder(CqlSession dseSession) {
+    return new DefaultDseRemoteConnectionBuilder(dseSession);
+  }
+
+  private DseGraph() {
+    // nothing to do
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java
new file mode 100644
index 00000000000..c4210a5b3dd
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/DseGraphRemoteConnectionBuilder.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection;
+
+/**
+ * A builder helper to create a {@link RemoteConnection} that will be used to build
+ * implicitly-executing fluent traversals.
+ *

To create an instance of this, use the {@link DseGraph#remoteConnectionBuilder(CqlSession)} + * method: + * + *

{@code
+ * DseSession dseSession = DseSession.builder().build();
+ * GraphTraversalSource g = AnonymousTraversalSource.traversal().withRemote(DseGraph.remoteConnectionBuilder(dseSession).build());
+ * List vertices = g.V().hasLabel("person").toList();
+ * List<Vertex> vertices = g.V().hasLabel("person").toList();
+ * + * @see CqlSession + */ +public interface DseGraphRemoteConnectionBuilder { + + /** Build the remote connection that was configured with this builder. */ + RemoteConnection build(); + + /** + * Set a configuration profile that will be used for every traversal built using the remote + * connection. + * + *

For the list of options available for Graph requests, see the {@code reference.conf}
+   * configuration file.
+   */
+  DseGraphRemoteConnectionBuilder withExecutionProfile(DriverExecutionProfile executionProfile);
+
+  /**
+   * Set the name of an execution profile that will be used for every traversal built using the
+   * remote connection. Named profiles are pre-defined in the driver configuration.
+   *
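+   * <p>For example (illustrative; the profile name is hypothetical and must exist in your
+   * configuration):
+   *
+   * <pre>{@code
+   * RemoteConnection remoteConnection =
+   *     DseGraph.remoteConnectionBuilder(session)
+   *         .withExecutionProfileName("graph-oltp")
+   *         .build();
+   * }</pre>
+   *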

For the list of options available for Graph requests, see the {@code reference.conf} + * configuration file. + */ + DseGraphRemoteConnectionBuilder withExecutionProfileName(String executionProfileName); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java new file mode 100644 index 00000000000..051c6501c65 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultFluentGraphStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +/** + * A graph statement that uses a TinkerPop {@link GraphTraversal} as the query. + * + *

Typically used like so: + * + *

{@code
+ * import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
+ *
+ * FluentGraphStatement statement = FluentGraphStatement.newInstance(g.V().has("name", "marko"));
+ *
+ * GraphResultSet graphResultSet = dseSession.execute(statement);
+ * }
+ * + * @see DseGraph#g + */ +public interface FluentGraphStatement extends GraphStatement { + + /** + * Create a new instance from the given traversal. + * + *

Use {@link #builder(GraphTraversal)} if you want to set more options before building the + * final statement instance. + */ + @NonNull + static FluentGraphStatement newInstance(@NonNull GraphTraversal traversal) { + return new DefaultFluentGraphStatement( + traversal, + null, + null, + null, + Statement.NO_DEFAULT_TIMESTAMP, + null, + null, + Collections.emptyMap(), + null, + null, + null, + null, + null, + null); + } + + /** + * Create a builder object to start creating a new instance from the given traversal. + * + *
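+   * <p>For example (an illustrative sketch; the builder exposes the usual statement options):
+   *
+   * <pre>{@code
+   * FluentGraphStatement statement =
+   *     FluentGraphStatement.builder(g.V().has("name", "marko"))
+   *         .setTimeout(Duration.ofSeconds(5))
+   *         .build();
+   * }</pre>
+   *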

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static FluentGraphStatementBuilder builder(@NonNull GraphTraversal traversal) { + return new FluentGraphStatementBuilder(traversal); + } + + /** + * Create a builder helper object to start creating a new instance with an existing statement as a + * template. The traversal and options set on the template will be copied for the new statement at + * the moment this method is called. + * + *

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static FluentGraphStatementBuilder builder(@NonNull FluentGraphStatement template) { + return new FluentGraphStatementBuilder(template); + } + + /** The underlying TinkerPop object representing the traversal executed by this statement. */ + @NonNull + GraphTraversal getTraversal(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java new file mode 100644 index 00000000000..59e588c564a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/FluentGraphStatementBuilder.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultFluentGraphStatement; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.NotThreadSafe; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +/** + * A builder to create a fluent graph statement. + * + *

This class is mutable and not thread-safe. + */ +@NotThreadSafe +public class FluentGraphStatementBuilder + extends GraphStatementBuilderBase { + + private GraphTraversal traversal; + + public FluentGraphStatementBuilder(@NonNull GraphTraversal traversal) { + this.traversal = traversal; + } + + public FluentGraphStatementBuilder(@NonNull FluentGraphStatement template) { + super(template); + this.traversal = template.getTraversal(); + } + + @NonNull + @Override + public FluentGraphStatement build() { + return new DefaultFluentGraphStatement( + this.traversal, + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + buildCustomPayload(), + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java new file mode 100644 index 00000000000..758f6b358ed --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphExecutionInfo.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +/** + * Information about the execution of a graph statement. + * + * @deprecated This interface is not used by any driver component anymore; the driver now exposes + * instances of {@link com.datastax.oss.driver.api.core.cql.ExecutionInfo} for all Graph + * queries. + */ +@Deprecated +public interface GraphExecutionInfo { + + /** The statement that was executed. */ + GraphStatement getStatement(); + + /** The node that was used as a coordinator to successfully complete the query. */ + Node getCoordinator(); + + /** + * The number of speculative executions that were started for this query. + * + *

This does not include the initial, normal execution of the query. Therefore, if speculative + * executions are disabled, this will always be 0. If they are enabled and one speculative + * execution was triggered in addition to the initial execution, this will be 1, etc. + * + * @see SpeculativeExecutionPolicy + */ + int getSpeculativeExecutionCount(); + + /** + * The index of the execution that completed this query. + * + *

0 represents the initial, normal execution of the query, 1 the first speculative execution, + * etc. + * + * @see SpeculativeExecutionPolicy + */ + int getSuccessfulExecutionIndex(); + + /** + * The errors encountered on previous coordinators, if any. + * + *

The list is in chronological order, based on the time that the driver processed the error
+   * responses. If speculative executions are enabled, they run concurrently so their errors will be
+   * interleaved. A node can appear multiple times (if the retry policy decided to retry on the same
+   * node).
+   */
+  List<Map.Entry<Node, Throwable>> getErrors();
+
+  /**
+   * The server-side warnings for this query, if any (otherwise the list will be empty).
+   *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower
+   * versions, this list will always be empty.
+   */
+  List<String> getWarnings();
+
+  /**
+   * The custom payload sent back by the server with the response, if any (otherwise the map will be
+   * empty).
+   *

This method returns a read-only view of the original map, but its values remain inherently + * mutable. If multiple clients will read these values, care should be taken not to corrupt the + * data (in particular, preserve the indices by calling {@link ByteBuffer#duplicate()}). + * + *

This feature is only available with {@link DefaultProtocolVersion#V4} or above; with lower + * versions, this map will always be empty. + */ + Map getIncomingPayload(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java new file mode 100644 index 00000000000..97d48a6b04d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphNode.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.tinkerpop.gremlin.process.traversal.Path; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Property; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; + +/** + * A node in a tree-like structure representing a Graph or a Graph component. + * + *

It can be: + * + *

    + *
  • a scalar value of a primitive type (boolean, string, int, long, double); + *
  • a graph element (vertex, edge, path or property); + *
  • a list of nodes; + *
  • a set of nodes; + *
  • a map of nodes. + *
+ * + * This interface provides test methods to find out what a node represents, and conversion methods + * to cast it to a particular Java type. Two generic methods {@link #as(Class)} and {@link + * #as(GenericType)} can produce any arbitrary Java type, provided that the underlying serialization + * runtime has been correctly configured to support the requested conversion. + */ +public interface GraphNode { + + /** Whether this node represents a {@code null} value. */ + boolean isNull(); + + /** + * Returns {@code true} if this node is a {@link Map}, and {@code false} otherwise. + * + *

If this method returns {@code true}, you can convert this node with {@link #asMap()}, or use + * {@link #keys()} and {@link #getByKey(Object)} to access the individual fields (note that + * entries are not ordered, so {@link #getByIndex(int)} does not work). + */ + boolean isMap(); + + /** The keys of this map node, or an empty iterator if it is not a map. */ + Iterable keys(); + + /** + * Returns the value for the given key as a node. + * + *
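+   * <p>For example (illustrative):
+   *
+   * <pre>{@code
+   * if (node.isMap()) {
+   *   for (Object key : node.keys()) {
+   *     GraphNode value = node.getByKey(key);
+   *     // process the entry
+   *   }
+   * }
+   * }</pre>
+   *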

If this node is not a map, or does not contain the specified key, {@code null} is returned. + * + *

If the property value has been explicitly set to {@code null}, implementors may return a + * special "null node" instead of {@code null}. + */ + GraphNode getByKey(Object key); + + /** Deserializes and returns this node as a {@link Map}. */ + Map asMap(); + + /** + * Returns {@code true} if this node is a {@link List}, and {@code false} otherwise. + * + *

If this method returns {@code true}, you can convert this node with {@link #asList()}, or + * use {@link #size()} and {@link #getByIndex(int)} to access the individual fields. + */ + boolean isList(); + + /** The size of the current node, if it is a list or map, or {@code 0} otherwise. */ + int size(); + + /** + * Returns the element at the given index as a node. + * + *

If this node is not a list, or {@code index} is out of bounds (i.e. less than zero or {@code
+   * >= size()}), {@code null} is returned; no exception will be thrown.
+   *

If the requested element has been explicitly set to {@code null}, implementors may return a
+   * special "null node" instead of {@code null}.
+   */
+  GraphNode getByIndex(int index);
+
+  /** Deserializes and returns this node as a {@link List}. */
+  List asList();
+
+  /**
+   * Returns {@code true} if this node is a simple scalar value (i.e., string, boolean or number),
+   * and {@code false} otherwise.
+   *

If this method returns {@code true}, you can convert this node with {@link #asString()}, + * {@link #asBoolean()}, {@link #asInt()}, {@link #asLong()} or {@link #asDouble()}. + */ + boolean isValue(); + + /** + * Returns this node as an integer. + * + *

If the underlying object is not convertible to integer, implementors may choose to either + * throw {@link ClassCastException} or return [null | empty | some default value], whichever is + * deemed more appropriate. + */ + int asInt(); + + /** + * Returns this node as a boolean. + * + *

If the underlying object is not convertible to boolean, implementors may choose to either + * throw {@link ClassCastException} or return [null | empty | some default value], whichever is + * deemed more appropriate. + */ + boolean asBoolean(); + + /** + * Returns this node as a long integer. + * + *

If the underlying object is not convertible to long, implementors may choose to either throw
+   * {@link ClassCastException} or return [null | empty | some default value], whichever is deemed
+   * more appropriate.
+   */
+  long asLong();
+
+  /**
+   * Returns this node as a double.
+   *

If the underlying object is not convertible to double, implementors may choose to either + * throw {@link ClassCastException} or return [null | empty | some default value], whichever is + * deemed more appropriate. + */ + double asDouble(); + + /** + * A valid string representation of this node. + * + *

If the underlying object is not convertible to a string, implementors may choose to either + * throw {@link ClassCastException} or return an empty string, whichever is deemed more + * appropriate. + */ + String asString(); + + /** + * Deserializes and returns this node as an instance of {@code clazz}. + * + *

Before attempting such a conversion, there must be an appropriate converter configured on
+   * the underlying serialization runtime.
+   */
+  <ResultT> ResultT as(Class<ResultT> clazz);
+
+  /**
+   * Deserializes and returns this node as an instance of the given {@link GenericType type}.
+   *

Before attempting such a conversion, there must be an appropriate converter configured on
+   * the underlying serialization runtime.
+   */
+  <ResultT> ResultT as(GenericType<ResultT> type);
+
+  /**
+   * Returns {@code true} if this node is a {@link Vertex}, and {@code false} otherwise.
+   *

If this method returns {@code true}, then {@link #asVertex()} can be safely called. + */ + boolean isVertex(); + + /** Returns this node as a Tinkerpop {@link Vertex}. */ + Vertex asVertex(); + + /** + * Returns {@code true} if this node is a {@link Edge}, and {@code false} otherwise. + * + *

If this method returns {@code true}, then {@link #asEdge()} can be safely called. + */ + boolean isEdge(); + + /** Returns this node as a Tinkerpop {@link Edge}. */ + Edge asEdge(); + + /** + * Returns {@code true} if this node is a {@link Path}, and {@code false} otherwise. + * + *

If this method returns {@code true}, then {@link #asPath()} can be safely called. + */ + boolean isPath(); + + /** Returns this node as a Tinkerpop {@link Path}. */ + Path asPath(); + + /** + * Returns {@code true} if this node is a {@link Property}, and {@code false} otherwise. + * + *

If this method returns {@code true}, then {@link #asProperty()} can be safely called. + */ + boolean isProperty(); + + /** Returns this node as a Tinkerpop {@link Property}. */ + Property asProperty(); + + /** + * Returns {@code true} if this node is a {@link VertexProperty}, and {@code false} otherwise. + * + *

If this method returns {@code true}, then {@link #asVertexProperty()} can be safely
+   * called.
+   */
+  boolean isVertexProperty();
+
+  /** Returns this node as a Tinkerpop {@link VertexProperty}. */
+  VertexProperty asVertexProperty();
+
+  /**
+   * Returns {@code true} if this node is a {@link Set}, and {@code false} otherwise.
+   *

If this method returns {@code true}, you can convert this node with {@link #asSet()}, or use + * {@link #size()}. + */ + boolean isSet(); + + /** Deserializes and returns this node as a {@link Set}. */ + Set asSet(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java new file mode 100644 index 00000000000..d9c8d8fa460 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphResultSet.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.GraphExecutionInfoConverter; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +/** + * The result of a synchronous Graph query. + * + *

This object is a container for {@link GraphNode} objects that will contain the data returned + * by Graph queries. + * + *

Note that this object can only be iterated once: items are "consumed" as they are read, + * subsequent calls to {@code iterator()} will return the same iterator instance. + * + *

The default implementation returned by the driver is not thread-safe. It can only be
+ * iterated by the thread that invoked {@code dseSession.execute}.
+ *
+ * @see GraphNode
+ * @see GraphSession#execute(GraphStatement)
+ */
+public interface GraphResultSet extends Iterable<GraphNode> {
+
+  /**
+   * Returns the next node, or {@code null} if the result set is exhausted.
+   *
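+   * <p>For example (illustrative; {@code session} is an active {@code CqlSession} and {@code g}
+   * is {@link DseGraph#g}):
+   *
+   * <pre>{@code
+   * GraphResultSet result =
+   *     session.execute(FluentGraphStatement.newInstance(g.V().count()));
+   * long total = result.one().asLong();
+   * }</pre>
+   *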

This is convenient for queries that are known to return exactly one node, for example count
+   * queries.
+   */
+  @Nullable
+  default GraphNode one() {
+    Iterator<GraphNode> graphNodeIterator = iterator();
+    return graphNodeIterator.hasNext() ? graphNodeIterator.next() : null;
+  }
+
+  /**
+   * Returns all the remaining nodes as a list; not recommended for paginated queries that return
+   * a large number of nodes.
+   *

At this time (DSE 6.0.0), graph queries are not paginated and the server sends all the
+   * results at once.
+   */
+  @NonNull
+  default List<GraphNode> all() {
+    if (!iterator().hasNext()) {
+      return Collections.emptyList();
+    }
+    return ImmutableList.copyOf(this);
+  }
+
+  /**
+   * Cancels the query and asks the server to stop sending results.
+   *

At this time (DSE 6.0.0), graph queries are not paginated and the server sends all the
+   * results at once; therefore this method has no effect.
+   */
+  void cancel();
+
+  /**
+   * The execution information for the query that was performed to assemble this result set.
+   */
+  @NonNull
+  default ExecutionInfo getRequestExecutionInfo() {
+    return GraphExecutionInfoConverter.convert(getExecutionInfo());
+  }
+
+  /** @deprecated Use {@link #getRequestExecutionInfo()} instead. */
+  @Deprecated
+  @NonNull
+  com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo();
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java
new file mode 100644
index 00000000000..b985bc56353
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphSession.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.session.Session;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Objects;
+import java.util.concurrent.CompletionStage;
+
+/**
+ * A session that has the ability to execute DSE Graph requests.
+ *

Generally this interface won't be referenced directly in an application; instead, you should + * use {@link CqlSession}, which is a combination of this interface and many others for a more + * integrated usage of DataStax Enterprise's multi-model database via a single entry point. However, + * it is still possible to cast a {@code CqlSession} to a {@code GraphSession} to only expose the + * DSE Graph execution methods. + */ +public interface GraphSession extends Session { + + /** + * Executes a graph statement synchronously (the calling thread blocks until the result becomes + * available). + * + *

The driver provides different kinds of graph statements: + * + *

    + *
  • {@link FluentGraphStatement} (recommended): wraps a fluent TinkerPop {@linkplain + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal traversal}; + *
  • {@link BatchGraphStatement}: groups together multiple mutating traversals ({@code + * g.addV()/g.addE()}) inside a single transaction and avoids multiple client-server + * round-trips. Improves performance in data ingestion scenarios; + *
  • {@link ScriptGraphStatement}: wraps a Gremlin-groovy script provided as a plain Java + * string. Required for administrative queries such as creating/dropping a graph, + * configuration and schema. + *
+ * + *

This feature is only available with DataStax Enterprise. Executing graph queries against an
+   * Apache Cassandra® cluster will result in a runtime error.
+   *
+   * @see GraphResultSet
+   * @param graphStatement the graph query to execute (that can be any {@code GraphStatement}).
+   * @return the result of the graph query. That result will never be null but can be empty.
+   */
+  @NonNull
+  default GraphResultSet execute(@NonNull GraphStatement<?> graphStatement) {
+    return Objects.requireNonNull(
+        execute(graphStatement, GraphStatement.SYNC),
+        "The graph processor should never return a null result");
+  }
+
+  /**
+   * Executes a graph statement asynchronously (the call returns as soon as the statement was sent,
+   * generally before the result is available).
+   *
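+   * <p>For example (illustrative; {@code g} is {@link DseGraph#g}):
+   *
+   * <pre>{@code
+   * CompletionStage<AsyncGraphResultSet> stage =
+   *     session.executeAsync(FluentGraphStatement.newInstance(g.V().hasLabel("person")));
+   * stage.thenAccept(resultSet -> resultSet.currentPage().forEach(System.out::println));
+   * }</pre>
+   *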

This feature is only available with DataStax Enterprise. Executing graph queries against an
+   * Apache Cassandra® cluster will result in a runtime error.
+   *
+   * @see #execute(GraphStatement)
+   * @see AsyncGraphResultSet
+   * @param graphStatement the graph query to execute (that can be any {@code GraphStatement}).
+   * @return the {@code CompletionStage} on the result of the graph query.
+   */
+  @NonNull
+  default CompletionStage<AsyncGraphResultSet> executeAsync(
+      @NonNull GraphStatement<?> graphStatement) {
+    return Objects.requireNonNull(
+        execute(graphStatement, GraphStatement.ASYNC),
+        "The graph processor should never return a null result");
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java
new file mode 100644
index 00000000000..f770469b824
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatement.java
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import com.datastax.oss.driver.api.core.ConsistencyLevel;
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.NoNodeAvailableException;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy;
+import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.metadata.token.Token;
+import com.datastax.oss.driver.api.core.session.Request;
+import com.datastax.oss.driver.api.core.session.Session;
+import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import edu.umd.cs.findbugs.annotations.CheckReturnValue;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.Map;
+import java.util.concurrent.CompletionStage;
+
+/**
+ * A request to execute a DSE Graph query.
+ *
+ * @param <SelfT> the "self type" used for covariant returns in subtypes.
+ */
+public interface GraphStatement<SelfT extends GraphStatement<SelfT>> extends Request {
+
+  /**
+   * The type returned when a graph statement is executed synchronously.
+   *

Most users won't use this explicitly. It is needed for the generic execute method ({@link
+   * Session#execute(Request, GenericType)}), but graph statements will generally be run with one of
+   * the DSE driver's built-in helper methods (such as {@link CqlSession#execute(GraphStatement)}).
+   */
+  GenericType<GraphResultSet> SYNC = GenericType.of(GraphResultSet.class);
+
+  /**
+   * The type returned when a graph statement is executed asynchronously.
+   *

Most users won't use this explicitly. It is needed for the generic execute method ({@link
+   * Session#execute(Request, GenericType)}), but graph statements will generally be run with one of
+   * the DSE driver's built-in helper methods (such as {@link
+   * CqlSession#executeAsync(GraphStatement)}).
+   */
+  GenericType<CompletionStage<AsyncGraphResultSet>> ASYNC =
+      new GenericType<CompletionStage<AsyncGraphResultSet>>() {};
+
+  /**
+   * Set the idempotence to use for execution.
+   *

Idempotence defines whether it will be possible to speculatively re-execute the statement, + * based on a {@link SpeculativeExecutionPolicy}. + * + *
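For example, a minimal sketch (editor's illustration; the script is hypothetical, and any
+ * {@code GraphStatement} subtype works the same way):
+ *
+ * {@code
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.V().count()").setIdempotent(true);
+ * }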

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @param idempotent a boolean instance to set a statement-specific value, or {@code null} to use + * the default idempotence defined in the configuration. + */ + @NonNull + @CheckReturnValue + SelfT setIdempotent(@Nullable Boolean idempotent); + + /** + * {@inheritDoc} + * + *

Note that, if this method returns {@code null}, graph statements fall back to a dedicated + * configuration option: {@code basic.graph.timeout}. See {@code reference.conf} in the DSE driver + * distribution for more details. + */ + @Nullable + @Override + Duration getTimeout(); + + /** + * Sets how long to wait for this request to complete. This is a global limit on the duration of a + * session.execute() call, including any retries the driver might do. + * + *
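For example, a sketch that caps a single statement at five seconds (editor's illustration; the
+ * script is hypothetical):
+ *
+ * {@code
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.V().count()").setTimeout(Duration.ofSeconds(5));
+ * }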

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @param newTimeout the timeout to use, or {@code null} to use the default value defined in the + * configuration. + * @see #getTimeout() + */ + @NonNull + @CheckReturnValue + SelfT setTimeout(@Nullable Duration newTimeout); + + /** + * Sets the {@link Node} that should handle this query. + * + *

In the general case, use of this method is heavily discouraged and should only be + * used in specific cases, such as applying a series of schema changes, which may be advantageous + * to execute in sequence on the same node. + * + *

Configuring a specific node causes the configured {@link LoadBalancingPolicy} to be + * completely bypassed. However, if the load balancing policy dictates that the node is at + * distance {@link NodeDistance#IGNORED} or there is no active connectivity to the node, the + * request will fail with a {@link NoNodeAvailableException}. + * + *
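For example, a sketch that pins a statement to an arbitrary node taken from the session
+ * metadata (editor's illustration; error handling omitted):
+ *
+ * {@code
+ * Node chosenNode = session.getMetadata().getNodes().values().iterator().next();
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("schema.describe()").setNode(chosenNode);
+ * }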

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @param newNode The node that should be used to handle executions of this statement or null to + * delegate to the configured load balancing policy. + */ + @NonNull + @CheckReturnValue + SelfT setNode(@Nullable Node newNode); + + /** + * Get the timestamp set on the statement. + * + *

By default, if left unset, the value returned by this is {@code Long.MIN_VALUE}, which means + * that the timestamp will be set via the Timestamp Generator. + * + * @return the timestamp set on this statement. + */ + long getTimestamp(); + + /** + * Set the timestamp to use for execution. + * + *

By default the timestamp generator (see reference config file) will be used for timestamps, + * unless set explicitly via this method. + * + *
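For example, a sketch that sets an explicit timestamp (editor's illustration; assumes the same
+ * microseconds-since-epoch convention as CQL statements):
+ *
+ * {@code
+ * long timestampMicros = System.currentTimeMillis() * 1000;
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.V().count()").setTimestamp(timestampMicros);
+ * }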

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + */ + @CheckReturnValue + SelfT setTimestamp(long timestamp); + + /** + * Sets the configuration profile to use for execution. + * + *

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + */ + @NonNull + @CheckReturnValue + SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile); + + /** + * Sets the name of the driver configuration profile that will be used for execution. + * + *

For all the driver's built-in implementations, this method has no effect if {@link + * #setExecutionProfile} has been called with a non-null argument. + * + *
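For example, a sketch that routes a statement through a dedicated profile (editor's
+ * illustration; "graph-oltp" is a hypothetical profile declared in the driver configuration):
+ *
+ * {@code
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.V().count()").setExecutionProfileName("graph-oltp");
+ * }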

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + */ + @NonNull + @CheckReturnValue + SelfT setExecutionProfileName(@Nullable String name); + + /** + * Sets the custom payload to use for execution. + * + *

This is intended for advanced use cases, such as tools with very advanced knowledge of DSE + * Graph, and reserved for internal settings like transaction settings. Note that the driver also + * adds graph-related options to the payload, in addition to the ones provided here; it won't + * override any option that is already present. + * + *

All the driver's built-in statement implementations are immutable, and return a new instance + * from this method. However custom implementations may choose to be mutable and return the same + * instance. + * + *

Note that it's your responsibility to provide a thread-safe map. This can be achieved with a + * concurrent or immutable implementation, or by making it effectively immutable (meaning that + * it's never modified after being set on the statement). + * + *
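For example, a sketch that attaches an effectively immutable, single-entry payload (editor's
+ * illustration; the key and value are hypothetical):
+ *
+ * {@code
+ * Map<String, ByteBuffer> payload =
+ *     Collections.singletonMap("my-option", ByteBuffer.wrap(new byte[] {1}));
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.V().count()").setCustomPayload(payload);
+ * }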

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + */ + @NonNull + @CheckReturnValue + SelfT setCustomPayload(@NonNull Map<String, ByteBuffer> newCustomPayload); + + /** + * The name of the graph to use for this statement. + * + *

This is the programmatic equivalent of the configuration option {@code basic.graph.name}, + * and takes precedence over it. That is, if this property is non-null, then the configuration + * will be ignored. + */ + @Nullable + String getGraphName(); + + /** + * Sets the graph name. + * + *

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @see #getGraphName() + */ + @NonNull + @CheckReturnValue + SelfT setGraphName(@Nullable String newGraphName); + + /** + * The name of the traversal source to use for this statement. + * + *

This is the programmatic equivalent of the configuration option {@code + * basic.graph.traversal-source}, and takes precedence over it. That is, if this property is + * non-null, then the configuration will be ignored. + */ + @Nullable + String getTraversalSource(); + + /** + * Sets the traversal source. + * + *

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @see #getTraversalSource() + */ + @NonNull + @CheckReturnValue + SelfT setTraversalSource(@Nullable String newTraversalSource); + + /** + * The DSE graph sub-protocol to use for this statement. + * + *

This is the programmatic equivalent of the configuration option {@code + * advanced.graph.sub-protocol}, and takes precedence over it. That is, if this property is + * non-null, then the configuration will be ignored. + */ + @Nullable + String getSubProtocol(); + + /** + * Sets the sub-protocol. + * + *

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @see #getSubProtocol() + */ + @NonNull + @CheckReturnValue + SelfT setSubProtocol(@Nullable String newSubProtocol); + + /** + * Returns the consistency level to use for the statement. + * + *

This is the programmatic equivalent of the configuration option {@code + * basic.request.consistency}, and takes precedence over it. That is, if this property is + * non-null, then the configuration will be ignored. + */ + @Nullable + ConsistencyLevel getConsistencyLevel(); + + /** + * Sets the consistency level to use for this statement. + * + *

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @param newConsistencyLevel the consistency level to use, or null to use the default value + * defined in the configuration. + * @see #getConsistencyLevel() + */ + @CheckReturnValue + SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel); + + /** + * The consistency level to use for the internal read queries that will be produced by this + * statement. + * + *

This is the programmatic equivalent of the configuration option {@code + * basic.graph.read-consistency-level}, and takes precedence over it. That is, if this property is + * non-null, then the configuration will be ignored. + * + *

If this property isn't set here or in the configuration, the default consistency level will + * be used ({@link #getConsistencyLevel()} or {@code basic.request.consistency}). + */ + @Nullable + ConsistencyLevel getReadConsistencyLevel(); + + /** + * Sets the read consistency level. + * + *
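For example, a sketch that sets distinct read and write guarantees on one statement (editor's
+ * illustration; {@code DefaultConsistencyLevel} is the driver's built-in enum):
+ *
+ * {@code
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.addV('person').property('name', 'alice')")
+ *         .setReadConsistencyLevel(DefaultConsistencyLevel.LOCAL_ONE)
+ *         .setWriteConsistencyLevel(DefaultConsistencyLevel.LOCAL_QUORUM);
+ * }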

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @see #getReadConsistencyLevel() + */ + @NonNull + @CheckReturnValue + SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel newReadConsistencyLevel); + + /** + * The consistency level to use for the internal write queries that will be produced by this + * statement. + * + *

This is the programmatic equivalent of the configuration option {@code + * basic.graph.write-consistency-level}, and takes precedence over it. That is, if this property + * is non-null, then the configuration will be ignored. + * + *

If this property isn't set here or in the configuration, the default consistency level will + * be used ({@link #getConsistencyLevel()} or {@code basic.request.consistency}). + */ + @Nullable + ConsistencyLevel getWriteConsistencyLevel(); + + /** + * Sets the write consistency level. + * + *

All the driver's built-in implementations are immutable, and return a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @see #getWriteConsistencyLevel() + */ + @NonNull + @CheckReturnValue + SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel newWriteConsistencyLevel); + + /** Graph statements do not have a per-query keyspace, this method always returns {@code null}. */ + @Nullable + @Override + default CqlIdentifier getKeyspace() { + return null; + } + + /** Graph statements can't be routed, this method always returns {@code null}. */ + @Nullable + @Override + default CqlIdentifier getRoutingKeyspace() { + return null; + } + + /** Graph statements can't be routed, this method always returns {@code null}. */ + @Nullable + @Override + default ByteBuffer getRoutingKey() { + return null; + } + + /** Graph statements can't be routed, this method always returns {@code null}. */ + @Nullable + @Override + default Token getRoutingToken() { + return null; + } + + /** + * Whether tracing information should be recorded for this statement. + * + *

This method is only exposed for future extensibility. At the time of writing, graph + * statements do not support tracing, and this always returns {@code false}. + */ + default boolean isTracing() { + return false; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java new file mode 100644 index 00000000000..5cb48613cf5 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/GraphStatementBuilderBase.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Map; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe +public abstract class GraphStatementBuilderBase< + SelfT extends GraphStatementBuilderBase, + StatementT extends GraphStatement> { + + @SuppressWarnings({"unchecked"}) + private final SelfT self = (SelfT) this; + + protected Boolean isIdempotent; + protected Duration timeout; + protected Node node; + protected long timestamp = Statement.NO_DEFAULT_TIMESTAMP; + protected DriverExecutionProfile executionProfile; + protected String executionProfileName; + private NullAllowingImmutableMap.Builder customPayloadBuilder; + protected String graphName; + protected String traversalSource; + protected String subProtocol; + protected ConsistencyLevel consistencyLevel; + protected ConsistencyLevel readConsistencyLevel; + protected ConsistencyLevel writeConsistencyLevel; + + protected GraphStatementBuilderBase() { + // nothing to do + } + + protected GraphStatementBuilderBase(StatementT template) { + this.isIdempotent = template.isIdempotent(); + this.timeout = template.getTimeout(); + this.node = template.getNode(); + this.timestamp = template.getTimestamp(); + this.executionProfile = template.getExecutionProfile(); + this.executionProfileName = template.getExecutionProfileName(); + if (!template.getCustomPayload().isEmpty()) { + this.customPayloadBuilder = + NullAllowingImmutableMap.builder() + .putAll(template.getCustomPayload()); + } + this.graphName = template.getGraphName(); + this.traversalSource = template.getTraversalSource(); + this.subProtocol = template.getSubProtocol(); + this.consistencyLevel = 
template.getConsistencyLevel(); + this.readConsistencyLevel = template.getReadConsistencyLevel(); + this.writeConsistencyLevel = template.getWriteConsistencyLevel(); + } + + /** @see GraphStatement#setIdempotent(Boolean) */ + @NonNull + public SelfT setIdempotence(@Nullable Boolean idempotent) { + this.isIdempotent = idempotent; + return self; + } + + /** @see GraphStatement#setTimeout(Duration) */ + @NonNull + public SelfT setTimeout(@Nullable Duration timeout) { + this.timeout = timeout; + return self; + } + + /** @see GraphStatement#setNode(Node) */ + @NonNull + public SelfT setNode(@Nullable Node node) { + this.node = node; + return self; + } + + /** @see GraphStatement#setTimestamp(long) */ + @NonNull + public SelfT setTimestamp(long timestamp) { + this.timestamp = timestamp; + return self; + } + + /** @see GraphStatement#setExecutionProfileName(String) */ + @NonNull + public SelfT setExecutionProfileName(@Nullable String executionProfileName) { + this.executionProfileName = executionProfileName; + return self; + } + + /** @see GraphStatement#setExecutionProfile(DriverExecutionProfile) */ + @NonNull + public SelfT setExecutionProfile(@Nullable DriverExecutionProfile executionProfile) { + this.executionProfile = executionProfile; + this.executionProfileName = null; + return self; + } + + /** @see GraphStatement#setCustomPayload(Map) */ + @NonNull + public SelfT addCustomPayload(@NonNull String key, @Nullable ByteBuffer value) { + if (customPayloadBuilder == null) { + customPayloadBuilder = NullAllowingImmutableMap.builder(); + } + customPayloadBuilder.put(key, value); + return self; + } + + /** @see GraphStatement#setCustomPayload(Map) */ + @NonNull + public SelfT clearCustomPayload() { + customPayloadBuilder = null; + return self; + } + + /** @see GraphStatement#setGraphName(String) */ + @NonNull + public SelfT setGraphName(@Nullable String graphName) { + this.graphName = graphName; + return self; + } + + /** @see GraphStatement#setTraversalSource(String) */ + @NonNull + public SelfT setTraversalSource(@Nullable String traversalSource) { + this.traversalSource = traversalSource; + return self; + } + + /** @see GraphStatement#setSubProtocol(String) */ + @NonNull + public SelfT setSubProtocol(@Nullable String subProtocol) { + this.subProtocol = subProtocol; + return self; + } + + /** @see GraphStatement#setConsistencyLevel(ConsistencyLevel) */ + @NonNull + public SelfT setConsistencyLevel(@Nullable ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + return self; + } + + /** @see GraphStatement#setReadConsistencyLevel(ConsistencyLevel) */ + @NonNull + public SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel readConsistencyLevel) { + this.readConsistencyLevel = readConsistencyLevel; + return self; + } + + /** @see GraphStatement#setWriteConsistencyLevel(ConsistencyLevel) */ + @NonNull + public SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel writeConsistencyLevel) { + this.writeConsistencyLevel = writeConsistencyLevel; + return self; + } + + @NonNull + protected Map buildCustomPayload() { + return (customPayloadBuilder == null) + ? NullAllowingImmutableMap.of() + : customPayloadBuilder.build(); + } + + /** Create the statement with the configuration defined by this builder object. 
*/ + @NonNull + public abstract StatementT build(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java new file mode 100644 index 00000000000..f59d0e50e93 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/PagingEnabledOptions.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +public enum PagingEnabledOptions { + ENABLED, + DISABLED, + AUTO +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java new file mode 100644 index 00000000000..2ad7aafc232 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultScriptGraphStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Map; + +/** + * A graph statement that uses a Gremlin-groovy script as the query. + * + *

These statements are generally used for DSE Graph set-up queries, such as creating or dropping + * a graph, or defining a graph schema. For graph traversals, we recommend using {@link + * FluentGraphStatement} instead; for bulk data ingestion in graph, we recommend {@link + * BatchGraphStatement}. + * + *

Typical usage: + * + *

{@code
+ * ScriptGraphStatement statement = ScriptGraphStatement.newInstance("schema.propertyKey('age').Int().create()");
+ *
+ * GraphResultSet graphResultSet = dseSession.execute(statement);
+ * }
+ */ +public interface ScriptGraphStatement extends GraphStatement<ScriptGraphStatement> { + + /** Create a new instance from the given script. */ + @NonNull + static ScriptGraphStatement newInstance(@NonNull String script) { + return new DefaultScriptGraphStatement( + script, + NullAllowingImmutableMap.of(), + null, + null, + null, + null, + Statement.NO_DEFAULT_TIMESTAMP, + null, + null, + Collections.emptyMap(), + null, + null, + null, + null, + null, + null); + } + + /** + * Create a builder object to start creating a new instance from the given script. + * + *

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static ScriptGraphStatementBuilder builder(@NonNull String script) { + return new ScriptGraphStatementBuilder(script); + } + + /** + * Create a builder helper object to start creating a new instance with an existing statement as a + * template. The script and options set on the template will be copied for the new statement at + * the moment this method is called. + * + *

Note that this builder is mutable and not thread-safe. + */ + @NonNull + static ScriptGraphStatementBuilder builder(@NonNull ScriptGraphStatement template) { + return new ScriptGraphStatementBuilder(template); + } + + /** The Gremlin-groovy script representing the graph query. */ + @NonNull + String getScript(); + + /** + * Whether the statement is a system query, or {@code null} if it defaults to the value defined in + * the configuration. + * + * @see #setSystemQuery(Boolean) + */ + @Nullable + Boolean isSystemQuery(); + + /** + * Defines if this statement is a system query. + * + *

Script statements that access the {@code system} variable must not specify a graph + * name (otherwise {@code system} is not available). However, if your application executes a lot + * of non-system statements, it is convenient to set the graph name once in the configuration to + * avoid repeating it every time. This method allows you to ignore that global graph name for a + * specific statement. + * + *

This property is the programmatic equivalent of the configuration option {@code + * basic.graph.is-system-query}, and takes precedence over it. That is, if this property is + * non-null, then the configuration will be ignored. + * + *
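For example, a sketch that creates a graph, which must run as a system query (editor's
+ * illustration; "demo" is a hypothetical graph name):
+ *
+ * {@code
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.builder("system.graph('demo').create()")
+ *         .setSystemQuery(true)
+ *         .build();
+ * }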

The driver's built-in implementation is immutable, and returns a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * + * @param newValue {@code true} to mark this statement as a system query (the driver will ignore + * any graph name set on the statement or the configuration); {@code false} to mark it as a + * non-system query; {@code null} to default to the value defined in the configuration. + * @see #isSystemQuery() + */ + @NonNull + ScriptGraphStatement setSystemQuery(@Nullable Boolean newValue); + + /** + * The query parameters to send along the request. + * + * @see #setQueryParam(String, Object) + */ + @NonNull + Map getQueryParams(); + + /** + * Set a value for a parameter defined in the Groovy script. + * + *

The script engine in the DSE Graph server allows you to define parameters in a Groovy script + * and set the values of these parameters as a binding. Defining parameters allows scripts to be + * reused with only their parameter values changing, which improves execution performance, so + * defining parameters is encouraged; however, for optimal Graph traversal performance, we + * recommend either using {@link BatchGraphStatement}s for data ingestion, or {@link + * FluentGraphStatement} for normal traversals. + * + *

Parameters in a Groovy script are always named; unlike CQL, they are not prefixed by a + * colon ({@code :}). + * + *
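For example, a sketch of a parameterized script (editor's illustration; {@code n} is a
+ * parameter name referenced inside the script):
+ *
+ * {@code
+ * ScriptGraphStatement statement =
+ *     ScriptGraphStatement.newInstance("g.V().limit(n)").setQueryParam("n", 10);
+ * }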

The driver's built-in implementation is immutable, and returns a new instance from this + * method. However custom implementations may choose to be mutable and return the same instance. + * If many parameters are to be set in a query, it is recommended to create the statement with + * {@link #builder(String)} instead. + * + * @param name the name of the parameter defined in the script. If the statement already had a + * binding for this name, it gets replaced. + * @param value the value that will be transmitted with the request. + */ + @NonNull + ScriptGraphStatement setQueryParam(@NonNull String name, @Nullable Object value); + + /** + * Removes a binding for the given name from this statement. + * + *

If the statement did not have such a binding, this method has no effect and returns the same + * statement instance. Otherwise, the driver's built-in implementation returns a new instance + * (however custom implementations may choose to be mutable and return the same instance). + * + * @see #setQueryParam(String, Object) + */ + @NonNull + ScriptGraphStatement removeQueryParam(@NonNull String name); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java new file mode 100644 index 00000000000..1985c58955f --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/ScriptGraphStatementBuilder.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.DefaultScriptGraphStatement; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.Maps; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Map; +import net.jcip.annotations.NotThreadSafe; + +/** + * A builder to create a script graph statement. + * + *

This class is mutable and not thread-safe. + */ +@NotThreadSafe +public class ScriptGraphStatementBuilder + extends GraphStatementBuilderBase { + + private String script; + private Boolean isSystemQuery; + private final Map queryParams; + + public ScriptGraphStatementBuilder() { + this.queryParams = Maps.newHashMap(); + } + + public ScriptGraphStatementBuilder(String script) { + this.script = script; + this.queryParams = Maps.newHashMap(); + } + + public ScriptGraphStatementBuilder(ScriptGraphStatement template) { + super(template); + this.script = template.getScript(); + this.queryParams = Maps.newHashMap(template.getQueryParams()); + this.isSystemQuery = template.isSystemQuery(); + } + + @NonNull + public ScriptGraphStatementBuilder setScript(@NonNull String script) { + this.script = script; + return this; + } + + /** @see ScriptGraphStatement#isSystemQuery() */ + @NonNull + public ScriptGraphStatementBuilder setSystemQuery(@Nullable Boolean isSystemQuery) { + this.isSystemQuery = isSystemQuery; + return this; + } + + /** + * Set a value for a parameter defined in the script query. + * + * @see ScriptGraphStatement#setQueryParam(String, Object) + */ + @NonNull + public ScriptGraphStatementBuilder setQueryParam(@NonNull String name, @Nullable Object value) { + this.queryParams.put(name, value); + return this; + } + + /** + * Set multiple values for named parameters defined in the script query. + * + * @see ScriptGraphStatement#setQueryParam(String, Object) + */ + @NonNull + public ScriptGraphStatementBuilder setQueryParams(@NonNull Map params) { + this.queryParams.putAll(params); + return this; + } + + /** + * Removes a parameter. + * + *

This is useful if the builder was {@linkplain + * ScriptGraphStatement#builder(ScriptGraphStatement) initialized with a template statement} that + * has more parameters than desired. + * + * @see ScriptGraphStatement#setQueryParam(String, Object) + * @see #clearQueryParams() + */ + @NonNull + public ScriptGraphStatementBuilder removeQueryParam(@NonNull String name) { + this.queryParams.remove(name); + return this; + } + + /** Clears all the parameters previously added to this builder. */ + public ScriptGraphStatementBuilder clearQueryParams() { + this.queryParams.clear(); + return this; + } + + @NonNull + @Override + public ScriptGraphStatement build() { + Preconditions.checkNotNull(this.script, "Script hasn't been defined in this builder."); + return new DefaultScriptGraphStatement( + this.script, + this.queryParams, + this.isSystemQuery, + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + buildCustomPayload(), + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java new file mode 100644 index 00000000000..fdbf3fbe397 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollection.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.predicates; + +import com.datastax.dse.driver.internal.core.graph.CqlCollectionPredicate; +import java.util.Collection; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.javatuples.Pair; + +/** + * Predicates that can be used on CQL collections (lists, sets and maps). + * + *
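For example, a sketch of a traversal filtering on a collection property (editor's illustration;
+ * assumes TinkerPop's {@code GraphTraversalSource g} and a vertex property "emails" backed by a
+ * CQL set):
+ *
+ * {@code
+ * GraphTraversal<Vertex, Vertex> result =
+ *     g.V().has("emails", CqlCollection.contains("alice@example.com"));
+ * }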

Note: CQL collection predicates are only available when using the binary subprotocol. + */ +public class CqlCollection { + + /** + * Checks if the target collection contains the given value. + * + * @param value the value to look for; cannot be {@code null}. + * @return a predicate to apply in a {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. + */ + @SuppressWarnings("unchecked") + public static , V> P contains(V value) { + return new P(CqlCollectionPredicate.contains, value); + } + + /** + * Checks if the target map contains the given key. + * + * @param key the key to look for; cannot be {@code null}. + * @return a predicate to apply in a {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. + */ + @SuppressWarnings("unchecked") + public static , K> P containsKey(K key) { + return new P(CqlCollectionPredicate.containsKey, key); + } + + /** + * Checks if the target map contains the given value. + * + * @param value the value to look for; cannot be {@code null}. + * @return a predicate to apply in a {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. + */ + @SuppressWarnings("unchecked") + public static , V> P containsValue(V value) { + return new P(CqlCollectionPredicate.containsValue, value); + } + + /** + * Checks if the target map contains the given entry. + * + * @param key the key to look for; cannot be {@code null}. + * @param value the value to look for; cannot be {@code null}. + * @return a predicate to apply in a {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal}. + */ + @SuppressWarnings("unchecked") + public static , K, V> P entryEq(K key, V value) { + return new P(CqlCollectionPredicate.entryEq, new Pair<>(key, value)); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java new file mode 100644 index 00000000000..65dd84d0076 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Geo.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph.predicates; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.internal.core.data.geometry.Distance; +import com.datastax.dse.driver.internal.core.graph.GeoPredicate; +import com.datastax.dse.driver.internal.core.graph.GeoUtils; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +public interface Geo { + + enum Unit { + MILES(GeoUtils.MILES_TO_KM * GeoUtils.KM_TO_DEG), + KILOMETERS(GeoUtils.KM_TO_DEG), + METERS(GeoUtils.KM_TO_DEG / 1000.0), + DEGREES(1); + + private final double multiplier; + + Unit(double multiplier) { + this.multiplier = multiplier; + } + + /** Convert distance to degrees (used internally only). */ + public double toDegrees(double distance) { + return distance * multiplier; + } + } + + /** + * Finds whether an entity is inside the given circular area using a geo coordinate system. + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P inside(Point center, double radius, Unit units) { + return new P<>(GeoPredicate.inside, new Distance(center, units.toDegrees(radius))); + } + + /** + * Finds whether an entity is inside the given circular area using a cartesian coordinate system. + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P inside(Point center, double radius) { + return new P<>(GeoPredicate.insideCartesian, new Distance(center, radius)); + } + + /** + * Finds whether an entity is inside the given polygon. + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P inside(Polygon polygon) { + return new P<>(GeoPredicate.insideCartesian, polygon); + } + + /** + * Creates a point from the given coordinates. + * + *

This is just a shortcut to {@link Point#fromCoordinates(double, double)}. It is duplicated + * here so that {@code Geo} can be used as a single entry point in Gremlin-groovy scripts. + */ + @NonNull + static Point point(double x, double y) { + return Point.fromCoordinates(x, y); + } + + /** + * Creates a line string from the given (at least 2) points. + * + *

This is just a shortcut to {@link LineString#fromPoints(Point, Point, Point...)}. It is + * duplicated here so that {@code Geo} can be used as a single entry point in Gremlin-groovy + * scripts. + */ + @NonNull + static LineString lineString( + @NonNull Point point1, @NonNull Point point2, @NonNull Point... otherPoints) { + return LineString.fromPoints(point1, point2, otherPoints); + } + + /** + * Creates a line string from the coordinates of its points. + * + *
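For example, both calls below build the same two-point line string (editor's illustration):
+ *
+ * {@code
+ * LineString viaPoints = Geo.lineString(Geo.point(0, 0), Geo.point(1, 1));
+ * LineString viaCoordinates = Geo.lineString(0, 0, 1, 1);
+ * }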

This is provided for backward compatibility with previous DSE versions. We recommend {@link + * #lineString(Point, Point, Point...)} instead. + */ + @NonNull + static LineString lineString(double... coordinates) { + if (coordinates.length % 2 != 0) { + throw new IllegalArgumentException("lineString() must be passed an even number of arguments"); + } else if (coordinates.length < 4) { + throw new IllegalArgumentException( + "lineString() must be passed at least 4 arguments (2 points)"); + } + Point point1 = Point.fromCoordinates(coordinates[0], coordinates[1]); + Point point2 = Point.fromCoordinates(coordinates[2], coordinates[3]); + Point[] otherPoints = new Point[coordinates.length / 2 - 2]; + for (int i = 4; i < coordinates.length; i += 2) { + otherPoints[i / 2 - 2] = Point.fromCoordinates(coordinates[i], coordinates[i + 1]); + } + return LineString.fromPoints(point1, point2, otherPoints); + } + + /** + * Creates a polygon from the given (at least 3) points. + * + *

This is just a shortcut to {@link Polygon#fromPoints(Point, Point, Point, Point...)}. It is + * duplicated here so that {@code Geo} can be used as a single entry point in Gremlin-groovy + * scripts. + */ + @NonNull + static Polygon polygon( + @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... otherPoints) { + return Polygon.fromPoints(p1, p2, p3, otherPoints); + } + + /** + * Creates a polygon from the coordinates of its points. + * + *

This is provided for backward compatibility with previous DSE versions. We recommend {@link + * #polygon(Point, Point, Point, Point...)} instead. + */ + @NonNull + static Polygon polygon(double... coordinates) { + if (coordinates.length % 2 != 0) { + throw new IllegalArgumentException("polygon() must be passed an even number of arguments"); + } else if (coordinates.length < 6) { + throw new IllegalArgumentException( + "polygon() must be passed at least 6 arguments (3 points)"); + } + Point point1 = Point.fromCoordinates(coordinates[0], coordinates[1]); + Point point2 = Point.fromCoordinates(coordinates[2], coordinates[3]); + Point point3 = Point.fromCoordinates(coordinates[4], coordinates[5]); + Point[] otherPoints = new Point[coordinates.length / 2 - 3]; + for (int i = 6; i < coordinates.length; i += 2) { + otherPoints[i / 2 - 3] = Point.fromCoordinates(coordinates[i], coordinates[i + 1]); + } + return Polygon.fromPoints(point1, point2, point3, otherPoints); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java new file mode 100644 index 00000000000..e285c118c8a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/predicates/Search.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.predicates; + +import com.datastax.dse.driver.internal.core.graph.EditDistance; +import com.datastax.dse.driver.internal.core.graph.SearchPredicate; +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +public interface Search { + + /** + * Search any instance of a certain token within the text property targeted (case insensitive). + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P token(String value) { + return new P<>(SearchPredicate.token, value); + } + + /** + * Search any instance of a certain token prefix within the text property targeted (case + * insensitive). + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P tokenPrefix(String value) { + return new P<>(SearchPredicate.tokenPrefix, value); + } + + /** + * Search any instance of the provided regular expression for the targeted property (case + * insensitive). + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P tokenRegex(String value) { + return new P<>(SearchPredicate.tokenRegex, value); + } + + /** + * Search for a specific prefix at the beginning of the text property targeted (case sensitive). + * + * @return a predicate to apply in a {@link GraphTraversal}. 
+ */ + static P prefix(String value) { + return new P<>(SearchPredicate.prefix, value); + } + + /** + * Search for this regular expression inside the text property targeted (case sensitive). + * + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P regex(String value) { + return new P<>(SearchPredicate.regex, value); + } + + /** + * Supports finding words which are within a specific distance away (case insensitive). + * + *

Example: the search expression is {@code phrase("Hello world", 2)} + * + *

    + *
  • the inserted value "Hello world" is found + *
  • the inserted value "Hello wild world" is found + *
  • the inserted value "Hello big wild world" is found + *
  • the inserted value "Hello the big wild world" is not found + *
  • the inserted value "Goodbye world" is not found. + *
+ * + * @param query the string to look for in the value + * @param distance the number of terms allowed between two correct terms to find a value. + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P phrase(String query, int distance) { + return new P<>(SearchPredicate.phrase, new EditDistance(query, distance)); + } + + /** + * Supports fuzzy searches based on the Damerau-Levenshtein Distance, or Edit Distance algorithm + * (case sensitive). + * + *
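A usage sketch (editor's illustration; assumes a search-indexed "name" property and TinkerPop's
+ * {@code GraphTraversalSource g}):
+ *
+ * {@code
+ * GraphTraversal<Vertex, Vertex> hits = g.V().has("name", Search.fuzzy("david", 1));
+ * }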

Example: the search expression is {@code fuzzy("david", 1)} + * + *

    + *
  • the inserted value "david" is found + *
  • the inserted value "dawid" is found + *
  • the inserted value "davids" is found + *
  • the inserted value "dewid" is not found + *
+ * + * @param query the string to look for in the value + * @param distance the number of "uncertainties" allowed for the Levenshtein algorithm. + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P fuzzy(String query, int distance) { + return new P<>(SearchPredicate.fuzzy, new EditDistance(query, distance)); + } + + /** + * Supports fuzzy searches based on the Damerau-Levenshtein Distance, or Edit Distance algorithm + * after having tokenized the data stored (case insensitive). + * + *

Example: the search expression is {@code tokenFuzzy("david", 1)} + * + *

    + *
  • the inserted value "david" is found + *
  • the inserted value "dawid" is found + *
  • the inserted value "hello-dawid" is found + *
  • the inserted value "dewid" is not found + *
+ * + * @param query the string to look for in the value + * @param distance the number of "uncertainties" allowed for the Levenshtein algorithm. + * @return a predicate to apply in a {@link GraphTraversal}. + */ + static P tokenFuzzy(String query, int distance) { + return new P<>(SearchPredicate.tokenFuzzy, new EditDistance(query, distance)); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java new file mode 100644 index 00000000000..ad7849633c6 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphNode.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * A {@link GraphNode} produced by a {@linkplain ReactiveGraphResultSet reactive graph result set}. + * + *

This is essentially an extension of the driver's {@link GraphNode} object that also exposes + * useful information about {@linkplain #getExecutionInfo() request execution} (note however that + * this information is also exposed at result set level for convenience). + * + * @see ReactiveGraphSession + * @see ReactiveGraphResultSet + */ +public interface ReactiveGraphNode extends GraphNode { + + /** + * The execution information for the paged request that produced this result. + * + *

This object is the same for two rows pertaining to the same page, but differs for rows + * pertaining to different pages. + * + * @return the execution information for the paged request that produced this result. + * @see ReactiveGraphResultSet#getExecutionInfos() + */ + @NonNull + ExecutionInfo getExecutionInfo(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java new file mode 100644 index 00000000000..a0e3231750e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphResultSet.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.reactivestreams.Publisher; + +/** + * A {@link Publisher} of {@link ReactiveGraphNode}s returned by a {@link ReactiveGraphSession}. + * + *

By default, all implementations returned by the driver are cold, unicast, single-subscriber + * only publishers. In other words, they do not support multiple subscriptions; consider + * caching the results produced by such publishers if they need to be consumed by more than one + * downstream subscriber. + * + *

Also, note that reactive graph result sets may emit items to their subscribers on an internal + * driver IO thread. Subscriber implementors are encouraged to abide by Reactive Streams + * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside + * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down + * the driver and impact performance. Instead, they should asynchronously dispatch received signals + * to their processing logic. + * + *

This interface exists mainly to expose useful information about {@linkplain + * #getExecutionInfos() request execution}. This is particularly convenient for queries that do not + * return rows; for queries that do return rows, it is also possible, and oftentimes easier, to + * access that same information {@linkplain ReactiveGraphNode at node level}. + * + * @see ReactiveGraphSession#executeReactive(GraphStatement) + * @see ReactiveGraphNode + */ +public interface ReactiveGraphResultSet extends Publisher { + + /** + * Returns {@linkplain ExecutionInfo information about the execution} of all requests that have + * been performed so far to assemble this result set. + * + *

If the query is not paged, this publisher will emit exactly one item as soon as the response + * arrives, then complete. If the query is paged, it will emit multiple items, one per page; then + * it will complete when the last page arrives. If the query execution fails, then this publisher + * will fail with the same error. + * + *

By default, publishers returned by this method do not support multiple subscriptions. + * + * @see ReactiveGraphNode#getExecutionInfo() + */ + @NonNull + Publisher getExecutionInfos(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java new file mode 100644 index 00000000000..88f0e5def61 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/graph/reactive/ReactiveGraphSession.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.internal.core.graph.reactive.ReactiveGraphRequestProcessor; +import com.datastax.oss.driver.api.core.session.Session; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; + +/** + * A {@link Session} that offers utility methods to issue graph queries using reactive-style + * programming. + */ +public interface ReactiveGraphSession extends Session { + + /** + * Returns a {@link ReactiveGraphResultSet} that, once subscribed to, executes the given query and + * emits all the results. + * + *
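For example, a sketch that consumes the results with Reactor (editor's illustration; {@code
+ * Flux} is just one possible Reactive Streams consumer, any {@code org.reactivestreams}
+ * subscriber works):
+ *
+ * {@code
+ * ReactiveGraphResultSet rs =
+ *     session.executeReactive(ScriptGraphStatement.newInstance("g.V().count()"));
+ * Flux.from(rs).doOnNext(System.out::println).blockLast();
+ * }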

See the javadocs of {@link ReactiveGraphResultSet} for important remarks and caveats + * regarding the subscription to and consumption of reactive graph result sets. + * + * @param statement the statement to execute. + * @return the {@link ReactiveGraphResultSet} that will publish the returned results. + * @see ReactiveGraphResultSet + * @see ReactiveGraphNode + */ + @NonNull + default ReactiveGraphResultSet executeReactive(@NonNull GraphStatement<?> statement) { + return Objects.requireNonNull( + execute(statement, ReactiveGraphRequestProcessor.REACTIVE_GRAPH_RESULT_SET)); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java new file mode 100644 index 00000000000..88dbc164588 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata; + +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; + +/** The keys for the additional DSE-specific properties stored in {@link Node#getExtras()}. */ +public class DseNodeProperties { + + /** + * The DSE version that the node is running. + * + *

The associated value in {@link Node#getExtras()} is a {@link Version}. + */ + public static final String DSE_VERSION = "DSE_VERSION"; + + /** + * The value of the {@code server_id} field in the {@code peers} system table for this node. + * + *

This is the single identifier of the machine running a DSE instance. If DSE has been + * configured with Multi-Instance, the {@code server_id} helps identify the single physical + * machine that runs the multiple DSE instances. If DSE is not configured with DSE Multi-Instance, + * the {@code server_id} is automatically set and unique for each node. + * + *

This information is only available if connecting to a DSE 6.0+ node. + * + *

The associated value in {@link Node#getExtras()} is a {@code String}. + * + * @see DSE + * Multi-Instance (DSE Administrator Guide) + * @see + * server_id (DSE Administrator Guide) + */ + public static final String SERVER_ID = "SERVER_ID"; + + /** + * The DSE workloads that the node is running. + * + *

This is based on the {@code workload} or {@code workloads} columns in {@code system.local} + * and {@code system.peers}. + * + *

Workload labels may vary depending on the DSE version in use; e.g. DSE 5.1 may report two + * distinct workloads: {@code Search} and {@code Analytics}, while DSE 5.0 would report a single + * {@code SearchAnalytics} workload instead. It is up to users to deal with such discrepancies; + * the driver simply returns the workload labels as reported by DSE, without any form of + * pre-processing (with the exception of Graph in DSE 5.0, which is stored in a separate column, + * but will be reported as {@code Graph} here). + * + *
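+ * <p>For example, a sketch of testing a node for a given workload label (the label string is as
+ * reported by DSE, see above):
+ *
+ * <pre>{@code
+ * Set<String> workloads = (Set<String>) node.getExtras().get(DseNodeProperties.DSE_WORKLOADS);
+ * boolean runsAnalytics = workloads != null && workloads.contains("Analytics");
+ * }</pre>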

The associated value in {@link Node#getExtras()} is an immutable {@code Set<String>}. + */ + public static final String DSE_WORKLOADS = "DSE_WORKLOADS"; + + /** + * The port for the native transport connections on the DSE node. + * + *

The native transport port is {@code 9042} by default but can be changed on instances + * requiring specific firewall configurations. This can be configured in the {@code + * cassandra.yaml} configuration file under the {@code native_transport_port} property. + * + *

This information is only available if connecting the driver to a DSE 6.0+ node. + * + *

The associated value in {@link Node#getExtras()} is an {@code Integer}. + */ + public static final String NATIVE_TRANSPORT_PORT = "NATIVE_TRANSPORT_PORT"; + + /** + * The port for the encrypted native transport connections on the DSE node. + * + *

In most scenarios, enabling encryption for client communications in DSE results in a single + * port that only accepts encrypted connections (by default, port {@code 9042} is reused, since + * unencrypted connections are not allowed). + * + *

However, it is possible to configure DSE to use both an encrypted and a non-encrypted + * communication port with clients. In that case, the port accepting encrypted connections will + * differ from the non-encrypted one (see {@link #NATIVE_TRANSPORT_PORT}) and will be exposed + * under this property. + * + *

This information is only available if connecting the driver to a DSE 6.0+ node. + * + *

The associated value in {@link Node#getExtras()} is an {@code Integer}. + */ + public static final String NATIVE_TRANSPORT_PORT_SSL = "NATIVE_TRANSPORT_PORT_SSL"; + + /** + * The storage port used by the DSE node. + * + *

The storage port is used for internal communication between the DSE server nodes. This port + * is never used by the driver. + * + *

This information is only available if connecting the driver to a DSE 6.0+ node. + * + *

The associated value in {@link Node#getExtras()} is an {@code Integer}. + */ + public static final String STORAGE_PORT = "STORAGE_PORT"; + + /** + * The encrypted storage port used by the DSE node. + * + *

If inter-node encryption is enabled on the DSE cluster, nodes will communicate securely + * between each other via this port. This port is never used by the driver. + * + *

This information is only available if connecting the driver to a DSE 6.0+ node. + * + *

The associated value in {@link Node#getExtras()} is an {@code Integer}. + */ + public static final String STORAGE_PORT_SSL = "STORAGE_PORT_SSL"; + + /** + * The JMX port used by this node. + * + *

The JMX port can be configured in the {@code cassandra-env.sh} configuration file separately + * on each node. + * + *

This information is only available if connecting the driver to a DSE 6.0+ node. + * + *

The associated value in {@link Node#getExtras()} is an {@code Integer}. + */ + public static final String JMX_PORT = "JMX_PORT"; +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java new file mode 100644 index 00000000000..609c64f7c15 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Optional; + +/** + * Specialized aggregate metadata for DSE. + * + *

It adds support for the DSE-specific {@link #getDeterministic() DETERMINISTIC} keyword. + */ +public interface DseAggregateMetadata extends AggregateMetadata { + + /** @deprecated Use {@link #getDeterministic()} instead. */ + @Deprecated + boolean isDeterministic(); + + /** + * Indicates if this aggregate is deterministic. A deterministic aggregate means that given a + * particular input, the aggregate will always produce the same output. + * + *
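+ * <p>For example, a sketch of inspecting an aggregate (the keyspace name, aggregate name and
+ * parameter type are hypothetical):
+ *
+ * <pre>{@code
+ * session.getMetadata().getKeyspace("ks")
+ *     .flatMap(ks -> ks.getAggregate("my_avg", DataTypes.INT))
+ *     .map(DseAggregateMetadata.class::cast)
+ *     .flatMap(DseAggregateMetadata::getDeterministic)
+ *     .ifPresent(deterministic -> System.out.println("deterministic: " + deterministic));
+ * }</pre>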

This method returns {@linkplain Optional#empty() empty} if this information was not found in + * the system tables, regardless of the actual aggregate characteristics; this is the case for all + * versions of DSE older than 6.0.0. + * + * @return Whether or not this aggregate is deterministic; or {@linkplain Optional#empty() empty} + * if such information is not available in the system tables. + */ + default Optional getDeterministic() { + return Optional.of(isDeterministic()); + } + + @NonNull + @Override + default String describe(boolean pretty) { + // Easiest to just copy the OSS describe() method and add in DETERMINISTIC + ScriptBuilder builder = new ScriptBuilder(pretty); + builder + .append("CREATE AGGREGATE ") + .append(getKeyspace()) + .append(".") + .append(getSignature().getName()) + .append("("); + boolean first = true; + for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { + if (first) { + first = false; + } else { + builder.append(","); + } + DataType type = getSignature().getParameterTypes().get(i); + builder.append(type.asCql(false, pretty)); + } + builder + .increaseIndent() + .append(")") + .newLine() + .append("SFUNC ") + .append(getStateFuncSignature().getName()) + .newLine() + .append("STYPE ") + .append(getStateType().asCql(false, pretty)); + + if (getFinalFuncSignature().isPresent()) { + builder.newLine().append("FINALFUNC ").append(getFinalFuncSignature().get().getName()); + } + if (getInitCond().isPresent()) { + Optional formatInitCond = formatInitCond(); + assert formatInitCond.isPresent(); + builder.newLine().append("INITCOND ").append(formatInitCond.get()); + } + // add DETERMINISTIC if present + if (getDeterministic().orElse(false)) { + builder.newLine().append("DETERMINISTIC"); + } + return builder.append(";").build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java new file mode 100644 index 00000000000..62b5650697e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseColumnMetadata.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; + +/** + * Specialized column metadata for DSE. + * + *

This type exists only for future extensibility; currently, it is identical to {@link + * ColumnMetadata}. + */ +public interface DseColumnMetadata extends ColumnMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java new file mode 100644 index 00000000000..59ee8a277ff --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseEdgeMetadata.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; + +/** Edge metadata, for a table that was created with CREATE TABLE ... WITH EDGE LABEL. */ +public interface DseEdgeMetadata { + + /** The label of the edge in graph. */ + @NonNull + CqlIdentifier getLabelName(); + + /** The identifier of the table representing the incoming vertex. */ + @NonNull + CqlIdentifier getFromTable(); + + /** The label of the incoming vertex in graph. */ + @NonNull + CqlIdentifier getFromLabel(); + + /** The columns in this table that match the partition key of the incoming vertex table. */ + @NonNull + List getFromPartitionKeyColumns(); + + /** The columns in this table that match the clustering columns of the incoming vertex table. */ + @NonNull + List getFromClusteringColumns(); + + /** The identifier of the table representing the outgoing vertex. */ + @NonNull + CqlIdentifier getToTable(); + + /** The label of the outgoing vertex in graph. */ + @NonNull + CqlIdentifier getToLabel(); + + /** The columns in this table that match the partition key of the outgoing vertex table. */ + @NonNull + List getToPartitionKeyColumns(); + + /** The columns in this table that match the clustering columns of the outgoing vertex table. */ + @NonNull + List getToClusteringColumns(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java new file mode 100644 index 00000000000..91298795959 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; +import java.util.Optional; + +/** + * Specialized function metadata for DSE. + * + *

It adds support for the DSE-specific {@link #getDeterministic() DETERMINISTIC} and {@link + * #getMonotonicity() MONOTONIC} keywords. + */ +public interface DseFunctionMetadata extends FunctionMetadata { + + /** The monotonicity of a function. */ + enum Monotonicity { + + /** + * Indicates that the function is fully monotonic on all of its arguments. This means that it is + * either entirely non-increasing or non-decreasing. Full monotonicity is required to use the + * function in a GROUP BY clause. + */ + FULLY_MONOTONIC, + + /** + * Indicates that the function is partially monotonic, meaning that partial application over + * some of its arguments is monotonic. Currently (DSE 6.0.0), CQL only allows partial + * monotonicity on exactly one argument. This may change in a future CQL version. + */ + PARTIALLY_MONOTONIC, + + /** Indicates that the function is not monotonic. */ + NOT_MONOTONIC, + } + + /** @deprecated Use {@link #getDeterministic()} instead. */ + @Deprecated + boolean isDeterministic(); + + /** + * Indicates if this function is deterministic. A deterministic function means that given a + * particular input, the function will always produce the same output. + * + *

This method returns {@linkplain Optional#empty() empty} if this information was not found in + * the system tables, regardless of the actual function characteristics; this is the case for all + * versions of DSE older than 6.0.0. + * + * @return Whether or not this function is deterministic; or {@linkplain Optional#empty() empty} + * if such information is not available in the system tables. + */ + default Optional getDeterministic() { + return Optional.of(isDeterministic()); + } + + /** @deprecated use {@link #getMonotonicity()} instead. */ + @Deprecated + boolean isMonotonic(); + + /** + * Returns this function's {@link Monotonicity}. + * + *

A function can be either: + * + *

    + *
  • fully monotonic. In that case, this method returns {@link Monotonicity#FULLY_MONOTONIC}, + * and {@link #getMonotonicArgumentNames()} returns all the arguments; + *
  • partially monotonic, meaning that partial application over some of the arguments is + * monotonic. Currently (DSE 6.0.0), CQL only allows partial monotonicity on exactly one + * argument. This may change in a future CQL version. In that case, this method returns + * {@link Monotonicity#PARTIALLY_MONOTONIC}, and {@link #getMonotonicArgumentNames()} + * returns a singleton list; + *
  • not monotonic. In that case, this method returns {@link Monotonicity#NOT_MONOTONIC} and + * {@link #getMonotonicArgumentNames()} returns an empty list. + *
+ * + *

Full monotonicity is required to use the function in a GROUP BY clause. + * + *
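+ * <p>For example, a sketch of checking whether a function could appear in a GROUP BY clause (the
+ * keyspace name, function name and parameter type are hypothetical):
+ *
+ * <pre>{@code
+ * boolean usableInGroupBy =
+ *     session.getMetadata().getKeyspace("ks")
+ *         .flatMap(ks -> ks.getFunction("f", DataTypes.INT))
+ *         .map(DseFunctionMetadata.class::cast)
+ *         .flatMap(DseFunctionMetadata::getMonotonicity)
+ *         .map(m -> m == Monotonicity.FULLY_MONOTONIC)
+ *         .orElse(false);
+ * }</pre>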

This method returns {@linkplain Optional#empty() empty} if this information was not found in + * the system tables, regardless of the actual function characteristics; this is the case for all + * versions of DSE older than 6.0.0. + * + * @return this function's {@link Monotonicity}; or {@linkplain Optional#empty() empty} if such + * information is not available in the system tables. + */ + default Optional<Monotonicity> getMonotonicity() { + return Optional.of( + isMonotonic() + ? Monotonicity.FULLY_MONOTONIC + : getMonotonicArgumentNames().isEmpty() + ? Monotonicity.NOT_MONOTONIC + : Monotonicity.PARTIALLY_MONOTONIC); + } + + /** + * Returns a list of argument names that are monotonic. + * + *

See {@link #getMonotonicity()} for explanations on monotonicity, and the possible values + * returned by this method. + * + *

NOTE: For versions of DSE older than 6.0.0, this method will always return an empty list, + * regardless of the actual function characteristics. + * + * @return the argument names that the function is monotonic on. + */ + @NonNull + List getMonotonicArgumentNames(); + + @NonNull + @Override + default String describe(boolean pretty) { + ScriptBuilder builder = new ScriptBuilder(pretty); + builder + .append("CREATE FUNCTION ") + .append(getKeyspace()) + .append(".") + .append(getSignature().getName()) + .append("("); + boolean first = true; + for (int i = 0; i < getSignature().getParameterTypes().size(); i++) { + if (first) { + first = false; + } else { + builder.append(","); + } + DataType type = getSignature().getParameterTypes().get(i); + CqlIdentifier name = getParameterNames().get(i); + builder.append(name).append(" ").append(type.asCql(false, pretty)); + } + builder + .append(")") + .increaseIndent() + .newLine() + .append(isCalledOnNullInput() ? "CALLED ON NULL INPUT" : "RETURNS NULL ON NULL INPUT") + .newLine() + .append("RETURNS ") + .append(getReturnType().asCql(false, true)) + .newLine(); + // handle deterministic and monotonic + if (getDeterministic().orElse(false)) { + builder.append("DETERMINISTIC").newLine(); + } + if (getMonotonicity().isPresent()) { + switch (getMonotonicity().get()) { + case FULLY_MONOTONIC: + builder.append("MONOTONIC").newLine(); + break; + case PARTIALLY_MONOTONIC: + builder.append("MONOTONIC ON ").append(getMonotonicArgumentNames().get(0)).newLine(); + break; + default: + break; + } + } + builder + .append("LANGUAGE ") + .append(getLanguage()) + .newLine() + .append("AS '") + .append(getBody()) + .append("';"); + return builder.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java new file mode 100644 index 00000000000..8978a8858f9 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphKeyspaceMetadata.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Optional; + +/** + * Specialized keyspace metadata, that handles the graph-specific properties introduced in DSE 6.8. + * + *
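+ * <p>For example, a sketch of reading the graph engine of a keyspace (the keyspace name is
+ * hypothetical):
+ *
+ * <pre>{@code
+ * session.getMetadata().getKeyspace("my_graph")
+ *     .map(DseGraphKeyspaceMetadata.class::cast)
+ *     .flatMap(DseGraphKeyspaceMetadata::getGraphEngine)
+ *     .ifPresent(engine -> System.out.println("graph engine: " + engine));
+ * }</pre>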

This type only exists to avoid breaking binary compatibility. When the driver is connected to + * a DSE cluster, all the {@link KeyspaceMetadata} instances it returns can be safely downcast to + * this interface. + */ +public interface DseGraphKeyspaceMetadata extends DseKeyspaceMetadata { + + /** The graph engine that will be used to interpret this keyspace. */ + @NonNull + Optional getGraphEngine(); + + @NonNull + @Override + default String describe(boolean pretty) { + ScriptBuilder builder = new ScriptBuilder(pretty); + if (isVirtual()) { + builder.append("/* VIRTUAL "); + } else { + builder.append("CREATE "); + } + builder + .append("KEYSPACE ") + .append(getName()) + .append(" WITH replication = { 'class' : '") + .append(getReplication().get("class")) + .append("'"); + for (Map.Entry entry : getReplication().entrySet()) { + if (!entry.getKey().equals("class")) { + builder + .append(", '") + .append(entry.getKey()) + .append("': '") + .append(entry.getValue()) + .append("'"); + } + } + builder.append(" } AND durable_writes = ").append(Boolean.toString(isDurableWrites())); + getGraphEngine() + .ifPresent( + graphEngine -> builder.append(" AND graph_engine ='").append(graphEngine).append("'")); + builder.append(";"); + if (isVirtual()) { + builder.append(" */"); + } + return builder.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java new file mode 100644 index 00000000000..8f340b3b447 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseGraphTableMetadata.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.dse.driver.internal.core.metadata.schema.ScriptHelper; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Optional; + +/** + * Specialized table metadata, that handles the graph-specific properties introduced in DSE 6.8. + * + *
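+ * <p>For example, a sketch of detecting whether a table backs a vertex or an edge (the keyspace
+ * and table names are hypothetical):
+ *
+ * <pre>{@code
+ * session.getMetadata().getKeyspace("my_graph")
+ *     .flatMap(ks -> ks.getTable("person"))
+ *     .map(DseGraphTableMetadata.class::cast)
+ *     .ifPresent(table -> {
+ *       table.getVertex().ifPresent(v -> System.out.println("vertex: " + v.getLabelName()));
+ *       table.getEdge().ifPresent(e -> System.out.println("edge: " + e.getLabelName()));
+ *     });
+ * }</pre>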

This type only exists to avoid breaking binary compatibility. When the driver is connected to + * a DSE cluster, all the {@link TableMetadata} instances it returns can be safely downcast to this + * interface. + */ +public interface DseGraphTableMetadata extends DseTableMetadata { + /** + * The vertex metadata if this table represents a vertex in graph, otherwise empty. + * + *

This is mutually exclusive with {@link #getEdge()}. + */ + @NonNull + Optional getVertex(); + + /** + * The edge metadata if this table represents an edge in graph, otherwise empty. + * + *

This is mutually exclusive with {@link #getVertex()}. + */ + @NonNull + Optional getEdge(); + + @NonNull + @Override + default String describe(boolean pretty) { + ScriptBuilder builder = new ScriptBuilder(pretty); + if (isVirtual()) { + builder.append("/* VIRTUAL "); + } else { + builder.append("CREATE "); + } + + builder + .append("TABLE ") + .append(getKeyspace()) + .append(".") + .append(getName()) + .append(" (") + .newLine() + .increaseIndent(); + + for (ColumnMetadata column : getColumns().values()) { + builder.append(column.getName()).append(" ").append(column.getType().asCql(true, pretty)); + if (column.isStatic()) { + builder.append(" static"); + } + builder.append(",").newLine(); + } + + // PK + builder.append("PRIMARY KEY ("); + if (getPartitionKey().size() == 1) { // PRIMARY KEY (k + builder.append(getPartitionKey().get(0).getName()); + } else { // PRIMARY KEY ((k1, k2) + builder.append("("); + boolean first = true; + for (ColumnMetadata pkColumn : getPartitionKey()) { + if (first) { + first = false; + } else { + builder.append(", "); + } + builder.append(pkColumn.getName()); + } + builder.append(")"); + } + // PRIMARY KEY (, cc1, cc2, cc3) + for (ColumnMetadata clusteringColumn : getClusteringColumns().keySet()) { + builder.append(", ").append(clusteringColumn.getName()); + } + builder.append(")"); + + builder.newLine().decreaseIndent().append(")"); + + builder.increaseIndent(); + if (isCompactStorage()) { + builder.andWith().append("COMPACT STORAGE"); + } + if (getClusteringColumns().containsValue(ClusteringOrder.DESC)) { + builder.andWith().append("CLUSTERING ORDER BY ("); + boolean first = true; + for (Map.Entry entry : + getClusteringColumns().entrySet()) { + if (first) { + first = false; + } else { + builder.append(", "); + } + builder.append(entry.getKey().getName()).append(" ").append(entry.getValue().name()); + } + builder.append(")"); + } + getVertex() + .ifPresent( + vertex -> { + builder.andWith().append("VERTEX LABEL").append(" ").append(vertex.getLabelName()); + }); + getEdge() + .ifPresent( + edge -> { + builder.andWith().append("EDGE LABEL").append(" ").append(edge.getLabelName()); + ScriptHelper.appendEdgeSide( + builder, + edge.getFromTable(), + edge.getFromLabel(), + edge.getFromPartitionKeyColumns(), + edge.getFromClusteringColumns(), + "FROM"); + ScriptHelper.appendEdgeSide( + builder, + edge.getToTable(), + edge.getToLabel(), + edge.getToPartitionKeyColumns(), + edge.getToClusteringColumns(), + "TO"); + }); + Map options = getOptions(); + RelationParser.appendOptions(options, builder); + builder.append(";"); + if (isVirtual()) { + builder.append(" */"); + } + return builder.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java new file mode 100644 index 00000000000..ac4c1057fbf --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseIndexMetadata.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; + +/** + * Specialized index metadata for DSE. + * + *

This type exists only for future extensibility; currently, it is identical to {@link + * IndexMetadata}. + */ +public interface DseIndexMetadata extends IndexMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java new file mode 100644 index 00000000000..bc5cb002802 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseKeyspaceMetadata.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; + +/** + * Specialized keyspace metadata for DSE. + * + *

Notes: + * + *

    + *
  • this type can always be safely downcast to {@link DseGraphKeyspaceMetadata} (the only + * reason the two interfaces are separate is for backward compatibility). + *
  • all returned elements can be cast to their DSE counterparts, for example {@link + * TableMetadata} to {@link DseTableMetadata}. + *
+ */ +public interface DseKeyspaceMetadata extends KeyspaceMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java new file mode 100644 index 00000000000..55b36cb7fe5 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseRelationMetadata.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; + +/** + * Specialized table or materialized view metadata for DSE. + * + *

This type exists only for future extensibility; currently, it is identical to {@link + * RelationMetadata}. + * + *

Note that all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}. + */ +public interface DseRelationMetadata extends RelationMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java new file mode 100644 index 00000000000..a140f93bc2e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseTableMetadata.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; + +/** + * Specialized table metadata for DSE. + * + *

Notes: + * + *

    + *
  • this type can always be safely downcast to {@link DseGraphTableMetadata} (the only reason + * the two interfaces are separate is for backward compatibility). + *
  • all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}, and all + * {@link IndexMetadata} to {@link DseIndexMetadata}. + *
+ */ +public interface DseTableMetadata extends DseRelationMetadata, TableMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java new file mode 100644 index 00000000000..c08a7eb1d60 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseVertexMetadata.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** Vertex metadata, for a table that was created with CREATE TABLE ... WITH VERTEX LABEL. */ +public interface DseVertexMetadata { + + /** The label of the vertex in graph. */ + @NonNull + CqlIdentifier getLabelName(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java new file mode 100644 index 00000000000..0f68ea7e456 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metadata/schema/DseViewMetadata.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; + +/** + * Specialized materialized view metadata for DSE. + * + *

This type exists only for future extensibility; currently, it is identical to {@link + * ViewMetadata}. + * + *

Note that all returned {@link ColumnMetadata} can be cast to {@link DseColumnMetadata}. + */ +public interface DseViewMetadata extends DseRelationMetadata, ViewMetadata {} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java new file mode 100644 index 00000000000..cf4b4d0aa18 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseNodeMetric.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metrics; + +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; + +/** See {@code reference.conf} for a description of each metric. */ +public enum DseNodeMetric implements NodeMetric { + GRAPH_MESSAGES("graph-messages"); + + private static final Map BY_PATH = sortByPath(); + + private final String path; + + DseNodeMetric(String path) { + this.path = path; + } + + @Override + @NonNull + public String getPath() { + return path; + } + + @NonNull + public static DseNodeMetric fromPath(@NonNull String path) { + DseNodeMetric metric = BY_PATH.get(path); + if (metric == null) { + throw new IllegalArgumentException("Unknown node metric path " + path); + } + return metric; + } + + private static Map sortByPath() { + ImmutableMap.Builder result = ImmutableMap.builder(); + for (DseNodeMetric value : values()) { + result.put(value.getPath(), value); + } + return result.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java new file mode 100644 index 00000000000..79584f3c44a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/metrics/DseSessionMetric.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metrics; + +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; + +/** See {@code reference.conf} for a description of each metric. */ +public enum DseSessionMetric implements SessionMetric { + CONTINUOUS_CQL_REQUESTS("continuous-cql-requests"), + GRAPH_REQUESTS("graph-requests"), + GRAPH_CLIENT_TIMEOUTS("graph-client-timeouts"), + ; + + private static final Map BY_PATH = sortByPath(); + + private final String path; + + DseSessionMetric(String path) { + this.path = path; + } + + @NonNull + @Override + public String getPath() { + return path; + } + + @NonNull + public static DseSessionMetric fromPath(@NonNull String path) { + DseSessionMetric metric = BY_PATH.get(path); + if (metric == null) { + throw new IllegalArgumentException("Unknown DSE session metric path " + path); + } + return metric; + } + + private static Map sortByPath() { + ImmutableMap.Builder result = ImmutableMap.builder(); + for (DseSessionMetric value : values()) { + result.put(value.getPath(), value); + } + return result.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java b/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java new file mode 100644 index 00000000000..8bf4d80699d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/servererrors/UnfitClientException.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.servererrors; + +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * A server-side error triggered when DSE can't send asynchronous results back to the client. + * + *

Currently, this is used when the client is unable to keep up with the rate during a continuous + * paging session. + * + *
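+ * <p>For example, a sketch of handling this error around a continuous paging query (assumes a
+ * session implementing {@code ContinuousSession}; the statement is hypothetical):
+ *
+ * <pre>{@code
+ * try {
+ *   session.executeContinuously(SimpleStatement.newInstance("SELECT * FROM ks.tbl"));
+ * } catch (UnfitClientException e) {
+ *   // The coordinator dropped the operation because this client consumed pages too slowly;
+ *   // consider consuming faster or tuning the continuous paging options.
+ * }
+ * }</pre>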

Note that the protocol specification refers to this error as {@code CLIENT_WRITE_FAILURE}; we + * don't follow that terminology because it would be too misleading (this is not a client error, and + * it doesn't occur while writing data to DSE). + */ +public class UnfitClientException extends CoordinatorException { + + public UnfitClientException(@NonNull Node coordinator, @NonNull String message) { + this(coordinator, message, null, false); + } + + private UnfitClientException( + @NonNull Node coordinator, + @NonNull String message, + @Nullable ExecutionInfo executionInfo, + boolean writableStackTrace) { + super(coordinator, message, executionInfo, writableStackTrace); + } + + @Override + @NonNull + public UnfitClientException copy() { + return new UnfitClientException(getCoordinator(), getMessage(), getExecutionInfo(), true); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java b/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java new file mode 100644 index 00000000000..6003274e09a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/type/DseDataTypes.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.type; + +import com.datastax.oss.driver.api.core.type.CustomType; +import com.datastax.oss.driver.api.core.type.DataTypes; + +/** Extends {@link DataTypes} to handle DSE-specific types. */ +public class DseDataTypes extends DataTypes { + + public static final CustomType LINE_STRING = + (CustomType) custom("org.apache.cassandra.db.marshal.LineStringType"); + + public static final CustomType POINT = + (CustomType) custom("org.apache.cassandra.db.marshal.PointType"); + + public static final CustomType POLYGON = + (CustomType) custom("org.apache.cassandra.db.marshal.PolygonType"); + + public static final CustomType DATE_RANGE = + (CustomType) custom("org.apache.cassandra.db.marshal.DateRangeType"); +} diff --git a/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java b/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java new file mode 100644 index 00000000000..fb0225970b4 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/api/core/type/codec/DseTypeCodecs.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.type.codec; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.data.time.DateRange; +import com.datastax.dse.driver.internal.core.type.codec.geometry.LineStringCodec; +import com.datastax.dse.driver.internal.core.type.codec.geometry.PointCodec; +import com.datastax.dse.driver.internal.core.type.codec.geometry.PolygonCodec; +import com.datastax.dse.driver.internal.core.type.codec.time.DateRangeCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; + +/** Extends {@link TypeCodecs} to handle DSE-specific types. */ +public class DseTypeCodecs extends TypeCodecs { + + public static final TypeCodec LINE_STRING = new LineStringCodec(); + + public static final TypeCodec POINT = new PointCodec(); + + public static final TypeCodec POLYGON = new PolygonCodec(); + + public static final TypeCodec DATE_RANGE = new DateRangeCodec(); +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java b/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java new file mode 100644 index 00000000000..95f245061d2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/DseProtocolFeature.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core; + +import com.datastax.oss.driver.internal.core.ProtocolFeature; + +/** + * Features that are supported by DataStax Enterprise (DSE) protocol versions. + * + * @see com.datastax.dse.driver.api.core.DseProtocolVersion + * @see com.datastax.oss.driver.internal.core.DefaultProtocolFeature + */ +public enum DseProtocolFeature implements ProtocolFeature { + + /** + * The ability to execute continuous paging requests. 
+ * + * @see CASSANDRA-11521 + * @see com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession + */ + CONTINUOUS_PAGING, + ; +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java b/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java new file mode 100644 index 00000000000..e4dd6f93bf7 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/InsightsClientLifecycleListener.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core; + +import static com.datastax.dse.driver.api.core.config.DseDriverOption.MONITOR_REPORTING_ENABLED; + +import com.datastax.dse.driver.internal.core.insights.InsightsClient; +import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.context.LifecycleListener; + +public class InsightsClientLifecycleListener implements LifecycleListener { + private static final boolean DEFAULT_INSIGHTS_ENABLED = true; + private static final long STATUS_EVENT_DELAY_MILLIS = 300000L; + private final InternalDriverContext context; + private final StackTraceElement[] initCallStackTrace; + private volatile InsightsClient insightsClient; + + public InsightsClientLifecycleListener( + InternalDriverContext context, StackTraceElement[] initCallStackTrace) { + this.context = context; + this.initCallStackTrace = initCallStackTrace; + } + + @Override + public void onSessionReady() { + boolean monitorReportingEnabled = + context + .getConfig() + .getDefaultProfile() + .getBoolean(MONITOR_REPORTING_ENABLED, DEFAULT_INSIGHTS_ENABLED); + + this.insightsClient = + InsightsClient.createInsightsClient( + new InsightsConfiguration( + monitorReportingEnabled, + STATUS_EVENT_DELAY_MILLIS, + context.getNettyOptions().adminEventExecutorGroup().next()), + context, + initCallStackTrace); + insightsClient.sendStartupMessage(); + insightsClient.scheduleStatusMessageSend(); + } + + @Override + public void close() { + if (insightsClient != null) { + insightsClient.shutdown(); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java new file mode 100644 index 00000000000..38f1644bcb7 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/AuthUtils.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.auth; + +import com.datastax.oss.driver.api.core.auth.AuthenticationException; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import java.util.ArrayList; +import java.util.List; + +public class AuthUtils { + /** + * Utility method that checks that the given configuration options are present, and throws an + * exception listing any that are missing. + * + * @param config Current working driver configuration + * @param authenticatorName name of authenticator for logging purposes + * @param endPoint the host we are attempting to authenticate to + * @param options a list of DriverOptions to check to see if they are present + */ + public static void validateConfigPresent( + DriverExecutionProfile config, + String authenticatorName, + EndPoint endPoint, + DriverOption... options) { + List<DriverOption> missingOptions = new ArrayList<>(); + for (DriverOption option : options) { + if (!config.isDefined(option)) { + missingOptions.add(option); + } + } + // Only throw once every option has been checked, so that the message lists all missing ones + if (!missingOptions.isEmpty()) { + String message = + "Missing required configuration options for authenticator " + authenticatorName + ":"; + for (DriverOption missingOption : missingOptions) { + message = message + " " + missingOption.getPath(); + } + throw new AuthenticationException(endPoint, message); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java new file mode 100644 index 00000000000..6ef6596a870 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DseGssApiAuthProvider.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.auth;
+
+import com.datastax.dse.driver.api.core.auth.DseGssApiAuthProviderBase;
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.oss.driver.api.core.auth.AuthProvider;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.api.core.metadata.EndPoint;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Map;
+import net.jcip.annotations.ThreadSafe;
+
+/**
+ * {@link AuthProvider} that provides GSSAPI authenticator instances for clients to connect to DSE
+ * clusters secured with {@code DseAuthenticator}.
+ *
+ * <p>To activate this provider an {@code auth-provider} section must be included in the driver
+ * configuration, for example:
+ *
+ * <pre>
+ * dse-java-driver {
+ *  auth-provider {
+ *      class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
+ *      login-configuration {
+ *          principal = "user principal here ex cassandra@DATASTAX.COM"
+ *          useKeyTab = "true"
+ *          refreshKrb5Config = "true"
+ *          keyTab = "Path to keytab file here"
+ *      }
+ *   }
+ * }
+ * </pre>
+ *
+ * <p><b>Kerberos Authentication</b>
+ *
+ * <p>Keytab and ticket cache settings are specified using a standard JAAS configuration file. The
+ * location of the file can be set using the {@code java.security.auth.login.config} system
+ * property or by adding a {@code login.config.url.n} entry in the {@code java.security}
+ * properties file. Alternatively a login-configuration section can be included in the driver
+ * configuration.
+ *
+ * <p>See the following documents for further details:
+ *
+ * <ol>
+ *   <li>JAAS Login Configuration File;
+ *   <li>Krb5LoginModule options;
+ *   <li>JAAS Authentication Tutorial, for more on JAAS in general.
+ * </ol>
+ *
+ * <p><b>Authentication using ticket cache</b>
+ *
+ * <p>Run {@code kinit} to obtain a ticket and populate the cache before connecting. JAAS config:
+ *
+ * <pre>
+ * DseClient {
+ *   com.sun.security.auth.module.Krb5LoginModule required
+ *     useTicketCache=true
+ *     renewTGT=true;
+ * };
+ * </pre>
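+ *
+ * <p>For instance, a minimal sketch of priming the cache before starting the application (the
+ * principal below is a placeholder):
+ *
+ * <pre>
+ * kinit cassandra@DATASTAX.COM
+ * </pre>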
+ *
+ * <p><b>Authentication using a keytab file</b>
+ *
+ * <p>To enable authentication using a keytab file, specify its location on disk. If your keytab
+ * contains more than one principal key, you should also specify which one to select. This
+ * information can also be specified in the driver config, under the login-configuration section.
+ *
+ * <pre>
+ * DseClient {
+ *     com.sun.security.auth.module.Krb5LoginModule required
+ *       useKeyTab=true
+ *       keyTab="/path/to/file.keytab"
+ *       principal="user@MYDOMAIN.COM";
+ * };
+ * </pre>
+ *
+ * <p><b>Specifying SASL protocol name</b>
+ *
+ * <p>The SASL protocol name used by this auth provider defaults to
+ * "{@value #DEFAULT_SASL_SERVICE_NAME}".
+ *
+ * <p><b>Important:</b> the SASL protocol name should match the username of the Kerberos service
+ * principal used by the DSE server. This information is specified in the dse.yaml file by the
+ * {@code service_principal} option under the {@code kerberos_options} section, and may vary from
+ * one DSE installation to another, especially if you installed DSE with an automated package
+ * installer.
+ *
+ * <p>For example, if your dse.yaml file contains the following:
+ *
+ * <pre>{@code
+ * kerberos_options:
+ *     ...
+ *     service_principal: cassandra/my.host.com@MY.REALM.COM
+ * }</pre>
+ *
+ * The correct SASL protocol name to use when authenticating against this DSE server is
+ * "{@code cassandra}".
+ *
+ * <p>Should you need to change the SASL protocol name, use one of the methods below:
+ *
+ * <ol>
+ *   <li>Specify the service name in the driver config:
+ *       <pre>
+ * dse-java-driver {
+ *   auth-provider {
+ *     class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
+ *     service = "alternate"
+ *   }
+ * }
+ * </pre>
+ *   <li>Specify the service name with the {@code dse.sasl.service} system property when starting
+ *       your application, e.g. {@code -Ddse.sasl.service=cassandra}.
+ * </ol>
+ *
+ * If a non-null SASL service name is provided to the aforementioned config, that name takes
+ * precedence over the contents of the {@code dse.sasl.service} system property.
+ *
+ * <p>Should internal SASL properties, such as qop, need to be set, include a sasl-properties
+ * section in the driver config, for example:
+ *
+ * <pre>
+ * dse-java-driver {
+ *   auth-provider {
+ *     class = com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider
+ *     sasl-properties {
+ *       javax.security.sasl.qop = "auth-conf"
+ *     }
+ *   }
+ * }
+ * </pre>
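+ *
+ * <p>As an illustrative sketch only (the JAAS file path and query are placeholders, and the
+ * {@code CqlSession} usage is the standard driver API rather than anything specific to this
+ * class), an application could select a JAAS login configuration file at startup with:
+ *
+ * <pre>
+ * System.setProperty("java.security.auth.login.config", "/path/to/jaas.conf");
+ * try (CqlSession session = CqlSession.builder().build()) {
+ *   session.execute("SELECT release_version FROM system.local");
+ * }
+ * </pre>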
+ */
+@ThreadSafe
+public class DseGssApiAuthProvider extends DseGssApiAuthProviderBase {
+
+  private final DriverExecutionProfile config;
+
+  public DseGssApiAuthProvider(DriverContext context) {
+    super(context.getSessionName());
+
+    this.config = context.getConfig().getDefaultProfile();
+  }
+
+  @NonNull
+  @Override
+  protected GssApiOptions getOptions(
+      @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) {
+    // A login configuration is always necessary; throw an exception if that option is missing.
+    AuthUtils.validateConfigPresent(
+        config,
+        DseGssApiAuthProvider.class.getName(),
+        endPoint,
+        DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION);
+
+    GssApiOptions.Builder optionsBuilder = GssApiOptions.builder();
+
+    if (config.isDefined(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID)) {
+      optionsBuilder.withAuthorizationId(
+          config.getString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID));
+    }
+    if (config.isDefined(DseDriverOption.AUTH_PROVIDER_SERVICE)) {
+      optionsBuilder.withSaslProtocol(config.getString(DseDriverOption.AUTH_PROVIDER_SERVICE));
+    }
+    if (config.isDefined(DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES)) {
+      for (Map.Entry<String, String> entry :
+          config.getStringMap(DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES).entrySet()) {
+        optionsBuilder.addSaslProperty(entry.getKey(), entry.getValue());
+      }
+    }
+    Map<String, String> loginConfigurationMap =
+        config.getStringMap(DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION);
+    optionsBuilder.withLoginConfiguration(loginConfigurationMap);
+    return optionsBuilder.build();
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java
new file mode 100644
index 00000000000..6cf82aef03e
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/auth/DsePlainTextAuthProvider.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.auth;
+
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider;
+import net.jcip.annotations.ThreadSafe;
+
+/**
+ * @deprecated The driver's default plain text providers now support both Apache Cassandra and DSE.
+ *     This type was preserved for backward compatibility, but {@link PlainTextAuthProvider} should
+ *     be used instead.
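+ *     <p>For reference, a minimal sketch (credentials are placeholders) of the equivalent
+ *     driver configuration using the default provider:
+ *     <pre>
+ * datastax-java-driver.advanced.auth-provider {
+ *   class = PlainTextAuthProvider
+ *   username = cassandra
+ *   password = cassandra
+ * }
+ * </pre>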
+ */ +@ThreadSafe +@Deprecated +public class DsePlainTextAuthProvider extends PlainTextAuthProvider { + + public DsePlainTextAuthProvider(DriverContext context) { + super(context); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java new file mode 100644 index 00000000000..15aab143150 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/DseConversions.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.servererrors.UnfitClientException; +import com.datastax.dse.protocol.internal.DseProtocolConstants; +import com.datastax.dse.protocol.internal.request.query.ContinuousPagingOptions; +import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; +import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; +import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.request.Execute; +import com.datastax.oss.protocol.internal.request.Query; +import com.datastax.oss.protocol.internal.response.Error; +import com.datastax.oss.protocol.internal.util.Bytes; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class DseConversions { + + public static Message toContinuousPagingMessage( + Statement statement, DriverExecutionProfile config, InternalDriverContext context) { + ConsistencyLevelRegistry consistencyLevelRegistry = context.getConsistencyLevelRegistry(); + 
ConsistencyLevel consistency = statement.getConsistencyLevel(); + int consistencyCode = + (consistency == null) + ? consistencyLevelRegistry.nameToCode( + config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + : consistency.getProtocolCode(); + int pageSize = config.getInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE); + boolean pageSizeInBytes = config.getBoolean(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES); + int maxPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES); + int maxPagesPerSecond = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND); + int maxEnqueuedPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); + ContinuousPagingOptions options = + new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages); + ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel(); + int serialConsistencyCode = + (serialConsistency == null) + ? consistencyLevelRegistry.nameToCode( + config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) + : serialConsistency.getProtocolCode(); + long timestamp = statement.getQueryTimestamp(); + if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { + timestamp = context.getTimestampGenerator().next(); + } + CodecRegistry codecRegistry = context.getCodecRegistry(); + ProtocolVersion protocolVersion = context.getProtocolVersion(); + ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); + CqlIdentifier keyspace = statement.getKeyspace(); + if (statement instanceof SimpleStatement) { + SimpleStatement simpleStatement = (SimpleStatement) statement; + List positionalValues = simpleStatement.getPositionalValues(); + Map namedValues = simpleStatement.getNamedValues(); + if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { + throw new IllegalArgumentException( + "Can't have both positional and named values in a statement."); + } + if (keyspace != null + && !protocolVersionRegistry.supports( + protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { + throw new IllegalArgumentException( + "Can't use per-request keyspace with protocol " + protocolVersion); + } + DseQueryOptions queryOptions = + new DseQueryOptions( + consistencyCode, + Conversions.encode(positionalValues, codecRegistry, protocolVersion), + Conversions.encode(namedValues, codecRegistry, protocolVersion), + false, + pageSize, + statement.getPagingState(), + serialConsistencyCode, + timestamp, + (keyspace == null) ? null : keyspace.asInternal(), + pageSizeInBytes, + options); + return new Query(simpleStatement.getQuery(), queryOptions); + } else if (statement instanceof BoundStatement) { + BoundStatement boundStatement = (BoundStatement) statement; + if (!protocolVersionRegistry.supports( + protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) { + Conversions.ensureAllSet(boundStatement); + } + boolean skipMetadata = + boundStatement.getPreparedStatement().getResultSetDefinitions().size() > 0; + DseQueryOptions queryOptions = + new DseQueryOptions( + consistencyCode, + boundStatement.getValues(), + Collections.emptyMap(), + skipMetadata, + pageSize, + statement.getPagingState(), + serialConsistencyCode, + timestamp, + null, + pageSizeInBytes, + options); + PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); + ByteBuffer id = preparedStatement.getId(); + ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId(); + return new Execute( + Bytes.getArray(id), + (resultMetadataId == null) ? 
null : Bytes.getArray(resultMetadataId), + queryOptions); + } else { + throw new IllegalArgumentException( + "Unsupported statement type: " + statement.getClass().getName()); + } + } + + public static CoordinatorException toThrowable( + Node node, Error errorMessage, InternalDriverContext context) { + switch (errorMessage.code) { + case DseProtocolConstants.ErrorCode.CLIENT_WRITE_FAILURE: + return new UnfitClientException(node, errorMessage.message); + default: + return Conversions.toThrowable(node, errorMessage, context); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java new file mode 100644 index 00000000000..8a098bf2895 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestAsyncProcessor.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.dse.driver.internal.core.cql.continuous;
+
+import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.session.Request;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.session.DefaultSession;
+import com.datastax.oss.driver.internal.core.session.RequestProcessor;
+import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures;
+import java.util.concurrent.CompletionStage;
+import net.jcip.annotations.ThreadSafe;
+
+@ThreadSafe
+public class ContinuousCqlRequestAsyncProcessor
+    implements RequestProcessor<Statement<?>, CompletionStage<ContinuousAsyncResultSet>> {
+
+  public static final GenericType<CompletionStage<ContinuousAsyncResultSet>>
+      CONTINUOUS_RESULT_ASYNC = new GenericType<CompletionStage<ContinuousAsyncResultSet>>() {};
+
+  @Override
+  public boolean canProcess(Request request, GenericType<?> resultType) {
+    return request instanceof Statement && resultType.equals(CONTINUOUS_RESULT_ASYNC);
+  }
+
+  @Override
+  public CompletionStage<ContinuousAsyncResultSet> process(
+      Statement<?> request,
+      DefaultSession session,
+      InternalDriverContext context,
+      String sessionLogPrefix) {
+    return new ContinuousCqlRequestHandler(request, session, context, sessionLogPrefix).handle();
+  }
+
+  @Override
+  public CompletionStage<ContinuousAsyncResultSet> newFailure(RuntimeException error) {
+    return CompletableFutures.failedFuture(error);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java
new file mode 100644
index 00000000000..dd308c11854
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandler.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.dse.driver.internal.core.cql.DseConversions; +import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.cql.DefaultRow; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.response.result.Rows; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import net.jcip.annotations.ThreadSafe; + +/** + * Handles a request that supports multiple response messages (a.k.a. continuous paging request). + */ +@ThreadSafe +public class ContinuousCqlRequestHandler + extends ContinuousRequestHandlerBase, ContinuousAsyncResultSet> { + + ContinuousCqlRequestHandler( + @NonNull Statement statement, + @NonNull DefaultSession session, + @NonNull InternalDriverContext context, + @NonNull String sessionLogPrefix) { + super( + statement, + session, + context, + sessionLogPrefix, + ContinuousAsyncResultSet.class, + false, + DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, + DseSessionMetric.CONTINUOUS_CQL_REQUESTS, + DefaultNodeMetric.CQL_MESSAGES); + // NOTE that ordering of the following statement matters. + // We should register this request after all fields have been initialized. 
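+    // (register() may invoke onThrottleReady() synchronously, e.g. with a pass-through
+    // throttler, and that callback reads the fields assigned above.)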
+ throttler.register(this); + } + + @NonNull + @Override + protected Duration getGlobalTimeout() { + return Duration.ZERO; + } + + @NonNull + @Override + protected Duration getPageTimeout(@NonNull Statement statement, int pageNumber) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + if (pageNumber == 1) { + return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE); + } else { + return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES); + } + } + + @NonNull + @Override + protected Duration getReviseRequestTimeout(@NonNull Statement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return executionProfile.getDuration(DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES); + } + + @Override + protected int getMaxEnqueuedPages(@NonNull Statement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return executionProfile.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); + } + + @Override + protected int getMaxPages(@NonNull Statement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return executionProfile.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES); + } + + @NonNull + @Override + protected Message getMessage(@NonNull Statement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return DseConversions.toContinuousPagingMessage(statement, executionProfile, context); + } + + @Override + protected boolean isTracingEnabled(@NonNull Statement statement) { + return false; + } + + @NonNull + @Override + protected Map createPayload(@NonNull Statement statement) { + return statement.getCustomPayload(); + } + + @NonNull + @Override + protected ContinuousAsyncResultSet createEmptyResultSet(@NonNull ExecutionInfo executionInfo) { + return DefaultContinuousAsyncResultSet.empty(executionInfo); + } + + @NonNull + @Override + protected DefaultContinuousAsyncResultSet createResultSet( + @NonNull Statement statement, + @NonNull Rows rows, + @NonNull ExecutionInfo executionInfo, + @NonNull ColumnDefinitions columnDefinitions) { + Queue> data = rows.getData(); + CountingIterator iterator = + new CountingIterator(data.size()) { + @Override + protected Row computeNext() { + List rowData = data.poll(); + return (rowData == null) + ? endOfData() + : new DefaultRow(columnDefinitions, rowData, context); + } + }; + DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); + return new DefaultContinuousAsyncResultSet( + iterator, + columnDefinitions, + metadata.continuousPageNumber, + !metadata.isLastContinuousPage, + executionInfo, + this); + } + + @Override + protected int pageNumber(@NonNull ContinuousAsyncResultSet resultSet) { + return resultSet.pageNumber(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java new file mode 100644 index 00000000000..f151eb7eae2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestSyncProcessor.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.cql.continuous;
+
+import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet;
+import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.session.Request;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.session.DefaultSession;
+import com.datastax.oss.driver.internal.core.session.RequestProcessor;
+import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation;
+import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures;
+import net.jcip.annotations.ThreadSafe;
+
+@ThreadSafe
+public class ContinuousCqlRequestSyncProcessor
+    implements RequestProcessor<Statement<?>, ContinuousResultSet> {
+
+  public static final GenericType<ContinuousResultSet> CONTINUOUS_RESULT_SYNC =
+      GenericType.of(ContinuousResultSet.class);
+
+  private final ContinuousCqlRequestAsyncProcessor asyncProcessor;
+
+  public ContinuousCqlRequestSyncProcessor(ContinuousCqlRequestAsyncProcessor asyncProcessor) {
+    this.asyncProcessor = asyncProcessor;
+  }
+
+  @Override
+  public boolean canProcess(Request request, GenericType<?> resultType) {
+    return request instanceof Statement && resultType.equals(CONTINUOUS_RESULT_SYNC);
+  }
+
+  @Override
+  public ContinuousResultSet process(
+      Statement<?> request,
+      DefaultSession session,
+      InternalDriverContext context,
+      String sessionLogPrefix) {
+    BlockingOperation.checkNotDriverThread();
+    ContinuousAsyncResultSet firstPage =
+        CompletableFutures.getUninterruptibly(
+            asyncProcessor.process(request, session, context, sessionLogPrefix));
+    return new DefaultContinuousResultSet(firstPage);
+  }
+
+  @Override
+  public ContinuousResultSet newFailure(RuntimeException error) {
+    throw error;
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java
new file mode 100644
index 00000000000..0453022cb6a
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousRequestHandlerBase.java
@@ -0,0 +1,1645 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.dse.driver.internal.core.DseProtocolFeature; +import com.datastax.dse.driver.internal.core.cql.DseConversions; +import com.datastax.dse.protocol.internal.request.Revise; +import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.NodeUnavailableException; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.RequestThrottlingException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.connection.FrameTooLongException; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; +import com.datastax.oss.driver.api.core.servererrors.ProtocolError; +import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; +import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; +import com.datastax.oss.driver.api.core.session.throttling.Throttled; +import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; +import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; +import com.datastax.oss.driver.internal.core.channel.DriverChannel; +import com.datastax.oss.driver.internal.core.channel.ResponseCallback; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.cql.DefaultExecutionInfo; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; 
+import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.RepreparePayload; +import com.datastax.oss.driver.internal.core.util.Loggers; +import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.request.Prepare; +import com.datastax.oss.protocol.internal.response.Error; +import com.datastax.oss.protocol.internal.response.Result; +import com.datastax.oss.protocol.internal.response.error.Unprepared; +import com.datastax.oss.protocol.internal.response.result.Rows; +import com.datastax.oss.protocol.internal.response.result.Void; +import com.datastax.oss.protocol.internal.util.Bytes; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import io.netty.handler.codec.EncoderException; +import io.netty.util.Timeout; +import io.netty.util.Timer; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.AbstractMap; +import java.util.ArrayDeque; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; +import net.jcip.annotations.GuardedBy; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Handles a request that supports multiple response messages (a.k.a. continuous paging request). + */ +@ThreadSafe +public abstract class ContinuousRequestHandlerBase + implements Throttled { + + private static final Logger LOG = LoggerFactory.getLogger(ContinuousRequestHandlerBase.class); + + protected final String logPrefix; + protected final StatementT initialStatement; + protected final DefaultSession session; + private final CqlIdentifier keyspace; + protected final InternalDriverContext context; + private final Queue queryPlan; + protected final RequestThrottler throttler; + private final boolean protocolBackpressureAvailable; + private final Timer timer; + private final SessionMetricUpdater sessionMetricUpdater; + private final boolean specExecEnabled; + private final SessionMetric clientTimeoutsMetric; + private final SessionMetric continuousRequestsMetric; + private final NodeMetric messagesMetric; + private final List scheduledExecutions; + + // The errors on the nodes that were already tried. + // We don't use a map because nodes can appear multiple times. + protected final List> errors = new CopyOnWriteArrayList<>(); + + /** + * The list of in-flight executions, one per node. Executions may be triggered by speculative + * executions or retries. An execution is added to this list when the write operation completes. 
+ * It is removed from this list when the callback has done reading responses. + */ + private final List inFlightCallbacks = new CopyOnWriteArrayList<>(); + + /** The callback selected to stream results back to the client. */ + private final CompletableFuture chosenCallback = new CompletableFuture<>(); + + /** + * How many speculative executions are currently running (including the initial execution). We + * track this in order to know when to fail the request if all executions have reached the end of + * the query plan. + */ + private final AtomicInteger activeExecutionsCount = new AtomicInteger(0); + + /** + * How many speculative executions have started (excluding the initial execution), whether they + * have completed or not. We track this in order to fill execution info objects with this + * information. + */ + protected final AtomicInteger startedSpeculativeExecutionsCount = new AtomicInteger(0); + + // Set when the execution starts, and is never modified after. + private final long startTimeNanos; + private volatile Timeout globalTimeout; + + private final Class resultSetClass; + + public ContinuousRequestHandlerBase( + @NonNull StatementT statement, + @NonNull DefaultSession session, + @NonNull InternalDriverContext context, + @NonNull String sessionLogPrefix, + @NonNull Class resultSetClass, + boolean specExecEnabled, + SessionMetric clientTimeoutsMetric, + SessionMetric continuousRequestsMetric, + NodeMetric messagesMetric) { + this.resultSetClass = resultSetClass; + + ProtocolVersion protocolVersion = context.getProtocolVersion(); + if (!context + .getProtocolVersionRegistry() + .supports(protocolVersion, DseProtocolFeature.CONTINUOUS_PAGING)) { + throw new IllegalStateException( + "Cannot execute continuous paging requests with protocol version " + protocolVersion); + } + this.clientTimeoutsMetric = clientTimeoutsMetric; + this.continuousRequestsMetric = continuousRequestsMetric; + this.messagesMetric = messagesMetric; + this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); + LOG.trace("[{}] Creating new continuous handler for request {}", logPrefix, statement); + this.initialStatement = statement; + this.session = session; + this.keyspace = session.getKeyspace().orElse(null); + this.context = context; + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + this.queryPlan = + statement.getNode() != null + ? new SimpleQueryPlan(statement.getNode()) + : context + .getLoadBalancingPolicyWrapper() + .newQueryPlan(statement, executionProfile.getName(), session); + this.timer = context.getNettyOptions().getTimer(); + + this.protocolBackpressureAvailable = + protocolVersion.getCode() >= DseProtocolVersion.DSE_V2.getCode(); + this.throttler = context.getRequestThrottler(); + this.sessionMetricUpdater = session.getMetricUpdater(); + this.startTimeNanos = System.nanoTime(); + this.specExecEnabled = specExecEnabled; + this.scheduledExecutions = this.specExecEnabled ? 
new CopyOnWriteArrayList<>() : null; + } + + @NonNull + protected abstract Duration getGlobalTimeout(); + + @NonNull + protected abstract Duration getPageTimeout(@NonNull StatementT statement, int pageNumber); + + @NonNull + protected abstract Duration getReviseRequestTimeout(@NonNull StatementT statement); + + protected abstract int getMaxEnqueuedPages(@NonNull StatementT statement); + + protected abstract int getMaxPages(@NonNull StatementT statement); + + @NonNull + protected abstract Message getMessage(@NonNull StatementT statement); + + protected abstract boolean isTracingEnabled(@NonNull StatementT statement); + + @NonNull + protected abstract Map createPayload(@NonNull StatementT statement); + + @NonNull + protected abstract ResultSetT createEmptyResultSet(@NonNull ExecutionInfo executionInfo); + + protected abstract int pageNumber(@NonNull ResultSetT resultSet); + + @NonNull + protected abstract ResultSetT createResultSet( + @NonNull StatementT statement, + @NonNull Rows rows, + @NonNull ExecutionInfo executionInfo, + @NonNull ColumnDefinitions columnDefinitions) + throws IOException; + + // MAIN LIFECYCLE + + @Override + public void onThrottleReady(boolean wasDelayed) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(initialStatement, context); + if (wasDelayed + // avoid call to nanoTime() if metric is disabled: + && sessionMetricUpdater.isEnabled( + DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { + session + .getMetricUpdater() + .updateTimer( + DefaultSessionMetric.THROTTLING_DELAY, + executionProfile.getName(), + System.nanoTime() - startTimeNanos, + TimeUnit.NANOSECONDS); + } + activeExecutionsCount.incrementAndGet(); + sendRequest(initialStatement, null, 0, 0, specExecEnabled); + } + + @Override + public void onThrottleFailure(@NonNull RequestThrottlingException error) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(initialStatement, context); + session + .getMetricUpdater() + .incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); + abortGlobalRequestOrChosenCallback(error); + } + + private void abortGlobalRequestOrChosenCallback(@NonNull Throwable error) { + if (!chosenCallback.completeExceptionally(error)) { + chosenCallback.thenAccept(callback -> callback.abort(error, false)); + } + } + + public CompletionStage handle() { + globalTimeout = scheduleGlobalTimeout(); + return fetchNextPage(); + } + + /** + * Builds the future that will get returned to the user from the initial execute call or a + * fetchNextPage() on the async API. + */ + public CompletionStage fetchNextPage() { + CompletableFuture result = new CompletableFuture<>(); + + // This is equivalent to + // `chosenCallback.thenCompose(NodeResponseCallback::dequeueOrCreatePending)`, except + // that we need to cancel `result` if `resultSetError` is a CancellationException. 
+ chosenCallback.whenComplete( + (callback, callbackError) -> { + if (callbackError != null) { + result.completeExceptionally(callbackError); + } else { + callback + .dequeueOrCreatePending() + .whenComplete( + (resultSet, resultSetError) -> { + if (resultSetError != null) { + result.completeExceptionally(resultSetError); + } else { + result.complete(resultSet); + } + }); + } + }); + + // If the user cancels the future, propagate to our internal components + result.whenComplete( + (rs, t) -> { + if (t instanceof CancellationException) { + cancel(); + } + }); + + return result; + } + + /** + * Sends the initial request to the next available node. + * + * @param node if not null, it will be attempted first before the rest of the query plan. It + * happens only when we retry on the same host. + * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc. + * @param retryCount the number of times that the retry policy was invoked for this execution + * already (note that some internal retries don't go through the policy, and therefore don't + * increment this counter) + * @param scheduleSpeculativeExecution whether to schedule the next speculative execution + */ + private void sendRequest( + StatementT statement, + @Nullable Node node, + int currentExecutionIndex, + int retryCount, + boolean scheduleSpeculativeExecution) { + DriverChannel channel = null; + if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { + while ((node = queryPlan.poll()) != null) { + channel = session.getChannel(node, logPrefix); + if (channel != null) { + break; + } else { + recordError(node, new NodeUnavailableException(node)); + } + } + } + if (channel == null) { + // We've reached the end of the query plan without finding any node to write to; abort the + // continuous paging session. + if (activeExecutionsCount.decrementAndGet() == 0) { + abortGlobalRequestOrChosenCallback(AllNodesFailedException.fromErrors(errors)); + } + } else if (!chosenCallback.isDone()) { + NodeResponseCallback nodeResponseCallback = + new NodeResponseCallback( + statement, + node, + channel, + currentExecutionIndex, + retryCount, + scheduleSpeculativeExecution, + logPrefix); + inFlightCallbacks.add(nodeResponseCallback); + channel + .write( + getMessage(statement), + isTracingEnabled(statement), + createPayload(statement), + nodeResponseCallback) + .addListener(nodeResponseCallback); + } + } + + private Timeout scheduleGlobalTimeout() { + Duration globalTimeout = getGlobalTimeout(); + if (globalTimeout.toNanos() <= 0) { + return null; + } + LOG.trace("[{}] Scheduling global timeout for pages in {}", logPrefix, globalTimeout); + return timer.newTimeout( + timeout -> + abortGlobalRequestOrChosenCallback( + new DriverTimeoutException("Query timed out after " + globalTimeout)), + globalTimeout.toNanos(), + TimeUnit.NANOSECONDS); + } + + /** + * Cancels the continuous paging request. + * + *
<p>
Called from user code, see {@link DefaultContinuousAsyncResultSet#cancel()}, or from a + * driver I/O thread. + */ + public void cancel() { + // If chosenCallback is already set, this is a no-op and the chosen callback will be handled by + // cancelScheduledTasks + chosenCallback.cancel(true); + + cancelScheduledTasks(null); + cancelGlobalTimeout(); + throttler.signalCancel(this); + } + + private void cancelGlobalTimeout() { + if (globalTimeout != null) { + globalTimeout.cancel(); + } + } + + /** + * Cancel all pending and scheduled executions, except the one passed as an argument to the + * method. + * + * @param toIgnore An optional execution to ignore (will not be cancelled). + */ + private void cancelScheduledTasks(@Nullable NodeResponseCallback toIgnore) { + if (scheduledExecutions != null) { + for (Timeout scheduledExecution : scheduledExecutions) { + scheduledExecution.cancel(); + } + } + for (NodeResponseCallback callback : inFlightCallbacks) { + if (toIgnore == null || toIgnore != callback) { + callback.cancel(); + } + } + } + + @VisibleForTesting + int getState() { + try { + return chosenCallback.get().getState(); + } catch (CancellationException e) { + // Happens if the test cancels before the callback was chosen + return NodeResponseCallback.STATE_FAILED; + } catch (InterruptedException | ExecutionException e) { + // We never interrupt or fail chosenCallback (other than canceling) + throw new AssertionError("Unexpected error", e); + } + } + + @VisibleForTesting + CompletableFuture getPendingResult() { + try { + return chosenCallback.get().getPendingResult(); + } catch (Exception e) { + // chosenCallback should always be complete by the time tests call this + throw new AssertionError("Expected callback to be chosen at this point"); + } + } + + private void recordError(@NonNull Node node, @NonNull Throwable error) { + errors.add(new AbstractMap.SimpleEntry<>(node, error)); + } + + /** + * Handles the interaction with a single node in the query plan. + * + *
<p>
An instance of this class is created each time we (re)try a node. The first callback that + * has something ready to enqueue will be allowed to stream results back to the client; the others + * will be cancelled. + */ + private class NodeResponseCallback + implements ResponseCallback, GenericFutureListener> { + + private final long messageStartTimeNanos = System.nanoTime(); + private final StatementT statement; + private final Node node; + private final DriverChannel channel; + // The identifier of the current execution (0 for the initial execution, 1 for the first + // speculative execution, etc.) + private final int executionIndex; + // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for + // the first attempt of each execution). + private final String logPrefix; + private final boolean scheduleSpeculativeExecution; + + private final DriverExecutionProfile executionProfile; + + // Coordinates concurrent accesses between the client and I/O threads + private final ReentrantLock lock = new ReentrantLock(); + + // The page queue, storing responses that we have received and have not been consumed by the + // client yet. We instantiate it lazily to avoid unnecessary allocation; this is also used to + // check if the callback ever tried to enqueue something. + @GuardedBy("lock") + private Queue queue; + + // If the client requests a page and we can't serve it immediately (empty queue), then we create + // this future and have the client wait on it. Otherwise this field is null. + @GuardedBy("lock") + private CompletableFuture pendingResult; + + // How many pages were requested. This is the total number of pages requested from the + // beginning. + // It will be zero if the protocol does not support numPagesRequested (DSE_V1) + @GuardedBy("lock") + private int numPagesRequested; + + // An integer that represents the state of the continuous paging request: + // - if positive, it is the sequence number of the next expected page; + // - if negative, it is a terminal state, identified by the constants below. + @GuardedBy("lock") + private int state = 1; + + // Whether isLastResponse has returned true already + @GuardedBy("lock") + private boolean sawLastResponse; + + @GuardedBy("lock") + private boolean sentCancelRequest; + + private static final int STATE_FINISHED = -1; + private static final int STATE_FAILED = -2; + + @GuardedBy("lock") + private int streamId = -1; + + // These are set when the first page arrives, and are never modified after. + private volatile ColumnDefinitions columnDefinitions; + + private volatile Timeout pageTimeout; + + // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for + // the first attempt, 1 for the first retry, etc.). 
+ private final int retryCount; + + // SpeculativeExecution node metrics should be executed only for the first page (first + // invocation) + private final AtomicBoolean stopNodeMessageTimerReported = new AtomicBoolean(false); + private final AtomicBoolean nodeErrorReported = new AtomicBoolean(false); + private final AtomicBoolean nodeSuccessReported = new AtomicBoolean(false); + + public NodeResponseCallback( + StatementT statement, + Node node, + DriverChannel channel, + int executionIndex, + int retryCount, + boolean scheduleSpeculativeExecution, + String logPrefix) { + this.statement = statement; + this.node = node; + this.channel = channel; + this.executionIndex = executionIndex; + this.retryCount = retryCount; + this.scheduleSpeculativeExecution = scheduleSpeculativeExecution; + this.logPrefix = logPrefix + "|" + executionIndex; + this.executionProfile = Conversions.resolveExecutionProfile(statement, context); + } + + @Override + public void onStreamIdAssigned(int streamId) { + LOG.trace("[{}] Assigned streamId {} on node {}", logPrefix, streamId, node); + lock.lock(); + try { + this.streamId = streamId; + if (state < 0) { + // This happens if we were cancelled before getting the stream id, we have a request in + // flight that needs to be cancelled + releaseStreamId(); + } + } finally { + lock.unlock(); + } + } + + @Override + public boolean isLastResponse(@NonNull Frame responseFrame) { + lock.lock(); + try { + Message message = responseFrame.message; + boolean isLastResponse; + + if (sentCancelRequest) { + // The only response we accept is the SERVER_ERROR triggered by a successful cancellation. + // Otherwise we risk releasing and reusing the stream id while the cancel request is still + // in flight, and it might end up cancelling an unrelated request. + // Note that there is a chance that the request ends normally right after we send the + // cancel request. In that case this method never returns true and the stream id will + // remain orphaned forever. This should be very rare so this is acceptable. + if (message instanceof Error) { + Error error = (Error) message; + isLastResponse = + (error.code == ProtocolConstants.ErrorCode.SERVER_ERROR) + && error.message.contains("Session cancelled by the user"); + } else { + isLastResponse = false; + } + } else if (message instanceof Rows) { + Rows rows = (Rows) message; + DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); + isLastResponse = metadata.isLastContinuousPage; + } else { + isLastResponse = message instanceof Error; + } + + if (isLastResponse) { + sawLastResponse = true; + } + return isLastResponse; + } finally { + lock.unlock(); + } + } + + /** + * Invoked when the write from {@link #sendRequest} completes. + * + * @param future The future representing the outcome of the write operation. 
+ */ + @Override + public void operationComplete(@NonNull Future future) { + if (!future.isSuccess()) { + Throwable error = future.cause(); + if (error instanceof EncoderException + && error.getCause() instanceof FrameTooLongException) { + trackNodeError(node, error.getCause()); + lock.lock(); + try { + abort(error.getCause(), false); + } finally { + lock.unlock(); + } + } else { + LOG.trace( + "[{}] Failed to send request on {}, trying next node (cause: {})", + logPrefix, + channel, + error); + ((DefaultNode) node) + .getMetricUpdater() + .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); + recordError(node, error); + trackNodeError(node, error.getCause()); + sendRequest(statement, null, executionIndex, retryCount, scheduleSpeculativeExecution); + } + } else { + LOG.trace("[{}] Request sent on {}", logPrefix, channel); + if (scheduleSpeculativeExecution + && Conversions.resolveIdempotence(statement, executionProfile)) { + int nextExecution = executionIndex + 1; + // Note that `node` is the first node of the execution, it might not be the "slow" one + // if there were retries, but in practice retries are rare. + long nextDelay = + Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) + .nextExecution(node, keyspace, statement, nextExecution); + if (nextDelay >= 0) { + scheduleSpeculativeExecution(nextExecution, nextDelay); + } else { + LOG.trace( + "[{}] Speculative execution policy returned {}, no next execution", + logPrefix, + nextDelay); + } + } + pageTimeout = schedulePageTimeout(1); + } + } + + private void scheduleSpeculativeExecution(int nextExecutionIndex, long delay) { + LOG.trace( + "[{}] Scheduling speculative execution {} in {} ms", + logPrefix, + nextExecutionIndex, + delay); + try { + scheduledExecutions.add( + timer.newTimeout( + (Timeout timeout) -> { + if (!chosenCallback.isDone()) { + LOG.trace( + "[{}] Starting speculative execution {}", logPrefix, nextExecutionIndex); + activeExecutionsCount.incrementAndGet(); + startedSpeculativeExecutionsCount.incrementAndGet(); + NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); + if (nodeMetricUpdater.isEnabled( + DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName())) { + nodeMetricUpdater.incrementCounter( + DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); + } + sendRequest(statement, null, nextExecutionIndex, 0, true); + } + }, + delay, + TimeUnit.MILLISECONDS)); + } catch (IllegalStateException e) { + logTimeoutSchedulingError(e); + } + } + + private Timeout schedulePageTimeout(int expectedPage) { + if (expectedPage < 0) { + return null; + } + Duration timeout = getPageTimeout(statement, expectedPage); + if (timeout.toNanos() <= 0) { + return null; + } + LOG.trace("[{}] Scheduling timeout for page {} in {}", logPrefix, expectedPage, timeout); + return timer.newTimeout( + t -> onPageTimeout(expectedPage), timeout.toNanos(), TimeUnit.NANOSECONDS); + } + + private void onPageTimeout(int expectedPage) { + lock.lock(); + try { + if (state == expectedPage) { + abort( + new DriverTimeoutException( + String.format("Timed out waiting for page %d", expectedPage)), + false); + } else { + // Ignore timeout if the request has moved on in the interim. + LOG.trace( + "[{}] Timeout fired for page {} but query already at state {}, skipping", + logPrefix, + expectedPage, + state); + } + } finally { + lock.unlock(); + } + } + + /** + * Invoked when a continuous paging response is received, either a successful or failed one. + * + *
<p>
Delegates further processing to appropriate methods: {@link #processResultResponse(Result, + * Frame)} if the response was successful, or {@link #processErrorResponse(Error)} if it wasn't. + * + * @param response the received {@link Frame}. + */ + @Override + public void onResponse(@NonNull Frame response) { + stopNodeMessageTimer(); + cancelTimeout(pageTimeout); + lock.lock(); + try { + if (state < 0) { + LOG.trace("[{}] Got result but the request has been cancelled, ignoring", logPrefix); + return; + } + try { + Message responseMessage = response.message; + if (responseMessage instanceof Result) { + LOG.trace("[{}] Got result", logPrefix); + processResultResponse((Result) responseMessage, response); + } else if (responseMessage instanceof Error) { + LOG.trace("[{}] Got error response", logPrefix); + processErrorResponse((Error) responseMessage); + } else { + IllegalStateException error = + new IllegalStateException("Unexpected response " + responseMessage); + trackNodeError(node, error); + abort(error, false); + } + } catch (Throwable t) { + trackNodeError(node, t); + abort(t, false); + } + } finally { + lock.unlock(); + } + } + + /** + * Invoked when a continuous paging request hits an unexpected error. + * + *
<p>
Delegates further processing to the retry policy ({@link + * #processRetryVerdict(RetryVerdict, Throwable)}). + * + * @param error the error encountered, usually a network problem. + */ + @Override + public void onFailure(@NonNull Throwable error) { + cancelTimeout(pageTimeout); + LOG.trace(String.format("[%s] Request failure", logPrefix), error); + RetryVerdict verdict; + if (!Conversions.resolveIdempotence(statement, executionProfile) + || error instanceof FrameTooLongException) { + verdict = RetryVerdict.RETHROW; + } else { + try { + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); + } catch (Throwable cause) { + abort( + new IllegalStateException("Unexpected error while invoking the retry policy", cause), + false); + return; + } + } + updateErrorMetrics( + ((DefaultNode) node).getMetricUpdater(), + verdict, + DefaultNodeMetric.ABORTED_REQUESTS, + DefaultNodeMetric.RETRIES_ON_ABORTED, + DefaultNodeMetric.IGNORES_ON_ABORTED); + lock.lock(); + try { + processRetryVerdict(verdict, error); + } finally { + lock.unlock(); + } + } + + // PROCESSING METHODS + + /** + * Processes a new result response, creating the corresponding {@link ResultSetT} object and + * then enqueuing it or serving it directly to the user if they were waiting for it. + * + * @param result the result to process. It is normally a {@link Rows} object, but may be a + * {@link Void} object if the retry policy decided to ignore an error. + * @param frame the {@link Frame} (used to create the {@link ExecutionInfo} the first time). + */ + @SuppressWarnings("GuardedBy") // this method is only called with the lock held + private void processResultResponse(@NonNull Result result, @Nullable Frame frame) { + assert lock.isHeldByCurrentThread(); + try { + ExecutionInfo executionInfo = createExecutionInfo(result, frame); + if (result instanceof Rows) { + DseRowsMetadata rowsMetadata = (DseRowsMetadata) ((Rows) result).getMetadata(); + if (columnDefinitions == null) { + // Contrary to ROWS responses from regular queries, + // the first page always includes metadata so we use this + // regardless of whether or not the query was from a prepared statement. + columnDefinitions = Conversions.toColumnDefinitions(rowsMetadata, context); + } + int pageNumber = rowsMetadata.continuousPageNumber; + int currentPage = state; + if (pageNumber != currentPage) { + abort( + new IllegalStateException( + String.format( + "Received page %d but was expecting %d", pageNumber, currentPage)), + false); + } else { + int pageSize = ((Rows) result).getData().size(); + ResultSetT resultSet = + createResultSet(statement, (Rows) result, executionInfo, columnDefinitions); + if (rowsMetadata.isLastContinuousPage) { + LOG.trace("[{}] Received last page ({} - {} rows)", logPrefix, pageNumber, pageSize); + state = STATE_FINISHED; + reenableAutoReadIfNeeded(); + enqueueOrCompletePending(resultSet); + stopGlobalRequestTimer(); + cancelTimeout(globalTimeout); + } else { + LOG.trace("[{}] Received page {} ({} rows)", logPrefix, pageNumber, pageSize); + if (currentPage > 0) { + state = currentPage + 1; + } + enqueueOrCompletePending(resultSet); + } + } + } else { + // Void responses happen only when the retry decision is ignore.
+ assert result instanceof Void; + ResultSetT resultSet = createEmptyResultSet(executionInfo); + LOG.trace( + "[{}] Continuous paging interrupted by retry policy decision to ignore error", + logPrefix); + state = STATE_FINISHED; + reenableAutoReadIfNeeded(); + enqueueOrCompletePending(resultSet); + stopGlobalRequestTimer(); + cancelTimeout(globalTimeout); + } + } catch (Throwable error) { + abort(error, false); + } + } + + /** + * Processes an unsuccessful response. + * + *
<p>
Depending on the error, may trigger:
+ *
+ * <ol>
+ *   <li>a re-prepare cycle, see {@link #processUnprepared(Unprepared)};
+ *   <li>an immediate retry on the next host, bypassing the retry policy, if the host was
+ *       bootstrapping;
+ *   <li>an immediate abortion if the error is unrecoverable;
+ *   <li>further processing if the error is recoverable, see {@link
+ *       #processRecoverableError(CoordinatorException)}
+ * </ol>
+ * + * @param errorMessage the error message received. + */ + @SuppressWarnings("GuardedBy") // this method is only called with the lock held + private void processErrorResponse(@NonNull Error errorMessage) { + assert lock.isHeldByCurrentThread(); + if (errorMessage instanceof Unprepared) { + processUnprepared((Unprepared) errorMessage); + } else { + CoordinatorException error = DseConversions.toThrowable(node, errorMessage, context); + if (error instanceof BootstrappingException) { + LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); + recordError(node, error); + trackNodeError(node, error); + sendRequest(statement, null, executionIndex, retryCount, false); + } else if (error instanceof QueryValidationException + || error instanceof FunctionFailureException + || error instanceof ProtocolError + || state > 1) { + // we only process recoverable errors for the first page, + // errors on subsequent pages will always trigger an immediate abortion + LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); + NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); + metricUpdater.incrementCounter( + DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); + trackNodeError(node, error); + abort(error, true); + } else { + try { + processRecoverableError(error); + } catch (Throwable cause) { + abort(cause, false); + } + } + } + } + + /** + * Processes a recoverable error. + * + *
<p>
In most cases, delegates to the retry policy and its decision, see {@link + * #processRetryVerdict(RetryVerdict, Throwable)}. + * + * @param error the recoverable error. + */ + private void processRecoverableError(@NonNull CoordinatorException error) { + assert lock.isHeldByCurrentThread(); + NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); + RetryVerdict verdict; + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + if (error instanceof ReadTimeoutException) { + ReadTimeoutException readTimeout = (ReadTimeoutException) error; + verdict = + retryPolicy.onReadTimeoutVerdict( + statement, + readTimeout.getConsistencyLevel(), + readTimeout.getBlockFor(), + readTimeout.getReceived(), + readTimeout.wasDataPresent(), + retryCount); + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.READ_TIMEOUTS, + DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, + DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); + } else if (error instanceof WriteTimeoutException) { + WriteTimeoutException writeTimeout = (WriteTimeoutException) error; + if (Conversions.resolveIdempotence(statement, executionProfile)) { + verdict = + retryPolicy.onWriteTimeoutVerdict( + statement, + writeTimeout.getConsistencyLevel(), + writeTimeout.getWriteType(), + writeTimeout.getBlockFor(), + writeTimeout.getReceived(), + retryCount); + } else { + verdict = RetryVerdict.RETHROW; + } + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.WRITE_TIMEOUTS, + DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, + DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); + } else if (error instanceof UnavailableException) { + UnavailableException unavailable = (UnavailableException) error; + verdict = + retryPolicy.onUnavailableVerdict( + statement, + unavailable.getConsistencyLevel(), + unavailable.getRequired(), + unavailable.getAlive(), + retryCount); + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.UNAVAILABLES, + DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, + DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); + } else { + verdict = + Conversions.resolveIdempotence(statement, executionProfile) + ? retryPolicy.onErrorResponseVerdict(statement, error, retryCount) + : RetryVerdict.RETHROW; + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.OTHER_ERRORS, + DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, + DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); + } + processRetryVerdict(verdict, error); + } + + /** + * Processes an {@link Unprepared} error by re-preparing then retrying on the same host. + * + * @param errorMessage the unprepared error message. 
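For reference, the dispatch performed by processRecoverableError() above boils down to a small decision table: each recoverable error type maps to one retry-policy callback plus a triple of error metrics, and non-idempotent statements short-circuit to RETHROW for write timeouts and generic errors. A condensed, hedged sketch of that table follows; the enum and method names below are illustrative stand-ins, not driver types:

```java
// Illustrative stand-ins only: a condensed view of processRecoverableError()'s
// dispatch. The real code passes the full exception details to the RetryPolicy;
// this sketch keeps just the shape of the decision.
enum ErrorKind { READ_TIMEOUT, WRITE_TIMEOUT, UNAVAILABLE, OTHER }

final class VerdictDispatch {
  static String callbackFor(ErrorKind kind, boolean idempotent) {
    switch (kind) {
      case READ_TIMEOUT:
        return "onReadTimeoutVerdict"; // always consulted: reads are safe to retry
      case WRITE_TIMEOUT:
        return idempotent ? "onWriteTimeoutVerdict" : "RETHROW"; // writes retried only if idempotent
      case UNAVAILABLE:
        return "onUnavailableVerdict"; // always consulted
      default:
        return idempotent ? "onErrorResponseVerdict" : "RETHROW";
    }
  }
}
```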
+ */ + @SuppressWarnings("GuardedBy") // this method is only called with the lock held + private void processUnprepared(@NonNull Unprepared errorMessage) { + assert lock.isHeldByCurrentThread(); + ByteBuffer idToReprepare = ByteBuffer.wrap(errorMessage.id); + LOG.trace( + "[{}] Statement {} is not prepared on {}, re-preparing", + logPrefix, + Bytes.toHexString(idToReprepare), + node); + RepreparePayload repreparePayload = session.getRepreparePayloads().get(idToReprepare); + if (repreparePayload == null) { + throw new IllegalStateException( + String.format( + "Tried to execute unprepared query %s but we don't have the data to re-prepare it", + Bytes.toHexString(idToReprepare))); + } + Prepare prepare = repreparePayload.toMessage(); + Duration timeout = executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); + ThrottledAdminRequestHandler.prepare( + channel, + true, + prepare, + repreparePayload.customPayload, + timeout, + throttler, + sessionMetricUpdater, + logPrefix) + .start() + .whenComplete( + (repreparedId, exception) -> { + // If we run into an unrecoverable error, surface it to the client instead of + // retrying + Throwable fatalError = null; + if (exception == null) { + if (!repreparedId.equals(idToReprepare)) { + IllegalStateException illegalStateException = + new IllegalStateException( + String.format( + "ID mismatch while trying to reprepare (expected %s, got %s). " + + "This prepared statement won't work anymore. " + + "This usually happens when you run a 'USE...' query after " + + "the statement was prepared.", + Bytes.toHexString(idToReprepare), Bytes.toHexString(repreparedId))); + trackNodeError(node, illegalStateException); + fatalError = illegalStateException; + } else { + LOG.trace( + "[{}] Re-prepare successful, retrying on the same node ({})", + logPrefix, + node); + sendRequest(statement, node, executionIndex, retryCount, false); + } + } else { + if (exception instanceof UnexpectedResponseException) { + Message prepareErrorMessage = ((UnexpectedResponseException) exception).message; + if (prepareErrorMessage instanceof Error) { + CoordinatorException prepareError = + DseConversions.toThrowable(node, (Error) prepareErrorMessage, context); + if (prepareError instanceof QueryValidationException + || prepareError instanceof FunctionFailureException + || prepareError instanceof ProtocolError) { + LOG.trace("[{}] Unrecoverable error on re-prepare, rethrowing", logPrefix); + trackNodeError(node, prepareError); + fatalError = prepareError; + } + } + } else if (exception instanceof RequestThrottlingException) { + trackNodeError(node, exception); + fatalError = exception; + } + if (fatalError == null) { + LOG.trace("[{}] Re-prepare failed, trying next node", logPrefix); + recordError(node, exception); + trackNodeError(node, exception); + sendRequest(statement, null, executionIndex, retryCount, false); + } + } + if (fatalError != null) { + lock.lock(); + try { + abort(fatalError, true); + } finally { + lock.unlock(); + } + } + }); + } + + /** + * Processes the retry decision by triggering a retry, aborting or ignoring; also records the + * failures for further access. + * + * @param verdict the verdict to process. + * @param error the original error. 
+ */ + private void processRetryVerdict(@NonNull RetryVerdict verdict, @NonNull Throwable error) { + assert lock.isHeldByCurrentThread(); + LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); + switch (verdict.getRetryDecision()) { + case RETRY_SAME: + recordError(node, error); + trackNodeError(node, error); + sendRequest( + verdict.getRetryRequest(statement), node, executionIndex, retryCount + 1, false); + break; + case RETRY_NEXT: + recordError(node, error); + trackNodeError(node, error); + sendRequest( + verdict.getRetryRequest(statement), null, executionIndex, retryCount + 1, false); + break; + case RETHROW: + trackNodeError(node, error); + abort(error, true); + break; + case IGNORE: + processResultResponse(Void.INSTANCE, null); + break; + } + } + + // PAGE HANDLING + + /** + * Enqueues a response or, if the client was already waiting for it, completes the pending + * future. + * + *
<p>
Guarded by {@link #lock}. + * + * @param pageOrError the next page, or an error. + */ + @SuppressWarnings("GuardedBy") // this method is only called with the lock held + private void enqueueOrCompletePending(@NonNull Object pageOrError) { + assert lock.isHeldByCurrentThread(); + + if (queue == null) { + // This is the first time this callback tries to stream something back to the client, check + // if it can be selected + if (!chosenCallback.complete(this)) { + if (LOG.isTraceEnabled()) { + LOG.trace( + "[{}] Trying to enqueue {} but another callback was already chosen, aborting", + logPrefix, + asTraceString(pageOrError)); + } + // Discard the data, this callback will be canceled shortly since the chosen callback + // invoked cancelScheduledTasks + return; + } + + queue = new ArrayDeque<>(getMaxEnqueuedPages(statement)); + numPagesRequested = protocolBackpressureAvailable ? getMaxEnqueuedPages(statement) : 0; + cancelScheduledTasks(this); + } + + if (pendingResult != null) { + if (LOG.isTraceEnabled()) { + LOG.trace( + "[{}] Client was waiting on empty queue, completing with {}", + logPrefix, + asTraceString(pageOrError)); + } + CompletableFuture tmp = pendingResult; + // null out pendingResult before completing it because its completion + // may trigger a call to fetchNextPage -> dequeueOrCreatePending, + // which expects pendingResult to be null. + pendingResult = null; + completeResultSetFuture(tmp, pageOrError); + } else { + if (LOG.isTraceEnabled()) { + LOG.trace("[{}] Enqueuing {}", logPrefix, asTraceString(pageOrError)); + } + queue.add(pageOrError); + // Backpressure without protocol support: if the queue grows too large, + // disable auto-read so that the channel eventually becomes + // non-writable on the server side (causing it to back off for a while) + if (!protocolBackpressureAvailable + && queue.size() == getMaxEnqueuedPages(statement) + && state > 0) { + LOG.trace( + "[{}] Exceeded {} queued response pages, disabling auto-read", + logPrefix, + queue.size()); + channel.config().setAutoRead(false); + } + } + } + + /** + * Dequeue a response or, if the queue is empty, create the future that will get notified of the + * next response, when it arrives. + * + *
<p>
Called from user code, see {@link ContinuousAsyncResultSet#fetchNextPage()}. + * + * @return the next page's future; never null. + */ + @NonNull + public CompletableFuture dequeueOrCreatePending() { + lock.lock(); + try { + // If the client was already waiting for a page, there's no way it can call this method + // again + // (this is guaranteed by our public API because in order to ask for the next page, + // you need the reference to the previous page). + assert pendingResult == null; + + Object head = null; + if (queue != null) { + head = queue.poll(); + if (!protocolBackpressureAvailable + && head != null + && queue.size() == getMaxEnqueuedPages(statement) - 1) { + LOG.trace( + "[{}] Back to {} queued response pages, re-enabling auto-read", + logPrefix, + queue.size()); + channel.config().setAutoRead(true); + } + maybeRequestMore(); + } + + if (head != null) { + if (state == STATE_FAILED && !(head instanceof Throwable)) { + LOG.trace( + "[{}] Client requested next page on cancelled queue, discarding page and returning cancelled future", + logPrefix); + return cancelledResultSetFuture(); + } else { + if (LOG.isTraceEnabled()) { + LOG.trace( + "[{}] Client requested next page on non-empty queue, returning immediate future of {}", + logPrefix, + asTraceString(head)); + } + return immediateResultSetFuture(head); + } + } else { + if (state == STATE_FAILED) { + LOG.trace( + "[{}] Client requested next page on cancelled empty queue, returning cancelled future", + logPrefix); + return cancelledResultSetFuture(); + } else { + LOG.trace( + "[{}] Client requested next page but queue is empty, installing future", logPrefix); + pendingResult = new CompletableFuture<>(); + // Only schedule a timeout if we're past the first page (the first page's timeout is + // handled in sendRequest). + if (state > 1) { + pageTimeout = schedulePageTimeout(state); + // Note: each new timeout is cancelled when the next response arrives, see + // onResponse(Frame). + } + return pendingResult; + } + } + } finally { + lock.unlock(); + } + } + + /** + * If the total number of results in the queue and in-flight (requested - received) is less than + * half the queue size, then request more pages, unless the {@link #state} is failed, we're + * still waiting for the first page (so maybe still throttled or in the middle of a retry), or + * we don't support backpressure at the protocol level. + */ + @SuppressWarnings("GuardedBy") + private void maybeRequestMore() { + assert lock.isHeldByCurrentThread(); + if (state < 2 || streamId == -1 || !protocolBackpressureAvailable) { + return; + } + // if we have already requested more than the client needs, then no need to request some more + int maxPages = getMaxPages(statement); + if (maxPages > 0 && numPagesRequested >= maxPages) { + return; + } + // the pages received so far, which is the state minus one + int received = state - 1; + int requested = numPagesRequested; + // the pages that fit in the queue, which is the queue free space minus the requests in flight + int freeSpace = getMaxEnqueuedPages(statement) - queue.size(); + int inFlight = requested - received; + int numPagesFittingInQueue = freeSpace - inFlight; + if (numPagesFittingInQueue > 0 + && numPagesFittingInQueue >= getMaxEnqueuedPages(statement) / 2) { + LOG.trace("[{}] Requesting more {} pages", logPrefix, numPagesFittingInQueue); + numPagesRequested = requested + numPagesFittingInQueue; + sendMorePagesRequest(numPagesFittingInQueue); + } + } + + /** + * Sends a request for more pages (a.k.a. backpressure request). 
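The interplay of received, requested and queue occupancy in maybeRequestMore() above is easy to misread, so here is the same arithmetic extracted into a standalone, hedged sketch (the helper names are mine, not driver API), with two worked examples:

```java
final class BackpressureMath {
  // Mirrors the computation in maybeRequestMore(): more pages are requested only
  // when the queue could absorb at least half its capacity on top of the pages
  // already in flight (requested but not yet received).
  static int pagesToRequest(int maxEnqueuedPages, int queueSize, int received, int requested) {
    int inFlight = requested - received;
    int freeSpace = maxEnqueuedPages - queueSize;
    int fitting = freeSpace - inFlight;
    return (fitting > 0 && fitting >= maxEnqueuedPages / 2) ? fitting : 0;
  }

  public static void main(String[] args) {
    // Queue of 4, 1 page enqueued, 2 received, 4 requested:
    // inFlight = 2, freeSpace = 3, fitting = 1 -> below half capacity, request nothing.
    System.out.println(pagesToRequest(4, 1, 2, 4)); // 0
    // Client drained the queue and the in-flight pages arrived (4 received, 0 enqueued):
    // inFlight = 0, freeSpace = 4, fitting = 4 -> request 4 more pages.
    System.out.println(pagesToRequest(4, 0, 4, 4)); // 4
  }
}
```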
+ * + * @param nextPages the number of extra pages to request. + */ + @SuppressWarnings("GuardedBy") + private void sendMorePagesRequest(int nextPages) { + assert lock.isHeldByCurrentThread(); + assert channel != null : "expected valid connection in order to request more pages"; + assert protocolBackpressureAvailable; + assert streamId != -1; + + LOG.trace("[{}] Sending request for more pages", logPrefix); + ThrottledAdminRequestHandler.query( + channel, + true, + Revise.requestMoreContinuousPages(streamId, nextPages), + statement.getCustomPayload(), + getReviseRequestTimeout(statement), + throttler, + session.getMetricUpdater(), + logPrefix, + "request " + nextPages + " more pages for id " + streamId) + .start() + .handle( + (result, error) -> { + if (error != null) { + Loggers.warnWithException( + LOG, "[{}] Error requesting more pages, aborting.", logPrefix, error); + lock.lock(); + try { + // Set fromServer to false because we want the callback to still cancel the + // session if possible or else the server will wait on a timeout. + abort(error, false); + } finally { + lock.unlock(); + } + } + return null; + }); + } + + /** Cancels the given timeout, if non null. */ + private void cancelTimeout(Timeout timeout) { + if (timeout != null) { + LOG.trace("[{}] Cancelling timeout", logPrefix); + timeout.cancel(); + } + } + + // CANCELLATION + + public void cancel() { + lock.lock(); + try { + if (state < 0) { + return; + } else { + LOG.trace( + "[{}] Cancelling continuous paging session with state {} on node {}", + logPrefix, + state, + node); + state = STATE_FAILED; + if (pendingResult != null) { + pendingResult.cancel(true); + } + releaseStreamId(); + } + } finally { + lock.unlock(); + } + reenableAutoReadIfNeeded(); + } + + @SuppressWarnings("GuardedBy") + private void releaseStreamId() { + assert lock.isHeldByCurrentThread(); + // If we saw the last response already, InFlightHandler will release the id so no need to + // cancel explicitly + if (streamId >= 0 && !sawLastResponse && !channel.closeFuture().isDone()) { + // This orphans the stream id, but it will still be held until we see the last response: + channel.cancel(this); + // This tells the server to stop streaming, and send a terminal response: + sendCancelRequest(); + } + } + + @SuppressWarnings("GuardedBy") + private void sendCancelRequest() { + assert lock.isHeldByCurrentThread(); + LOG.trace("[{}] Sending cancel request", logPrefix); + ThrottledAdminRequestHandler.query( + channel, + true, + Revise.cancelContinuousPaging(streamId), + statement.getCustomPayload(), + getReviseRequestTimeout(statement), + throttler, + session.getMetricUpdater(), + logPrefix, + "cancel request") + .start() + .handle( + (result, error) -> { + if (error != null) { + Loggers.warnWithException( + LOG, + "[{}] Error sending cancel request. 
" + + "This is not critical (the request will eventually time out server-side).", + logPrefix, + error); + } else { + LOG.trace("[{}] Continuous paging session cancelled successfully", logPrefix); + } + return null; + }); + sentCancelRequest = true; + } + + // TERMINATION + + private void reenableAutoReadIfNeeded() { + // Make sure we don't leave the channel unreadable + LOG.trace("[{}] Re-enabling auto-read", logPrefix); + if (!protocolBackpressureAvailable) { + channel.config().setAutoRead(true); + } + } + + // ERROR HANDLING + + private void trackNodeError(@NonNull Node node, @NonNull Throwable error) { + if (nodeErrorReported.compareAndSet(false, true)) { + long latencyNanos = System.nanoTime() - this.messageStartTimeNanos; + context + .getRequestTracker() + .onNodeError(this.statement, error, latencyNanos, executionProfile, node, logPrefix); + } + } + + /** + * Aborts the continuous paging session due to an error that can be either from the server or + * the client. + * + * @param error the error that causes the abortion. + * @param fromServer whether the error was triggered by the coordinator or by the driver. + */ + @SuppressWarnings("GuardedBy") // this method is only called with the lock held + private void abort(@NonNull Throwable error, boolean fromServer) { + assert lock.isHeldByCurrentThread(); + LOG.trace( + "[{}] Aborting due to {} ({})", + logPrefix, + error.getClass().getSimpleName(), + error.getMessage()); + if (channel == null) { + // This only happens when sending the initial request, if no host was available + // or if the iterator returned by the LBP threw an exception. + // In either case the write was not even attempted, and + // we set the state right now. + enqueueOrCompletePending(error); + state = STATE_FAILED; + } else if (state > 0) { + enqueueOrCompletePending(error); + if (fromServer) { + // We can safely assume the server won't send any more responses, + // so set the state and call release() right now. + state = STATE_FAILED; + reenableAutoReadIfNeeded(); + } else { + // attempt to cancel first, i.e. ask server to stop sending responses, + // and only then release. 
+ cancel(); + } + } + stopGlobalRequestTimer(); + cancelTimeout(globalTimeout); + } + + // METRICS + + private void stopNodeMessageTimer() { + if (stopNodeMessageTimerReported.compareAndSet(false, true)) { + ((DefaultNode) node) + .getMetricUpdater() + .updateTimer( + messagesMetric, + executionProfile.getName(), + System.nanoTime() - messageStartTimeNanos, + TimeUnit.NANOSECONDS); + } + } + + private void stopGlobalRequestTimer() { + session + .getMetricUpdater() + .updateTimer( + continuousRequestsMetric, + null, + System.nanoTime() - startTimeNanos, + TimeUnit.NANOSECONDS); + } + + private void updateErrorMetrics( + @NonNull NodeMetricUpdater metricUpdater, + @NonNull RetryVerdict verdict, + @NonNull DefaultNodeMetric error, + @NonNull DefaultNodeMetric retriesOnError, + @NonNull DefaultNodeMetric ignoresOnError) { + metricUpdater.incrementCounter(error, executionProfile.getName()); + switch (verdict.getRetryDecision()) { + case RETRY_SAME: + case RETRY_NEXT: + metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); + metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); + break; + case IGNORE: + metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); + metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); + break; + case RETHROW: + // nothing to do + } + } + + // UTILITY METHODS + + @NonNull + private CompletableFuture immediateResultSetFuture(@NonNull Object pageOrError) { + CompletableFuture future = new CompletableFuture<>(); + completeResultSetFuture(future, pageOrError); + return future; + } + + @NonNull + private CompletableFuture cancelledResultSetFuture() { + return immediateResultSetFuture( + new CancellationException( + "Can't get more results because the continuous query has failed already. " + + "Most likely this is because the query was cancelled")); + } + + private void completeResultSetFuture( + @NonNull CompletableFuture future, @NonNull Object pageOrError) { + long now = System.nanoTime(); + long totalLatencyNanos = now - startTimeNanos; + long nodeLatencyNanos = now - messageStartTimeNanos; + if (resultSetClass.isInstance(pageOrError)) { + if (future.complete(resultSetClass.cast(pageOrError))) { + throttler.signalSuccess(ContinuousRequestHandlerBase.this); + if (nodeSuccessReported.compareAndSet(false, true)) { + context + .getRequestTracker() + .onNodeSuccess(statement, nodeLatencyNanos, executionProfile, node, logPrefix); + } + context + .getRequestTracker() + .onSuccess(statement, totalLatencyNanos, executionProfile, node, logPrefix); + } + } else { + Throwable error = (Throwable) pageOrError; + if (future.completeExceptionally(error)) { + context + .getRequestTracker() + .onError(statement, error, totalLatencyNanos, executionProfile, node, logPrefix); + if (error instanceof DriverTimeoutException) { + throttler.signalTimeout(ContinuousRequestHandlerBase.this); + session + .getMetricUpdater() + .incrementCounter(clientTimeoutsMetric, executionProfile.getName()); + } else if (!(error instanceof RequestThrottlingException)) { + throttler.signalError(ContinuousRequestHandlerBase.this, error); + } + } + } + } + + @NonNull + private ExecutionInfo createExecutionInfo(@NonNull Result result, @Nullable Frame response) { + ByteBuffer pagingState = + result instanceof Rows ?
((Rows) result).getMetadata().pagingState : null; + return new DefaultExecutionInfo( + statement, + node, + startedSpeculativeExecutionsCount.get(), + executionIndex, + errors, + pagingState, + response, + true, + session, + context, + executionProfile); + } + + private void logTimeoutSchedulingError(IllegalStateException timeoutError) { + // If we're racing with session shutdown, the timer might be stopped already. We don't want + // to schedule more executions anyway, so swallow the error. + if (!"cannot be started once stopped".equals(timeoutError.getMessage())) { + Loggers.warnWithException( + LOG, "[{}] Error while scheduling timeout", logPrefix, timeoutError); + } + } + + @NonNull + private String asTraceString(@NonNull Object pageOrError) { + return resultSetClass.isInstance(pageOrError) + ? "page " + pageNumber(resultSetClass.cast(pageOrError)) + : ((Exception) pageOrError).getClass().getSimpleName(); + } + + private int getState() { + lock.lock(); + try { + return state; + } finally { + lock.unlock(); + } + } + + private CompletableFuture getPendingResult() { + lock.lock(); + try { + return pendingResult; + } finally { + lock.unlock(); + } + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java new file mode 100644 index 00000000000..8562fde5905 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSet.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
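Before the implementation of DefaultContinuousAsyncResultSet below, a brief usage illustration: a client can drain all pages by chaining fetchNextPage() calls. This hedged sketch uses only the ContinuousAsyncResultSet methods visible in this diff (currentPage(), hasMorePages(), fetchNextPage()):

```java
import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

final class PageConsumer {
  // Processes the current page, then recursively chains onto the next one.
  static CompletionStage<Void> consume(ContinuousAsyncResultSet page) {
    for (Row row : page.currentPage()) {
      System.out.println(row); // replace with real row processing
    }
    return page.hasMorePages()
        ? page.fetchNextPage().thenCompose(PageConsumer::consume)
        : CompletableFuture.<Void>completedFuture(null);
  }
}
```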
+ */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe // wraps a mutable queue +public class DefaultContinuousAsyncResultSet implements ContinuousAsyncResultSet { + + private final Iterable currentPage; + private final ColumnDefinitions columnDefinitions; + private final int pageNumber; + private final boolean hasMorePages; + private final ExecutionInfo executionInfo; + private final ContinuousCqlRequestHandler handler; + private final CountingIterator iterator; + + public DefaultContinuousAsyncResultSet( + CountingIterator iterator, + ColumnDefinitions columnDefinitions, + int pageNumber, + boolean hasMorePages, + ExecutionInfo executionInfo, + ContinuousCqlRequestHandler handler) { + this.columnDefinitions = columnDefinitions; + this.pageNumber = pageNumber; + this.hasMorePages = hasMorePages; + this.executionInfo = executionInfo; + this.handler = handler; + this.iterator = iterator; + this.currentPage = () -> iterator; + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + return columnDefinitions; + } + + @Override + public boolean wasApplied() { + // always return true for non-conditional updates + return true; + } + + @NonNull + @Override + public ExecutionInfo getExecutionInfo() { + return executionInfo; + } + + @Override + public int pageNumber() { + return pageNumber; + } + + @Override + public boolean hasMorePages() { + return hasMorePages; + } + + @NonNull + @Override + public Iterable currentPage() { + return currentPage; + } + + @Override + public int remaining() { + return iterator.remaining(); + } + + @NonNull + @Override + public CompletionStage fetchNextPage() throws IllegalStateException { + if (!hasMorePages()) { + throw new IllegalStateException( + "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); + } + return handler.fetchNextPage(); + } + + @Override + public void cancel() { + handler.cancel(); + } + + public static ContinuousAsyncResultSet empty(ExecutionInfo executionInfo) { + + return new ContinuousAsyncResultSet() { + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + return EmptyColumnDefinitions.INSTANCE; + } + + @NonNull + @Override + public ExecutionInfo getExecutionInfo() { + return executionInfo; + } + + @NonNull + @Override + public Iterable currentPage() { + return Collections.emptyList(); + } + + @Override + public int remaining() { + return 0; + } + + @Override + public boolean hasMorePages() { + return false; + } + + @Override + public int pageNumber() { + return 1; + } + + @NonNull + @Override + public CompletionStage fetchNextPage() + throws IllegalStateException { + throw new IllegalStateException( + "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); + } + + @Override + public void cancel() { + // noop + } + + @Override + public boolean wasApplied() { + // always true + return true; + } + }; + } +} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java new file mode 100644 index 00000000000..929400bc7a6 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSet.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import net.jcip.annotations.NotThreadSafe; + +/** + * This class is roughly equivalent to {@link + * com.datastax.oss.driver.internal.core.cql.MultiPageResultSet}, except that {@link + * RowIterator#maybeMoveToNextPage()} needs to check for cancellation before fetching the next page. 
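As a hedged usage sketch of the blocking wrapper described above: the synchronous result set exposes the whole multi-page stream as a plain Iterable of rows, and page fetches happen lazily inside the iterator whenever a page boundary is crossed (ContinuousResultSet is assumed iterable over Row, as in the driver's synchronous result-set hierarchy):

```java
import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet;
import com.datastax.oss.driver.api.core.cql.Row;

final class SyncConsumer {
  static long countRows(ContinuousResultSet resultSet) {
    long count = 0;
    // Iteration may block in maybeMoveToNextPage() while the next page is fetched;
    // calling resultSet.cancel() from another thread stops further fetches.
    for (Row row : resultSet) {
      count++;
    }
    return count;
  }
}
```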
+ */ +@NotThreadSafe +public class DefaultContinuousResultSet implements ContinuousResultSet { + + private final RowIterator iterator; + private final List executionInfos = new ArrayList<>(); + private final ColumnDefinitions columnDefinitions; + + public DefaultContinuousResultSet(ContinuousAsyncResultSet firstPage) { + iterator = new RowIterator(firstPage); + columnDefinitions = firstPage.getColumnDefinitions(); + executionInfos.add(firstPage.getExecutionInfo()); + } + + @Override + public void cancel() { + iterator.cancel(); + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + return columnDefinitions; + } + + @NonNull + @Override + public List getExecutionInfos() { + return executionInfos; + } + + @NonNull + @Override + public Iterator iterator() { + return iterator; + } + + @Override + public boolean isFullyFetched() { + return iterator.isFullyFetched(); + } + + @Override + public int getAvailableWithoutFetching() { + return iterator.remaining(); + } + + @Override + public boolean wasApplied() { + return iterator.wasApplied(); + } + + private class RowIterator extends CountingIterator { + private ContinuousAsyncResultSet currentPage; + private Iterator currentRows; + private boolean cancelled = false; + + private RowIterator(ContinuousAsyncResultSet firstPage) { + super(firstPage.remaining()); + currentPage = firstPage; + currentRows = firstPage.currentPage().iterator(); + } + + @Override + protected Row computeNext() { + maybeMoveToNextPage(); + return currentRows.hasNext() ? currentRows.next() : endOfData(); + } + + private void maybeMoveToNextPage() { + if (!cancelled && !currentRows.hasNext() && currentPage.hasMorePages()) { + BlockingOperation.checkNotDriverThread(); + ContinuousAsyncResultSet nextPage = + CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); + currentPage = nextPage; + remaining += currentPage.remaining(); + currentRows = nextPage.currentPage().iterator(); + executionInfos.add(nextPage.getExecutionInfo()); + } + } + + private boolean isFullyFetched() { + return !currentPage.hasMorePages(); + } + + private boolean wasApplied() { + return currentPage.wasApplied(); + } + + private void cancel() { + currentPage.cancel(); + cancelled = true; + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java new file mode 100644 index 00000000000..afe0e864181 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessor.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.cql.continuous.reactive; + +import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; +import com.datastax.dse.driver.internal.core.cql.reactive.FailedReactiveResultSet; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class ContinuousCqlRequestReactiveProcessor + implements RequestProcessor, ContinuousReactiveResultSet> { + + public static final GenericType CONTINUOUS_REACTIVE_RESULT_SET = + GenericType.of(ContinuousReactiveResultSet.class); + + private final ContinuousCqlRequestAsyncProcessor asyncProcessor; + + public ContinuousCqlRequestReactiveProcessor(ContinuousCqlRequestAsyncProcessor asyncProcessor) { + this.asyncProcessor = asyncProcessor; + } + + @Override + public boolean canProcess(Request request, GenericType resultType) { + return request instanceof Statement && resultType.equals(CONTINUOUS_REACTIVE_RESULT_SET); + } + + @Override + public ContinuousReactiveResultSet process( + Statement request, + DefaultSession session, + InternalDriverContext context, + String sessionLogPrefix) { + return new DefaultContinuousReactiveResultSet( + () -> asyncProcessor.process(request, session, context, sessionLogPrefix)); + } + + @Override + public ContinuousReactiveResultSet newFailure(RuntimeException error) { + return new FailedReactiveResultSet(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java new file mode 100644 index 00000000000..b3f301edea6 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/DefaultContinuousReactiveResultSet.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
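Note the constructor argument in the class that follows: the processor above hands over a Callable rather than an already-running future, so the query is only started when a subscriber actually subscribes. A minimal, hedged sketch of that deferral pattern is shown below; it is not the driver's ReactiveResultSetBase (whose body is not part of this diff), just an illustration of why a Callable is passed:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.BiConsumer;

final class Deferred<T> {
  private final Callable<CompletionStage<T>> operation;

  Deferred(Callable<CompletionStage<T>> operation) {
    this.operation = operation; // nothing is executed yet
  }

  // The operation is only started here, once per downstream consumer.
  void subscribe(BiConsumer<T, Throwable> downstream) {
    CompletionStage<T> stage;
    try {
      stage = operation.call();
    } catch (Exception e) {
      stage = failedStage(e);
    }
    stage.whenComplete(downstream);
  }

  private static <T> CompletionStage<T> failedStage(Throwable t) {
    // CompletableFuture.failedFuture only exists from Java 9; build it by hand for Java 8.
    CompletableFuture<T> f = new CompletableFuture<>();
    f.completeExceptionally(t);
    return f;
  }
}
```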
+ */ +package com.datastax.dse.driver.internal.core.cql.continuous.reactive; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; +import com.datastax.dse.driver.internal.core.cql.reactive.ReactiveResultSetBase; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class DefaultContinuousReactiveResultSet + extends ReactiveResultSetBase implements ContinuousReactiveResultSet { + + public DefaultContinuousReactiveResultSet( + Callable> firstPage) { + super(firstPage); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java new file mode 100644 index 00000000000..3539c2e698c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessor.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class CqlRequestReactiveProcessor + implements RequestProcessor, ReactiveResultSet> { + + public static final GenericType REACTIVE_RESULT_SET = + GenericType.of(ReactiveResultSet.class); + + private final CqlRequestAsyncProcessor asyncProcessor; + + public CqlRequestReactiveProcessor(CqlRequestAsyncProcessor asyncProcessor) { + this.asyncProcessor = asyncProcessor; + } + + @Override + public boolean canProcess(Request request, GenericType resultType) { + return request instanceof Statement && resultType.equals(REACTIVE_RESULT_SET); + } + + @Override + public ReactiveResultSet process( + Statement request, + DefaultSession session, + InternalDriverContext context, + String sessionLogPrefix) { + return new DefaultReactiveResultSet( + () -> asyncProcessor.process(request, session, context, sessionLogPrefix)); + } + + @Override + public ReactiveResultSet newFailure(RuntimeException error) { + return new FailedReactiveResultSet(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java new file mode 100644 index 00000000000..33b6dc02f48 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSet.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
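As a consumption example for the reactive API implemented by the next file: a ReactiveResultSet is a Reactive Streams Publisher of ReactiveRow, so a bare-bones subscriber like the hedged sketch below can drive it (in practice a library such as Reactor or RxJava would be used instead of hand-written subscribers):

```java
import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

final class PrintingSubscriber implements Subscriber<ReactiveRow> {
  @Override
  public void onSubscribe(Subscription s) {
    s.request(Long.MAX_VALUE); // unbounded demand: effectively "give me everything"
  }

  @Override
  public void onNext(ReactiveRow row) {
    System.out.println(row); // each row also carries its ExecutionInfo
  }

  @Override
  public void onError(Throwable t) {
    t.printStackTrace(); // e.g. the error wrapped by FailedReactiveResultSet
  }

  @Override
  public void onComplete() {
    System.out.println("query complete");
  }
}
```

Since the result set is a Publisher, usage is simply reactiveResultSet.subscribe(new PrintingSubscriber()).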
+ */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class DefaultReactiveResultSet extends ReactiveResultSetBase { + + public DefaultReactiveResultSet(Callable> firstPage) { + super(firstPage); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java new file mode 100644 index 00000000000..ca3b93e7f6b --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveRow.java @@ -0,0 +1,580 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe +class DefaultReactiveRow implements ReactiveRow { + + private final Row row; + private final ExecutionInfo executionInfo; + + DefaultReactiveRow(@NonNull Row row, @NonNull ExecutionInfo executionInfo) { + this.row = row; + this.executionInfo = executionInfo; + } + + @NonNull + @Override + public ExecutionInfo getExecutionInfo() { + return executionInfo; + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + return row.getColumnDefinitions(); + } + + @Override + public ByteBuffer getBytesUnsafe(int i) 
{ + return row.getBytesUnsafe(i); + } + + @Override + public boolean isNull(int i) { + return row.isNull(i); + } + + @Override + public T get(int i, TypeCodec codec) { + return row.get(i, codec); + } + + @Override + public T get(int i, GenericType targetType) { + return row.get(i, targetType); + } + + @Override + public T get(int i, Class targetClass) { + return row.get(i, targetClass); + } + + @Override + public Object getObject(int i) { + return row.getObject(i); + } + + @Override + public boolean getBoolean(int i) { + return row.getBoolean(i); + } + + @Override + public byte getByte(int i) { + return row.getByte(i); + } + + @Override + public double getDouble(int i) { + return row.getDouble(i); + } + + @Override + public float getFloat(int i) { + return row.getFloat(i); + } + + @Override + public int getInt(int i) { + return row.getInt(i); + } + + @Override + public long getLong(int i) { + return row.getLong(i); + } + + @Override + public short getShort(int i) { + return row.getShort(i); + } + + @Override + public Instant getInstant(int i) { + return row.getInstant(i); + } + + @Override + public LocalDate getLocalDate(int i) { + return row.getLocalDate(i); + } + + @Override + public LocalTime getLocalTime(int i) { + return row.getLocalTime(i); + } + + @Override + public ByteBuffer getByteBuffer(int i) { + return row.getByteBuffer(i); + } + + @Override + public String getString(int i) { + return row.getString(i); + } + + @Override + public BigInteger getBigInteger(int i) { + return row.getBigInteger(i); + } + + @Override + public BigDecimal getBigDecimal(int i) { + return row.getBigDecimal(i); + } + + @Override + public UUID getUuid(int i) { + return row.getUuid(i); + } + + @Override + public InetAddress getInetAddress(int i) { + return row.getInetAddress(i); + } + + @Override + public CqlDuration getCqlDuration(int i) { + return row.getCqlDuration(i); + } + + @Override + public Token getToken(int i) { + return row.getToken(i); + } + + @Override + public List getList(int i, @NonNull Class elementsClass) { + return row.getList(i, elementsClass); + } + + @Override + public Set getSet(int i, @NonNull Class elementsClass) { + return row.getSet(i, elementsClass); + } + + @Override + public Map getMap(int i, @NonNull Class keyClass, @NonNull Class valueClass) { + return row.getMap(i, keyClass, valueClass); + } + + @Override + public UdtValue getUdtValue(int i) { + return row.getUdtValue(i); + } + + @Override + public TupleValue getTupleValue(int i) { + return row.getTupleValue(i); + } + + @Override + public int size() { + return row.size(); + } + + @NonNull + @Override + public DataType getType(int i) { + return row.getType(i); + } + + @NonNull + @Override + public CodecRegistry codecRegistry() { + return row.codecRegistry(); + } + + @NonNull + @Override + public ProtocolVersion protocolVersion() { + return row.protocolVersion(); + } + + @Override + public ByteBuffer getBytesUnsafe(@NonNull String name) { + return row.getBytesUnsafe(name); + } + + @Override + public boolean isNull(@NonNull String name) { + return row.isNull(name); + } + + @Override + public T get(@NonNull String name, @NonNull TypeCodec codec) { + return row.get(name, codec); + } + + @Override + public T get(@NonNull String name, @NonNull GenericType targetType) { + return row.get(name, targetType); + } + + @Override + public T get(@NonNull String name, @NonNull Class targetClass) { + return row.get(name, targetClass); + } + + @Override + public Object getObject(@NonNull String name) { + return row.getObject(name); + } + + 
@Override + public boolean getBoolean(@NonNull String name) { + return row.getBoolean(name); + } + + @Override + public byte getByte(@NonNull String name) { + return row.getByte(name); + } + + @Override + public double getDouble(@NonNull String name) { + return row.getDouble(name); + } + + @Override + public float getFloat(@NonNull String name) { + return row.getFloat(name); + } + + @Override + public int getInt(@NonNull String name) { + return row.getInt(name); + } + + @Override + public long getLong(@NonNull String name) { + return row.getLong(name); + } + + @Override + public short getShort(@NonNull String name) { + return row.getShort(name); + } + + @Override + public Instant getInstant(@NonNull String name) { + return row.getInstant(name); + } + + @Override + public LocalDate getLocalDate(@NonNull String name) { + return row.getLocalDate(name); + } + + @Override + public LocalTime getLocalTime(@NonNull String name) { + return row.getLocalTime(name); + } + + @Override + public ByteBuffer getByteBuffer(@NonNull String name) { + return row.getByteBuffer(name); + } + + @Override + public String getString(@NonNull String name) { + return row.getString(name); + } + + @Override + public BigInteger getBigInteger(@NonNull String name) { + return row.getBigInteger(name); + } + + @Override + public BigDecimal getBigDecimal(@NonNull String name) { + return row.getBigDecimal(name); + } + + @Override + public UUID getUuid(@NonNull String name) { + return row.getUuid(name); + } + + @Override + public InetAddress getInetAddress(@NonNull String name) { + return row.getInetAddress(name); + } + + @Override + public CqlDuration getCqlDuration(@NonNull String name) { + return row.getCqlDuration(name); + } + + @Override + public Token getToken(@NonNull String name) { + return row.getToken(name); + } + + @Override + public List getList(@NonNull String name, @NonNull Class elementsClass) { + return row.getList(name, elementsClass); + } + + @Override + public Set getSet(@NonNull String name, @NonNull Class elementsClass) { + return row.getSet(name, elementsClass); + } + + @Override + public Map getMap( + @NonNull String name, @NonNull Class keyClass, @NonNull Class valueClass) { + return row.getMap(name, keyClass, valueClass); + } + + @Override + public UdtValue getUdtValue(@NonNull String name) { + return row.getUdtValue(name); + } + + @Override + public TupleValue getTupleValue(@NonNull String name) { + return row.getTupleValue(name); + } + + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + return row.allIndicesOf(name); + } + + @Override + public int firstIndexOf(@NonNull String name) { + return row.firstIndexOf(name); + } + + @NonNull + @Override + public DataType getType(@NonNull String name) { + return row.getType(name); + } + + @Override + public ByteBuffer getBytesUnsafe(@NonNull CqlIdentifier id) { + return row.getBytesUnsafe(id); + } + + @Override + public boolean isNull(@NonNull CqlIdentifier id) { + return row.isNull(id); + } + + @Override + public T get(@NonNull CqlIdentifier id, @NonNull TypeCodec codec) { + return row.get(id, codec); + } + + @Override + public T get(@NonNull CqlIdentifier id, @NonNull GenericType targetType) { + return row.get(id, targetType); + } + + @Override + public T get(@NonNull CqlIdentifier id, @NonNull Class targetClass) { + return row.get(id, targetClass); + } + + @Override + public Object getObject(@NonNull CqlIdentifier id) { + return row.getObject(id); + } + + @Override + public boolean getBoolean(@NonNull CqlIdentifier id) { + return 
row.getBoolean(id); + } + + @Override + public byte getByte(@NonNull CqlIdentifier id) { + return row.getByte(id); + } + + @Override + public double getDouble(@NonNull CqlIdentifier id) { + return row.getDouble(id); + } + + @Override + public float getFloat(@NonNull CqlIdentifier id) { + return row.getFloat(id); + } + + @Override + public int getInt(@NonNull CqlIdentifier id) { + return row.getInt(id); + } + + @Override + public long getLong(@NonNull CqlIdentifier id) { + return row.getLong(id); + } + + @Override + public short getShort(@NonNull CqlIdentifier id) { + return row.getShort(id); + } + + @Override + public Instant getInstant(@NonNull CqlIdentifier id) { + return row.getInstant(id); + } + + @Override + public LocalDate getLocalDate(@NonNull CqlIdentifier id) { + return row.getLocalDate(id); + } + + @Override + public LocalTime getLocalTime(@NonNull CqlIdentifier id) { + return row.getLocalTime(id); + } + + @Override + public ByteBuffer getByteBuffer(@NonNull CqlIdentifier id) { + return row.getByteBuffer(id); + } + + @Override + public String getString(@NonNull CqlIdentifier id) { + return row.getString(id); + } + + @Override + public BigInteger getBigInteger(@NonNull CqlIdentifier id) { + return row.getBigInteger(id); + } + + @Override + public BigDecimal getBigDecimal(@NonNull CqlIdentifier id) { + return row.getBigDecimal(id); + } + + @Override + public UUID getUuid(@NonNull CqlIdentifier id) { + return row.getUuid(id); + } + + @Override + public InetAddress getInetAddress(@NonNull CqlIdentifier id) { + return row.getInetAddress(id); + } + + @Override + public CqlDuration getCqlDuration(@NonNull CqlIdentifier id) { + return row.getCqlDuration(id); + } + + @Override + public Token getToken(@NonNull CqlIdentifier id) { + return row.getToken(id); + } + + @Override + public List getList(@NonNull CqlIdentifier id, @NonNull Class elementsClass) { + return row.getList(id, elementsClass); + } + + @Override + public Set getSet(@NonNull CqlIdentifier id, @NonNull Class elementsClass) { + return row.getSet(id, elementsClass); + } + + @Override + public Map getMap( + @NonNull CqlIdentifier id, @NonNull Class keyClass, @NonNull Class valueClass) { + return row.getMap(id, keyClass, valueClass); + } + + @Override + public UdtValue getUdtValue(@NonNull CqlIdentifier id) { + return row.getUdtValue(id); + } + + @Override + public TupleValue getTupleValue(@NonNull CqlIdentifier id) { + return row.getTupleValue(id); + } + + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + return row.allIndicesOf(id); + } + + @Override + public int firstIndexOf(@NonNull CqlIdentifier id) { + return row.firstIndexOf(id); + } + + @NonNull + @Override + public DataType getType(@NonNull CqlIdentifier id) { + return row.getType(id); + } + + @Override + public boolean isDetached() { + return row.isDetached(); + } + + @Override + public void attach(@NonNull AttachmentPoint attachmentPoint) { + row.attach(attachmentPoint); + } + + @Override + public String toString() { + return "DefaultReactiveRow{row=" + row + ", executionInfo=" + executionInfo + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java new file mode 100644 index 00000000000..f760ecc395e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/EmptySubscription.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import org.reactivestreams.Subscription; + +public class EmptySubscription implements Subscription { + + public static final EmptySubscription INSTANCE = new EmptySubscription(); + + private EmptySubscription() {} + + @Override + public void request(long n) {} + + @Override + public void cancel() {} +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java new file mode 100644 index 00000000000..638434bb2d0 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedPublisher.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import java.util.Objects; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; + +/** + * A {@link Publisher} that immediately signals the error passed at instantiation to all its + * subscribers. + */ +public class FailedPublisher<ElementT> implements Publisher<ElementT> { + + protected final Throwable error; + + public FailedPublisher(Throwable error) { + this.error = error; + } + + @Override + public void subscribe(Subscriber<? super ElementT> subscriber) { + Objects.requireNonNull(subscriber, "Subscriber cannot be null"); + // Per rule 1.9, we need to call onSubscribe before any other signal. Pass a dummy + // subscription since we know it will never be used. + subscriber.onSubscribe(EmptySubscription.INSTANCE); + // Signal the error to the subscriber right away. This is safe to do because per rule 2.10, + // a Subscriber MUST be prepared to receive an onError signal without a preceding + // Subscription.request(long n) call. + // Also, per rule 2.13: onError MUST return normally except when any provided parameter + // is null (which is not the case here); so we don't need to care about catching errors here.
+ subscriber.onError(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java new file mode 100644 index 00000000000..31c34d649aa --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/FailedReactiveResultSet.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.reactivestreams.Publisher; + +/** + * A {@link ReactiveResultSet} that immediately signals the error passed at instantiation to all its + * subscribers. + * + * @see CqlRequestReactiveProcessor#newFailure(java.lang.RuntimeException) + * @see ContinuousCqlRequestReactiveProcessor#newFailure(java.lang.RuntimeException) + */ +public class FailedReactiveResultSet extends FailedPublisher + implements ReactiveResultSet, ContinuousReactiveResultSet { + + public FailedReactiveResultSet(Throwable error) { + super(error); + } + + @NonNull + @Override + public Publisher getColumnDefinitions() { + return new FailedPublisher<>(error); + } + + @NonNull + @Override + public Publisher getExecutionInfos() { + return new FailedPublisher<>(error); + } + + @NonNull + @Override + public Publisher wasApplied() { + return new FailedPublisher<>(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java new file mode 100644 index 00000000000..f058149f570 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveOperators.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.concurrent.atomic.AtomicLong; + +public final class ReactiveOperators { + + /** + * Atomically adds the given value to the given AtomicLong, bound to Long.MAX_VALUE. + * + * @param current the current value. + * @param toAdd the delta to add. + */ + public static void addCap(@NonNull AtomicLong current, long toAdd) { + long r, u; + do { + r = current.get(); + if (r == Long.MAX_VALUE) { + return; + } + u = r + toAdd; + if (u < 0L) { + u = Long.MAX_VALUE; + } + } while (!current.compareAndSet(r, u)); + } + + /** + * Atomically subtracts the given value from the given AtomicLong, bound to 0. + * + * @param current the current value. + * @param toSub the delta to subtract. + */ + public static void subCap(@NonNull AtomicLong current, long toSub) { + long r, u; + do { + r = current.get(); + if (r == 0 || r == Long.MAX_VALUE) { + return; + } + u = Math.max(r - toSub, 0); + } while (!current.compareAndSet(r, u)); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java new file mode 100644 index 00000000000..5ba00e22298 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetBase.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.AsyncPagingIterable; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicBoolean; +import net.jcip.annotations.ThreadSafe; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; + +@ThreadSafe +public abstract class ReactiveResultSetBase> + implements ReactiveResultSet { + + private final Callable> firstPage; + + private final AtomicBoolean alreadySubscribed = new AtomicBoolean(false); + + private final SimpleUnicastProcessor columnDefinitionsPublisher = + new SimpleUnicastProcessor<>(); + + private final SimpleUnicastProcessor executionInfosPublisher = + new SimpleUnicastProcessor<>(); + + private final SimpleUnicastProcessor wasAppliedPublisher = + new SimpleUnicastProcessor<>(); + + protected ReactiveResultSetBase(Callable> firstPage) { + this.firstPage = firstPage; + } + + @Override + public void subscribe(@NonNull Subscriber subscriber) { + // As per rule 1.9, we need to throw an NPE if subscriber is null + Objects.requireNonNull(subscriber, "Subscriber cannot be null"); + // As per rule 1.11, this publisher is allowed to support only one subscriber. + if (alreadySubscribed.compareAndSet(false, true)) { + ReactiveResultSetSubscription subscription = + new ReactiveResultSetSubscription<>( + subscriber, columnDefinitionsPublisher, executionInfosPublisher, wasAppliedPublisher); + try { + subscriber.onSubscribe(subscription); + // must be done after onSubscribe + subscription.start(firstPage); + } catch (Throwable t) { + // As per rule 2.13: In the case that this rule is violated, + // any associated Subscription to the Subscriber MUST be considered as + // cancelled, and the caller MUST raise this error condition in a fashion + // that is adequate for the runtime environment. + subscription.doOnError( + new IllegalStateException( + subscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", + t)); + } + } else { + subscriber.onSubscribe(EmptySubscription.INSTANCE); + subscriber.onError( + new IllegalStateException("This publisher does not support multiple subscriptions")); + } + // As per 2.13, this method must return normally (i.e. 
not throw) + } + + @NonNull + @Override + public Publisher getColumnDefinitions() { + return columnDefinitionsPublisher; + } + + @NonNull + @Override + public Publisher getExecutionInfos() { + return executionInfosPublisher; + } + + @NonNull + @Override + public Publisher wasApplied() { + return wasAppliedPublisher; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java new file mode 100644 index 00000000000..500a291e9d2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscription.java @@ -0,0 +1,493 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue; +import com.datastax.oss.driver.api.core.AsyncPagingIterable; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Iterator; +import java.util.Objects; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import net.jcip.annotations.ThreadSafe; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A single-subscriber subscription that executes one single query and emits all the returned rows. + * + *
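As context for the subscription implemented below, here is a minimal sketch of how application code typically consumes a `ReactiveResultSet` through the driver's public `executeReactive` entry point (the query string and latch handling are illustrative only, not part of this patch):

```java
import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet;
import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow;
import com.datastax.oss.driver.api.core.CqlSession;
import java.util.concurrent.CountDownLatch;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

public class ReactiveReadExample {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch done = new CountDownLatch(1);
    try (CqlSession session = CqlSession.builder().build()) {
      ReactiveResultSet rs = session.executeReactive("SELECT release_version FROM system.local");
      rs.subscribe(
          new Subscriber<ReactiveRow>() {
            @Override
            public void onSubscribe(Subscription s) {
              // The first request triggers the first page fetch (see start()/request() below).
              s.request(Long.MAX_VALUE);
            }

            @Override
            public void onNext(ReactiveRow row) {
              System.out.println(row.getString("release_version"));
            }

            @Override
            public void onError(Throwable t) {
              t.printStackTrace();
              done.countDown();
            }

            @Override
            public void onComplete() {
              done.countDown();
            }
          });
      done.await();
    }
  }
}
```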
<p>
This class can handle both continuous and non-continuous result sets. + */ +@ThreadSafe +public class ReactiveResultSetSubscription> + implements Subscription { + + private static final Logger LOG = LoggerFactory.getLogger(ReactiveResultSetSubscription.class); + + private static final int MAX_ENQUEUED_PAGES = 4; + + /** Tracks the number of items requested by the subscriber. */ + private final AtomicLong requested = new AtomicLong(0); + + /** The pages received so far, with a maximum of MAX_ENQUEUED_PAGES elements. */ + private final BoundedConcurrentQueue> pages = + new BoundedConcurrentQueue<>(MAX_ENQUEUED_PAGES); + + /** + * Used to signal that a thread is currently draining, i.e., emitting items to the subscriber. + * When it is zero, that means there is no ongoing emission. This mechanism effectively serializes + * access to the drain() method, and also keeps track of missed attempts to enter it, since each + * thread that attempts to drain will increment this counter. + * + * @see #drain() + */ + private final AtomicInteger draining = new AtomicInteger(0); + + /** + * Waited upon by the driver and completed when the subscriber requests its first item. + * + *
<p>
Used to hold off emitting results until the subscriber issues its first request for items. + * Since this future is only completed from {@link #request(long)}, this effectively conditions + * the enqueueing of the first page to the reception of the subscriber's first request. + * + *
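The gating just described boils down to a small idiom; the sketch below (hypothetical `FirstRequestGate` class, JDK only) shows why the first page is fetched exactly once, no matter how many requests arrive:

```java
import java.util.concurrent.CompletableFuture;

class FirstRequestGate {
  private final CompletableFuture<Void> firstRequest = new CompletableFuture<>();

  FirstRequestGate(Runnable fetchFirstPage) {
    // Registered eagerly, but runs only once the future completes.
    firstRequest.thenRun(fetchFirstPage);
  }

  void request(long n) {
    // complete() is idempotent: only the first call has any effect,
    // so the first page fetch is triggered exactly once.
    firstRequest.complete(null);
  }
}
```

For example, `new FirstRequestGate(() -> System.out.println("fetching")).request(1)` prints once; subsequent `request` calls leave the gate untouched.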
<p>
This mechanism avoids sending terminal signals before a request is made when the stream is + * empty. Note that as per 2.9, "a Subscriber MUST be prepared to receive an onComplete signal + * with or without a preceding Subscription.request(long n) call." However, the TCK considers it + * as unfair behavior. + * + * @see #start(Callable) + * @see #request(long) + */ + private final CompletableFuture firstSubscriberRequestArrived = new CompletableFuture<>(); + + /** non-final because it has to be de-referenced, see {@link #clear()}. */ + private volatile Subscriber mainSubscriber; + + private volatile Subscriber columnDefinitionsSubscriber; + + private volatile Subscriber executionInfosSubscriber; + + private volatile Subscriber wasAppliedSubscriber; + + /** + * Set to true when the subscription is cancelled, which happens when an error is encountered, + * when the result set is fully consumed and the subscription terminates, or when the subscriber + * manually calls {@link #cancel()}. + */ + private volatile boolean cancelled = false; + + ReactiveResultSetSubscription( + @NonNull Subscriber mainSubscriber, + @NonNull Subscriber columnDefinitionsSubscriber, + @NonNull Subscriber executionInfosSubscriber, + @NonNull Subscriber wasAppliedSubscriber) { + this.mainSubscriber = mainSubscriber; + this.columnDefinitionsSubscriber = columnDefinitionsSubscriber; + this.executionInfosSubscriber = executionInfosSubscriber; + this.wasAppliedSubscriber = wasAppliedSubscriber; + } + + /** + * Starts the query execution. + * + *
<p>
Must be called immediately after creating the subscription, but after {@link + * Subscriber#onSubscribe(Subscription)}. + * + * @param firstPage The future that, when complete, will produce the first page. + */ + void start(@NonNull Callable> firstPage) { + firstSubscriberRequestArrived.thenAccept( + (aVoid) -> fetchNextPageAndEnqueue(new Page<>(firstPage), true)); + } + + @Override + public void request(long n) { + // As per 3.6: after the Subscription is cancelled, additional + // calls to request() MUST be NOPs. + if (!cancelled) { + if (n < 1) { + // Validate request as per rule 3.9 + doOnError( + new IllegalArgumentException( + mainSubscriber + + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); + } else { + // As per rule 3.17, when demand overflows Long.MAX_VALUE + // it can be treated as "effectively unbounded" + ReactiveOperators.addCap(requested, n); + // Set the first future to true if not done yet. + // This will make the first page of results ready for consumption, + // see start(). + // As per 2.7 it is the subscriber's responsibility to provide + // external synchronization when calling request(), + // so the check-then-act idiom below is good enough + // (and besides, complete() is idempotent). + if (!firstSubscriberRequestArrived.isDone()) { + firstSubscriberRequestArrived.complete(null); + } + drain(); + } + } + } + + @Override + public void cancel() { + // As per 3.5: Subscription.cancel() MUST respect the responsiveness of + // its caller by returning in a timely manner, MUST be idempotent and + // MUST be thread-safe. + if (!cancelled) { + cancelled = true; + if (draining.getAndIncrement() == 0) { + // If nobody is draining, clear now; + // otherwise, the draining thread will notice + // that the cancelled flag was set + // and will clear for us. + clear(); + } + } + } + + /** + * Attempts to drain available items, i.e. emit them to the subscriber. + * + *
<p>
Access to this method is serialized by the field {@link #draining}: only one thread at a + * time can drain, but threads that attempt to drain while other thread is already draining + * increment that field; the draining thread, before finishing its work, checks for such failed + * attempts and triggers another round of draining if that was the case. + * + *
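This serialization idiom can be condensed into a self-contained sketch (hypothetical `DrainSketch` class; the demand accounting is simplified relative to `ReactiveOperators.addCap`/`subCap`):

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

class DrainSketch<T> {
  private final Queue<T> items = new ConcurrentLinkedQueue<>();
  private final AtomicLong requested = new AtomicLong();
  private final AtomicInteger draining = new AtomicInteger();

  void publish(T item) {
    items.offer(item);
    drain();
  }

  void request(long n) {
    // Saturating add, like ReactiveOperators.addCap: demand caps at Long.MAX_VALUE.
    long r, u;
    do {
      r = requested.get();
      if (r == Long.MAX_VALUE) break;
      u = r + n;
      if (u < 0) u = Long.MAX_VALUE;
    } while (!requested.compareAndSet(r, u));
    drain();
  }

  private void drain() {
    if (draining.getAndIncrement() != 0) {
      return; // another thread is draining and will notice our attempt
    }
    int missed = 1;
    for (; ; ) {
      long emitted = 0;
      long r = requested.get();
      T item;
      while (emitted != r && (item = items.poll()) != null) {
        emit(item);
        emitted++;
      }
      if (emitted != 0 && r != Long.MAX_VALUE) {
        requested.addAndGet(-emitted); // simplified subCap
      }
      // If other threads attempted to drain while we were busy, do another round.
      missed = draining.addAndGet(-missed);
      if (missed == 0) {
        break;
      }
    }
  }

  void emit(T item) {
    System.out.println(item);
  }
}
```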
<p>
The loop is interrupted when 1) the requested amount has been met or 2) when there are no + * more items readily available or 3) the subscription has been cancelled. + * + *
<p>
The loop also checks for stream exhaustion and emits a terminal {@code onComplete} signal in + * this case. + * + *
<p>
This method may run on a driver IO thread when invoked from {@link + * #fetchNextPageAndEnqueue(Page, boolean)}, or on a subscriber thread, when invoked from {@link + * #request(long)}. + */ + @SuppressWarnings("ConditionalBreakInInfiniteLoop") + private void drain() { + // As per 3.4: this method SHOULD respect the responsiveness + // of its caller by returning in a timely manner. + // We accomplish this by a wait-free implementation. + if (draining.getAndIncrement() != 0) { + // Someone else is already draining, so do nothing, + // the other thread will notice that we attempted to drain. + // This also allows to abide by rule 3.3 and avoid + // cycles such as request() -> onNext() -> request() etc. + return; + } + int missed = 1; + // Note: when termination is detected inside this loop, + // we MUST call clear() manually. + for (; ; ) { + // The requested number of items at this point + long r = requested.get(); + // The number of items emitted thus far + long emitted = 0L; + while (emitted != r) { + if (cancelled) { + clear(); + return; + } + Object result; + try { + result = tryNext(); + } catch (Throwable t) { + doOnError(t); + clear(); + return; + } + if (result == null) { + break; + } + if (result instanceof Throwable) { + doOnError((Throwable) result); + clear(); + return; + } + doOnNext((ReactiveRow) result); + emitted++; + } + if (isExhausted()) { + doOnComplete(); + clear(); + return; + } + if (cancelled) { + clear(); + return; + } + if (emitted != 0) { + // if any item was emitted, adjust the requested field + ReactiveOperators.subCap(requested, emitted); + } + // if another thread tried to call drain() while we were busy, + // then we should do another drain round. + missed = draining.addAndGet(-missed); + if (missed == 0) { + break; + } + } + } + + /** + * Tries to return the next item, if one is readily available, and returns {@code null} otherwise. + * + *
<p>
Cannot run concurrently due to the {@link #draining} field. + */ + @Nullable + private Object tryNext() { + Page current = pages.peek(); + if (current != null) { + if (current.hasMoreRows()) { + return current.nextRow(); + } else if (current.hasMorePages()) { + // Discard current page as it is consumed. + // Don't discard the last page though as we need it + // to test isExhausted(). It will be GC'ed when a terminal signal + // is issued anyway, so that's no big deal. + if (pages.poll() == null) { + throw new AssertionError("Queue is empty, this should not happen"); + } + // if the next page is readily available, + // serve its first row now, no need to wait + // for the next drain. + return tryNext(); + } + } + // No items available right now. + return null; + } + + /** + * Returns {@code true} when the entire stream has been consumed and no more items can be emitted. + * When that is the case, a terminal signal is sent. + * + *
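A stripped-down model of this paging invariant (hypothetical `PagingSketch`/`PageSketch` types, JDK only, not the driver's actual `Page` class): the last page is never removed from the queue, so a fully consumed final page is an unambiguous end-of-stream marker.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;

class PagingSketch<T> {

  static final class PageSketch<T> {
    final Iterator<T> rows;
    final boolean hasMorePages;

    PageSketch(Iterator<T> rows, boolean hasMorePages) {
      this.rows = rows;
      this.hasMorePages = hasMorePages;
    }
  }

  private final Deque<PageSketch<T>> pages = new ArrayDeque<>();

  T tryNext() {
    PageSketch<T> current = pages.peek();
    if (current != null) {
      if (current.rows.hasNext()) {
        return current.rows.next();
      } else if (current.hasMorePages) {
        pages.poll(); // discard intermediate pages only; the last page stays queued
        return tryNext();
      }
    }
    return null; // nothing readily available yet
  }

  boolean isExhausted() {
    PageSketch<T> current = pages.peek();
    return current != null && !current.rows.hasNext() && !current.hasMorePages;
  }
}
```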
<p>
Cannot run concurrently due to the draining field. + */ + private boolean isExhausted() { + Page current = pages.peek(); + // Note: current can only be null when: + // 1) we are waiting for the first page and it hasn't arrived yet; + // 2) we just discarded the current page, but the next page hasn't arrived yet. + // In any case, a null here means it is not the last page, since the last page + // stays in the queue until the very end of the operation. + return current != null && !current.hasMoreRows() && !current.hasMorePages(); + } + + /** + * Runs on a subscriber thread initially, see {@link #start(Callable)}. Subsequent executions run + * on the thread that completes the pair of futures [current.fetchNextPage, pages.offer] and + * enqueues. This can be a driver IO thread or a subscriber thread; in both cases, cannot run + * concurrently due to the fact that one can only fetch the next page when the current one is + * arrived and enqueued. + */ + private void fetchNextPageAndEnqueue(@NonNull Page current, boolean firstPage) { + current + .fetchNextPage() + // as soon as the response arrives, + // create the new page + .handle( + (rs, t) -> { + Page page; + if (t == null) { + page = toPage(rs); + executionInfosSubscriber.onNext(rs.getExecutionInfo()); + if (!page.hasMorePages()) { + executionInfosSubscriber.onComplete(); + } + if (firstPage) { + columnDefinitionsSubscriber.onNext(rs.getColumnDefinitions()); + columnDefinitionsSubscriber.onComplete(); + // Avoid calling wasApplied on empty pages as some implementations may throw + // IllegalStateException; if the page is empty, this wasn't a CAS query, in which + // case, as per the method's contract, wasApplied should be true. + boolean wasApplied = rs.remaining() == 0 || rs.wasApplied(); + wasAppliedSubscriber.onNext(wasApplied); + wasAppliedSubscriber.onComplete(); + } + } else { + // Unwrap CompletionExceptions created by combined futures + if (t instanceof CompletionException) { + t = t.getCause(); + } + page = toErrorPage(t); + executionInfosSubscriber.onError(t); + if (firstPage) { + columnDefinitionsSubscriber.onError(t); + wasAppliedSubscriber.onError(t); + } + } + return page; + }) + .thenCompose(pages::offer) + .thenAccept( + page -> { + if (page.hasMorePages() && !cancelled) { + // preemptively fetch the next page, if available + fetchNextPageAndEnqueue(page, false); + } + drain(); + }); + } + + private void doOnNext(@NonNull ReactiveRow result) { + try { + mainSubscriber.onNext(result); + } catch (Throwable t) { + LOG.error( + mainSubscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.", + t); + cancel(); + } + } + + private void doOnComplete() { + try { + // Then we signal onComplete as per rules 1.2 and 1.5 + mainSubscriber.onComplete(); + } catch (Throwable t) { + LOG.error( + mainSubscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", + t); + } + // We need to consider this Subscription as cancelled as per rule 1.6 + cancel(); + } + + // package-private because it can be invoked by the publisher if the subscription handshake + // process fails. + void doOnError(@NonNull Throwable error) { + try { + // Then we signal the error downstream, as per rules 1.2 and 1.4. 
+ mainSubscriber.onError(error); + } catch (Throwable t) { + t.addSuppressed(error); + LOG.error( + mainSubscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", + t); + } + // We need to consider this Subscription as cancelled as per rule 1.6 + cancel(); + } + + private void clear() { + // We don't need these pages anymore and should not hold references + // to them. + pages.clear(); + // As per 3.13, Subscription.cancel() MUST request the Publisher to + // eventually drop any references to the corresponding subscriber. + // Our own publishers do not keep references to this subscription, + // but downstream processors might do so, which is why we need to + // defensively clear the subscriber reference when we are done. + mainSubscriber = null; + columnDefinitionsSubscriber = null; + executionInfosSubscriber = null; + wasAppliedSubscriber = null; + } + + /** + * Converts the received result object into a {@link Page}. + * + * @param rs the result object to convert. + * @return a new page. + */ + @NonNull + private Page toPage(@NonNull ResultSetT rs) { + ExecutionInfo executionInfo = rs.getExecutionInfo(); + Iterator results = + Iterators.transform( + rs.currentPage().iterator(), + row -> new DefaultReactiveRow(Objects.requireNonNull(row), executionInfo)); + return new Page<>(results, rs.hasMorePages() ? rs::fetchNextPage : null); + } + + /** Converts the given error into a {@link Page}, containing the error as its only element. */ + @NonNull + private Page toErrorPage(@NonNull Throwable t) { + return new Page<>(Iterators.singletonIterator(t), null); + } + + /** + * A page object comprises an iterator over the page's results, and a future pointing to the next + * page (or {@code null}, if it's the last page). + */ + static class Page> { + + @NonNull final Iterator iterator; + + // A pointer to the next page, or null if this is the last page. + @Nullable final Callable> nextPage; + + /** called only from start() */ + Page(@NonNull Callable> nextPage) { + this.iterator = Collections.emptyIterator(); + this.nextPage = nextPage; + } + + Page(@NonNull Iterator iterator, @Nullable Callable> nextPage) { + this.iterator = iterator; + this.nextPage = nextPage; + } + + boolean hasMorePages() { + return nextPage != null; + } + + @NonNull + CompletionStage fetchNextPage() { + try { + return Objects.requireNonNull(nextPage).call(); + } catch (Exception e) { + // This is a synchronous failure in the driver. + // It can happen in rare cases when the driver throws an exception instead of returning a + // failed future; e.g. if someone tries to execute a continuous paging request but the + // protocol version in use does not support it. + // We treat it as a failed future. + return CompletableFutures.failedFuture(e); + } + } + + boolean hasMoreRows() { + return iterator.hasNext(); + } + + @NonNull + Object nextRow() { + return iterator.next(); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java new file mode 100644 index 00000000000..845cbe2349b --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessor.java @@ -0,0 +1,265 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import org.reactivestreams.Processor; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A simple {@link Processor} that receives items from an upstream publisher, stores them in an + * internal queue, then serves them to one single downstream subscriber. It does not support + * multiple subscriptions. + * + *
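A rough usage sketch (this is internal API, so the example is illustrative only; items published before the single subscriber arrives are simply buffered in the queue):

```java
import com.datastax.dse.driver.internal.core.cql.reactive.SimpleUnicastProcessor;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

public class ProcessorExample {
  public static void main(String[] args) {
    SimpleUnicastProcessor<String> processor = new SimpleUnicastProcessor<>();
    // Items published before subscription are buffered in the internal queue.
    processor.onNext("a");
    processor.onNext("b");
    processor.onComplete();
    processor.subscribe(
        new Subscriber<String>() {
          @Override
          public void onSubscribe(Subscription s) {
            s.request(Long.MAX_VALUE);
          }

          @Override
          public void onNext(String item) {
            System.out.println(item); // prints "a" then "b"
          }

          @Override
          public void onError(Throwable t) {
            t.printStackTrace();
          }

          @Override
          public void onComplete() {
            System.out.println("done");
          }
        });
  }
}
```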
<p>
Implementation note: this class is intended to serve as the common implementation for all + * secondary publishers exposed by the driver's reactive API, and in particular, for publishers of + * query metadata objects. Since such publishers are not critical, and usually only publish a + * handful of items, this implementation favors simplicity over efficiency (in particular, it uses + * an unbounded linked queue, but in practice there is no risk that this queue could grow + * uncontrollably). + * + * @param The type of elements received and emitted by this processor. + */ +public class SimpleUnicastProcessor + implements Processor, Subscription { + + private static final Logger LOG = LoggerFactory.getLogger(SimpleUnicastProcessor.class); + + private static final Object ON_COMPLETE = new Object(); + + private final Queue queue = new ConcurrentLinkedDeque<>(); + + private final AtomicBoolean once = new AtomicBoolean(false); + + private final AtomicInteger draining = new AtomicInteger(0); + + private final AtomicLong requested = new AtomicLong(0); + + private volatile Subscriber subscriber; + + private volatile boolean cancelled; + + @Override + public void subscribe(Subscriber subscriber) { + // As per rule 1.9, we need to throw an NPE if subscriber is null + Objects.requireNonNull(subscriber, "Subscriber cannot be null"); + // As per rule 1.11, this publisher supports only one subscriber. + if (once.compareAndSet(false, true)) { + this.subscriber = subscriber; + try { + subscriber.onSubscribe(this); + } catch (Throwable t) { + // As per rule 2.13: In the case that this rule is violated, + // any associated Subscription to the Subscriber MUST be considered as + // cancelled, and the caller MUST raise this error condition in a fashion + // that is adequate for the runtime environment. + doOnError( + new IllegalStateException( + subscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", + t)); + } + } else { + subscriber.onSubscribe(EmptySubscription.INSTANCE); + subscriber.onError( + new IllegalStateException("This publisher does not support multiple subscriptions")); + } + // As per 2.13, this method must return normally (i.e. not throw) + } + + @Override + public void onSubscribe(Subscription s) { + // no-op + } + + @Override + public void onNext(ElementT value) { + if (!cancelled) { + queue.offer(value); + drain(); + } + } + + @Override + public void onError(Throwable error) { + if (!cancelled) { + queue.offer(error); + drain(); + } + } + + @Override + public void onComplete() { + if (!cancelled) { + queue.offer(ON_COMPLETE); + drain(); + } + } + + @Override + public void request(long n) { + // As per 3.6: after the Subscription is cancelled, additional + // calls to request() MUST be NOPs. + if (!cancelled) { + if (n < 1) { + // Validate request as per rule 3.9 + doOnError( + new IllegalArgumentException( + subscriber + + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements.")); + } else { + // As per rule 3.17, when demand overflows Long.MAX_VALUE + // it can be treated as "effectively unbounded" + ReactiveOperators.addCap(requested, n); + drain(); + } + } + } + + @Override + public void cancel() { + // As per 3.5: Subscription.cancel() MUST respect the responsiveness of + // its caller by returning in a timely manner, MUST be idempotent and + // MUST be thread-safe. 
+ if (!cancelled) { + cancelled = true; + if (draining.getAndIncrement() == 0) { + // If nobody is draining, clear now; + // otherwise, the draining thread will notice + // that the cancelled flag was set + // and will clear for us. + clear(); + } + } + } + + @SuppressWarnings("ConditionalBreakInInfiniteLoop") + private void drain() { + if (draining.getAndIncrement() != 0) { + return; + } + int missed = 1; + for (; ; ) { + // Note: when termination is detected inside this loop, + // we MUST call clear() manually. + long requested = this.requested.get(); + long emitted = 0L; + while (requested != emitted) { + if (cancelled) { + clear(); + return; + } + Object t = queue.poll(); + if (t == null) { + break; + } + if (t instanceof Throwable) { + Throwable error = (Throwable) t; + doOnError(error); + clear(); + return; + } else if (t == ON_COMPLETE) { + doOnComplete(); + clear(); + return; + } else { + @SuppressWarnings("unchecked") + ElementT item = (ElementT) t; + doOnNext(item); + emitted++; + } + } + if (cancelled) { + clear(); + return; + } + if (emitted != 0) { + // if any item was emitted, adjust the requested field + ReactiveOperators.subCap(this.requested, emitted); + } + // if another thread tried to call drain() while we were busy, + // then we should do another drain round. + missed = draining.addAndGet(-missed); + if (missed == 0) { + break; + } + } + } + + private void doOnNext(@NonNull ElementT result) { + try { + subscriber.onNext(result); + } catch (Throwable t) { + LOG.error( + subscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.", + t); + cancel(); + } + } + + private void doOnComplete() { + try { + // Then we signal onComplete as per rules 1.2 and 1.5 + subscriber.onComplete(); + } catch (Throwable t) { + LOG.error( + subscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.", + t); + } + // We need to consider this Subscription as cancelled as per rule 1.6 + cancel(); + } + + private void doOnError(@NonNull Throwable error) { + try { + // Then we signal the error downstream, as per rules 1.2 and 1.4. + subscriber.onError(error); + } catch (Throwable t) { + t.addSuppressed(error); + LOG.error( + subscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.", + t); + } + // We need to consider this Subscription as cancelled as per rule 1.6 + cancel(); + } + + private void clear() { + // We don't need the elements anymore and should not hold references + // to them. + queue.clear(); + // As per 3.13, Subscription.cancel() MUST request the Publisher to + // eventually drop any references to the corresponding subscriber. + // Our own publishers do not keep references to this subscription, + // but downstream processors might do so, which is why we need to + // defensively clear the subscriber reference when we are done. + subscriber = null; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java new file mode 100644 index 00000000000..885d9bd48b7 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultGeometry.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.esri.core.geometry.GeometryException; +import com.esri.core.geometry.SpatialReference; +import com.esri.core.geometry.ogc.OGCGeometry; +import com.esri.core.geometry.ogc.OGCLineString; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.nio.ByteBuffer; +import net.jcip.annotations.Immutable; + +@Immutable +public abstract class DefaultGeometry implements Geometry, Serializable { + + private static final long serialVersionUID = 1L; + + /** + * Default spatial reference for Well Known Text / Well Known Binary. + * + *
<p>
4326 is the EPSG identifier of the World Geodetic System (WGS) in + * its later revision, WGS 84. + */ + public static final SpatialReference SPATIAL_REFERENCE_4326 = SpatialReference.create(4326); + + @NonNull + public static T fromOgcWellKnownText( + @NonNull String source, @NonNull Class klass) { + OGCGeometry geometry; + try { + geometry = OGCGeometry.fromText(source); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException(e.getMessage()); + } + validateType(geometry, klass); + return klass.cast(geometry); + } + + @NonNull + public static T fromOgcWellKnownBinary( + @NonNull ByteBuffer source, @NonNull Class klass) { + OGCGeometry geometry; + try { + geometry = OGCGeometry.fromBinary(source); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException(e.getMessage()); + } + validateType(geometry, klass); + return klass.cast(geometry); + } + + @NonNull + public static T fromOgcGeoJson( + @NonNull String source, @NonNull Class klass) { + OGCGeometry geometry; + try { + geometry = OGCGeometry.fromGeoJson(source); + } catch (Exception e) { + throw new IllegalArgumentException(e.getMessage()); + } + validateType(geometry, klass); + return klass.cast(geometry); + } + + private static void validateType(OGCGeometry geometry, Class klass) { + if (!geometry.getClass().equals(klass)) { + throw new IllegalArgumentException( + String.format( + "%s is not of type %s", geometry.getClass().getSimpleName(), klass.getSimpleName())); + } + } + + private final OGCGeometry ogcGeometry; + + protected DefaultGeometry(@NonNull OGCGeometry ogcGeometry) { + this.ogcGeometry = ogcGeometry; + Preconditions.checkNotNull(ogcGeometry); + validateOgcGeometry(ogcGeometry); + } + + private static void validateOgcGeometry(OGCGeometry geometry) { + try { + if (geometry.is3D()) { + throw new IllegalArgumentException(String.format("'%s' is not 2D", geometry.asText())); + } + if (!geometry.isSimple()) { + throw new IllegalArgumentException( + String.format( + "'%s' is not simple. 
Points and edges cannot self-intersect.", geometry.asText())); + } + } catch (GeometryException e) { + throw new IllegalArgumentException("Invalid geometry: " + e.getMessage()); + } + } + + @NonNull + public static ImmutableList<Point> getPoints(@NonNull OGCLineString lineString) { + ImmutableList.Builder<Point> builder = ImmutableList.builder(); + for (int i = 0; i < lineString.numPoints(); i++) { + builder.add(new DefaultPoint(lineString.pointN(i))); + } + return builder.build(); + } + + protected static com.esri.core.geometry.Point toEsri(Point p) { + return new com.esri.core.geometry.Point(p.X(), p.Y()); + } + + @NonNull + public OGCGeometry getOgcGeometry() { + return ogcGeometry; + } + + @NonNull + public com.esri.core.geometry.Geometry getEsriGeometry() { + return ogcGeometry.getEsriGeometry(); + } + + @NonNull + @Override + public String asWellKnownText() { + return ogcGeometry.asText(); + } + + @NonNull + @Override + public ByteBuffer asWellKnownBinary() { + return WkbUtil.asLittleEndianBinary(ogcGeometry); + } + + @NonNull + @Override + public String asGeoJson() { + return ogcGeometry.asGeoJson(); + } + + @Override + public boolean contains(@NonNull Geometry other) { + Preconditions.checkNotNull(other); + if (other instanceof DefaultGeometry) { + DefaultGeometry defaultOther = (DefaultGeometry) other; + return getOgcGeometry().contains(defaultOther.getOgcGeometry()); + } + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DefaultGeometry)) { + return false; + } + DefaultGeometry that = (DefaultGeometry) o; + return this.getOgcGeometry().equals(that.getOgcGeometry()); + } + + @Override + public int hashCode() { + // OGCGeometry subclasses do not override Object.hashCode() + // while com.esri.core.geometry.Geometry subclasses usually do, + // so use these instead; this is consistent with equals + // because OGCGeometry.equals() actually compares + // com.esri.core.geometry.Geometry objects + return getEsriGeometry().hashCode(); + } + + // Should never be called since we serialize a proxy (see subclasses) + @SuppressWarnings("UnusedVariable") + private void readObject(ObjectInputStream stream) throws InvalidObjectException { + throw new InvalidObjectException("Proxy required"); + } + + @Override + public String toString() { + return asWellKnownText(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java new file mode 100644 index 00000000000..1cf64bb366d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineString.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.esri.core.geometry.Polyline; +import com.esri.core.geometry.ogc.OGCLineString; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultLineString extends DefaultGeometry implements LineString { + + private static final long serialVersionUID = 1280189361978382248L; + + private static OGCLineString fromPoints(Point p1, Point p2, Point... pn) { + Polyline polyline = new Polyline(toEsri(p1), toEsri(p2)); + for (Point p : pn) { + polyline.lineTo(toEsri(p)); + } + return new OGCLineString(polyline, 0, DefaultGeometry.SPATIAL_REFERENCE_4326); + } + + private final List points; + + public DefaultLineString(@NonNull Point p1, @NonNull Point p2, @NonNull Point... pn) { + super(fromPoints(p1, p2, pn)); + this.points = ImmutableList.builder().add(p1).add(p2).add(pn).build(); + } + + public DefaultLineString(@NonNull OGCLineString lineString) { + super(lineString); + this.points = getPoints(lineString); + } + + @NonNull + @Override + public List getPoints() { + return points; + } + + /** + * This object gets replaced by an internal proxy for serialization. + * + * @serialData a single byte array containing the Well-Known Binary representation. + */ + private Object writeReplace() { + return new WkbSerializationProxy(this.asWellKnownBinary()); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java new file mode 100644 index 00000000000..c9540b10d8a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPoint.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.esri.core.geometry.ogc.OGCPoint; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultPoint extends DefaultGeometry implements Point { + + private static final long serialVersionUID = -8337622213980781285L; + + public DefaultPoint(double x, double y) { + this( + new OGCPoint( + new com.esri.core.geometry.Point(x, y), DefaultGeometry.SPATIAL_REFERENCE_4326)); + } + + public DefaultPoint(@NonNull OGCPoint point) { + super(point); + } + + @NonNull + @Override + public OGCPoint getOgcGeometry() { + return (OGCPoint) super.getOgcGeometry(); + } + + @Override + public double X() { + return getOgcGeometry().X(); + } + + @Override + public double Y() { + return getOgcGeometry().Y(); + } + + /** + * This object gets replaced by an internal proxy for serialization. + * + * @serialData a single byte array containing the Well-Known Binary representation. + */ + private Object writeReplace() { + return new WkbSerializationProxy(this.asWellKnownBinary()); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java new file mode 100644 index 00000000000..27d375d42b1 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygon.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.esri.core.geometry.Operator; +import com.esri.core.geometry.OperatorFactoryLocal; +import com.esri.core.geometry.OperatorSimplifyOGC; +import com.esri.core.geometry.ogc.OGCPolygon; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.List; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultPolygon extends DefaultGeometry implements Polygon { + + private static final long serialVersionUID = 3694196802962890314L; + + private final List exteriorRing; + private final List> interiorRings; + + public DefaultPolygon( + @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... 
pn) { + super(fromPoints(p1, p2, p3, pn)); + this.exteriorRing = ImmutableList.builder().add(p1).add(p2).add(p3).add(pn).build(); + this.interiorRings = Collections.emptyList(); + } + + public DefaultPolygon(@NonNull OGCPolygon polygon) { + super(polygon); + if (polygon.isEmpty()) { + this.exteriorRing = ImmutableList.of(); + } else { + this.exteriorRing = getPoints(polygon.exteriorRing()); + } + + ImmutableList.Builder> builder = ImmutableList.builder(); + for (int i = 0; i < polygon.numInteriorRing(); i++) { + builder.add(getPoints(polygon.interiorRingN(i))); + } + this.interiorRings = builder.build(); + } + + @NonNull + @Override + public List getExteriorRing() { + return exteriorRing; + } + + @NonNull + @Override + public List> getInteriorRings() { + return interiorRings; + } + + private static OGCPolygon fromPoints(Point p1, Point p2, Point p3, Point... pn) { + com.esri.core.geometry.Polygon polygon = new com.esri.core.geometry.Polygon(); + addPath(polygon, p1, p2, p3, pn); + return new OGCPolygon(simplify(polygon), DefaultGeometry.SPATIAL_REFERENCE_4326); + } + + private static void addPath( + com.esri.core.geometry.Polygon polygon, Point p1, Point p2, Point p3, Point[] pn) { + + polygon.startPath(toEsri(p1)); + polygon.lineTo(toEsri(p2)); + polygon.lineTo(toEsri(p3)); + for (Point p : pn) { + polygon.lineTo(toEsri(p)); + } + } + + private static com.esri.core.geometry.Polygon simplify(com.esri.core.geometry.Polygon polygon) { + OperatorSimplifyOGC op = + (OperatorSimplifyOGC) + OperatorFactoryLocal.getInstance().getOperator(Operator.Type.SimplifyOGC); + return (com.esri.core.geometry.Polygon) + op.execute(polygon, DefaultGeometry.SPATIAL_REFERENCE_4326, true, null); + } + + /** + * This object gets replaced by an internal proxy for serialization. + * + * @serialData a single byte array containing the Well-Known Binary representation. + */ + private Object writeReplace() { + return new WkbSerializationProxy(this.asWellKnownBinary()); + } + + public static class Builder implements Polygon.Builder { + private final com.esri.core.geometry.Polygon polygon = new com.esri.core.geometry.Polygon(); + + @NonNull + @Override + public Builder addRing( + @NonNull Point p1, @NonNull Point p2, @NonNull Point p3, @NonNull Point... pn) { + addPath(polygon, p1, p2, p3, pn); + return this; + } + + /** + * Builds the polygon. + * + * @return the polygon. + */ + @NonNull + @Override + public Polygon build() { + return new DefaultPolygon( + new OGCPolygon(simplify(polygon), DefaultGeometry.SPATIAL_REFERENCE_4326)); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java new file mode 100644 index 00000000000..518f6aa1346 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/Distance.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import static java.util.regex.Pattern.CASE_INSENSITIVE; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.graph.predicates.Geo; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.esri.core.geometry.MultiPath; +import com.esri.core.geometry.ogc.OGCGeometry; +import com.esri.core.geometry.ogc.OGCPoint; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import net.jcip.annotations.Immutable; + +/** + * The driver-side representation of DSE's {@code Geo.distance}. + * + *
<p>This is a circle in a two-dimensional XY plane represented by its center point and radius. It + * is used as a search criterion to determine whether another geospatial object lies within a + * circular area. + * + *
<p>
Note that this shape has no equivalent in the OGC and GeoJSON standards: as a consequence, + * {@link #asWellKnownText()} returns a custom format, and {@link #getOgcGeometry()}, {@link + * #asWellKnownBinary()}, and {@link #asGeoJson()} throw {@link UnsupportedOperationException}. + * + *
<p>
Unlike other geo types, this class is never exposed directly to driver clients: it is used + * internally by {@linkplain Geo#inside(Point, double) geo predicates}, but cannot be a column type, + * nor appear in CQL or graph results. Therefore it doesn't have a public-facing interface, nor a + * built-in codec. + */ +@Immutable +public class Distance extends DefaultGeometry { + + private static final Pattern WKT_PATTERN = + Pattern.compile( + "distance *\\( *\\( *([\\d\\.-]+) *([\\d+\\.-]+) *\\) *([\\d+\\.-]+) *\\)", + CASE_INSENSITIVE); + + /** + * Creates a distance from its Well-known + * Text (WKT) representation. + * + * @param source the Well-known Text representation to parse. + * @return the point represented by the WKT. + * @throws IllegalArgumentException if the string does not contain a valid Well-known Text + * representation. + * @see Distance#asWellKnownText() + */ + @NonNull + public static Distance fromWellKnownText(@NonNull String source) { + Matcher matcher = WKT_PATTERN.matcher(source.trim()); + if (matcher.matches() && matcher.groupCount() == 3) { + try { + return new Distance( + new DefaultPoint( + Double.parseDouble(matcher.group(1)), Double.parseDouble(matcher.group(2))), + Double.parseDouble(matcher.group(3))); + } catch (NumberFormatException var3) { + throw new IllegalArgumentException(String.format("Unable to parse %s", source)); + } + } else { + throw new IllegalArgumentException(String.format("Unable to parse %s", source)); + } + } + + private final DefaultPoint center; + + private final double radius; + + /** + * Creates a new distance with the given center and radius. + * + * @param center The center point. + * @param radius The radius of the circle representing distance. + */ + public Distance(@NonNull Point center, double radius) { + super(((DefaultPoint) center).getOgcGeometry()); + Preconditions.checkNotNull(center); + Preconditions.checkArgument(radius >= 0.0D, "Radius must be >= 0 (got %s)", radius); + this.center = ((DefaultPoint) center); + this.radius = radius; + } + + /** @return The center point of the circle representing this distance. */ + @NonNull + public Point getCenter() { + return center; + } + + /** @return The radius of the circle representing this distance. */ + public double getRadius() { + return radius; + } + + /** + * Returns a Well-known Text (WKT) + * representation of this geospatial type. + * + *
<p>
Since there is no Well-known Text specification for Distance, this returns a custom format + * of: DISTANCE((center.x center.y) radius) + * + * @return a Well-known Text representation of this object. + */ + @NonNull + @Override + public String asWellKnownText() { + return String.format("DISTANCE((%s %s) %s)", this.center.X(), this.center.Y(), this.radius); + } + + /** + * The distance type has no equivalent in the OGC standard: this method throws an {@link + * UnsupportedOperationException}. + */ + @NonNull + @Override + public OGCGeometry getOgcGeometry() { + throw new UnsupportedOperationException(); + } + + /** + * The distance type has no equivalent in the OGC standard: this method throws an {@link + * UnsupportedOperationException}. + */ + @NonNull + @Override + public ByteBuffer asWellKnownBinary() { + throw new UnsupportedOperationException(); + } + + /** + * The distance type has no equivalent in the GeoJSON standard: this method throws an {@link + * UnsupportedOperationException}. + */ + @Override + @NonNull + public String asGeoJson() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof Distance) { + Distance that = (Distance) other; + return Objects.equals(this.center, that.center) && this.radius == that.radius; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(center, radius); + } + + @SuppressWarnings("SimplifiableConditionalExpression") + @Override + public boolean contains(@NonNull Geometry geometry) { + return geometry instanceof Distance + ? this.containsDistance((Distance) geometry) + : geometry instanceof Point + ? this.containsPoint((Point) geometry) + : geometry instanceof LineString + ? this.containsLineString((LineString) geometry) + : geometry instanceof Polygon ? this.containsPolygon((Polygon) geometry) : false; + } + + private boolean containsDistance(Distance distance) { + return this.center.getOgcGeometry().distance(distance.center.getOgcGeometry()) + distance.radius + <= this.radius; + } + + private boolean containsPoint(Point point) { + return this.containsOGCPoint(((DefaultPoint) point).getOgcGeometry()); + } + + private boolean containsLineString(LineString lineString) { + MultiPath multiPath = + (MultiPath) ((DefaultLineString) lineString).getOgcGeometry().getEsriGeometry(); + return containsMultiPath(multiPath); + } + + private boolean containsPolygon(Polygon polygon) { + MultiPath multiPath = + (com.esri.core.geometry.Polygon) + ((DefaultPolygon) polygon).getOgcGeometry().getEsriGeometry(); + return containsMultiPath(multiPath); + } + + private boolean containsMultiPath(MultiPath multiPath) { + int numPoints = multiPath.getPointCount(); + for (int i = 0; i < numPoints; ++i) { + OGCPoint point = new OGCPoint(multiPath.getPoint(i), DefaultGeometry.SPATIAL_REFERENCE_4326); + if (!this.containsOGCPoint(point)) { + return false; + } + } + return true; + } + + private boolean containsOGCPoint(OGCPoint point) { + return this.center.getOgcGeometry().distance(point) <= this.radius; + } + + /** + * This object gets replaced by an internal proxy for serialization. 
+ * + * @serialData Point (wkb) for center followed by double for radius + */ + private Object writeReplace() { + return new DistanceSerializationProxy(this); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java new file mode 100644 index 00000000000..515af121980 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceSerializationProxy.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import java.io.Serializable; + +/** + * A thin wrapper around {@link Distance}, that gets substituted during the serialization / + * deserialization process. This allows {@link Distance} to be immutable and reference centers' OGC + * counterpart. + */ +public class DistanceSerializationProxy implements Serializable { + + private static final long serialVersionUID = 1L; + + private final Point center; + private final double radius; + + public DistanceSerializationProxy(Distance distance) { + this.center = distance.getCenter(); + this.radius = distance.getRadius(); + } + + private Object readResolve() { + return new Distance(center, radius); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java new file mode 100644 index 00000000000..92c0f6de2d5 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbSerializationProxy.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
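To make the custom WKT format above concrete, here is a small sketch of a parse/containment/round-trip cycle. It uses only methods shown in `Distance` plus the public `Point.fromCoordinates` factory, and assumes a classpath where the internal `Distance` class is visible:

```java
import com.datastax.dse.driver.api.core.data.geometry.Point;
import com.datastax.dse.driver.internal.core.data.geometry.Distance;

public class DistanceExample {
  public static void main(String[] args) {
    // Parse the custom WKT format: DISTANCE((center.x center.y) radius)
    Distance d = Distance.fromWellKnownText("DISTANCE((1.5 2.5) 3.0)");
    System.out.println(d.getCenter()); // the center point (1.5, 2.5)
    System.out.println(d.getRadius()); // 3.0

    // Containment: a point within 3 units of the center lies inside the circle.
    System.out.println(d.contains(Point.fromCoordinates(2.0, 2.0))); // true
    System.out.println(d.contains(Point.fromCoordinates(10.0, 10.0))); // false

    // Round trip back to the custom WKT form.
    System.out.println(d.asWellKnownText()); // DISTANCE((1.5 2.5) 3.0)
  }
}
```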
+ */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.oss.protocol.internal.util.Bytes; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import net.jcip.annotations.Immutable; + +/** + * A thin wrapper around a Well-Known Binary byte sequence, that gets substituted for {@link + * DefaultGeometry} instances during the serialization / deserialization process. This allows + * immutable geometry classes. + */ +@Immutable +class WkbSerializationProxy implements Serializable { + + private static final long serialVersionUID = 1L; + + private final byte[] wkb; + + WkbSerializationProxy(ByteBuffer wkb) { + this.wkb = Bytes.getArray(wkb); + } + + private Object readResolve() { + ByteBuffer buffer = ByteBuffer.wrap(wkb).order(ByteOrder.nativeOrder()); + int type = buffer.getInt(1); + + if (type == 1) { + return Point.fromWellKnownBinary(buffer); + } else if (type == 2) { + return LineString.fromWellKnownBinary(buffer); + } else if (type == 3) { + return Polygon.fromWellKnownBinary(buffer); + } else { + throw new IllegalArgumentException( + "Unknown geospatial type code in serialized form: " + type); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java new file mode 100644 index 00000000000..3f18b32fda2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/data/geometry/WkbUtil.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import com.esri.core.geometry.Geometry; +import com.esri.core.geometry.Operator; +import com.esri.core.geometry.OperatorExportToWkb; +import com.esri.core.geometry.OperatorFactoryLocal; +import com.esri.core.geometry.WkbExportFlags; +import com.esri.core.geometry.ogc.OGCGeometry; +import com.esri.core.geometry.ogc.OGCLineString; +import com.esri.core.geometry.ogc.OGCPoint; +import com.esri.core.geometry.ogc.OGCPolygon; +import java.lang.reflect.Method; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +/** + * Helper class to serialize OGC geometries to Well-Known Binary, forcing the byte order to little + * endian. + * + *
<p>
WKB encodes the byte order, so in theory we could send the buffer in any order, even if it is + * different from the server. However DSE server performs an additional validation step server-side: + * it deserializes to Java, serializes back to WKB, and then compares the original buffer to the + * "re-serialized" one. If they don't match, a MarshalException is thrown. So with a client in + * big-endian and a server in little-endian, we would get: + * + *
<pre>
+ * incoming buffer (big endian) --> Java --> reserialized buffer (little endian)
+ * </pre>
+ * + * Since the two buffers have a different endianness, they don't match. + * + *
<p>
The ESRI library defaults to the native byte order and doesn't let us change it. Therefore: + * + *
<ul>
+ *   <li>if the native order is little endian (vast majority of cases), this class simply delegates + * to the appropriate public API method;
+ *   <li>if the native order is big endian, it re-implements the serialization code, using + * reflection to get access to a private method. If reflection fails for any reason (updated + * ESRI library, security manager...), a runtime exception will be thrown.
+ * </ul>
+ */ +class WkbUtil { + + private static final boolean IS_NATIVE_LITTLE_ENDIAN = + ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN) + && System.getProperty("com.datastax.driver.dse.geometry.FORCE_REFLECTION_WKB") + == null; // only for tests + + static ByteBuffer asLittleEndianBinary(OGCGeometry ogcGeometry) { + if (IS_NATIVE_LITTLE_ENDIAN) { + return ogcGeometry.asBinary(); // the default implementation does what we want + } else { + int exportFlags; + if (ogcGeometry instanceof OGCPoint) { + exportFlags = 0; + } else if (ogcGeometry instanceof OGCLineString) { + exportFlags = WkbExportFlags.wkbExportLineString; + } else if (ogcGeometry instanceof OGCPolygon) { + exportFlags = WkbExportFlags.wkbExportPolygon; + } else { + throw new AssertionError("Unsupported type: " + ogcGeometry.getClass()); + } + + // Copy-pasted from OperatorExportToWkbLocal#execute, except for the flags and order + int size = exportToWKB(exportFlags, ogcGeometry.getEsriGeometry(), null); + ByteBuffer wkbBuffer = ByteBuffer.allocate(size).order(ByteOrder.LITTLE_ENDIAN); + exportToWKB(exportFlags, ogcGeometry.getEsriGeometry(), wkbBuffer); + return wkbBuffer; + } + } + + // Provides reflective access to the private static method OperatorExportToWkbLocal#exportToWKB + private static int exportToWKB(int exportFlags, Geometry geometry, ByteBuffer wkbBuffer) { + assert !IS_NATIVE_LITTLE_ENDIAN; + try { + return (Integer) exportToWKB.invoke(null, exportFlags, geometry, wkbBuffer); + } catch (Exception e) { + throw new RuntimeException( + "Couldn't invoke private method OperatorExportToWkbLocal#exportToWKB", e); + } + } + + private static final Method exportToWKB; + + static { + if (IS_NATIVE_LITTLE_ENDIAN) { + exportToWKB = null; // won't be used + } else { + try { + OperatorExportToWkb op = + (OperatorExportToWkb) + OperatorFactoryLocal.getInstance().getOperator(Operator.Type.ExportToWkb); + exportToWKB = + op.getClass() + .getDeclaredMethod("exportToWKB", int.class, Geometry.class, ByteBuffer.class); + exportToWKB.setAccessible(true); + } catch (NoSuchMethodException e) { + throw new RuntimeException( + "Couldn't get access to private method OperatorExportToWkbLocal#exportToWKB", e); + } + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java new file mode 100644 index 00000000000..333ba6099d3 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ByteBufUtil.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
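The byte-order handling above relies on the standard WKB header layout: byte 0 is the order marker (0 = XDR/big endian, 1 = NDR/little endian) and the next four bytes are the geometry type code, which is the value `WkbSerializationProxy#readResolve` reads with `buffer.getInt(1)`. A self-contained, JDK-only sketch of reading that header (the proxy itself assumes native order, since the driver always writes little endian):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class WkbHeader {
  /** Reads the WKB header; returns the geometry type code (1=Point, 2=LineString, 3=Polygon). */
  static int geometryType(ByteBuffer wkb) {
    byte orderMarker = wkb.get(0); // 0 = XDR (big endian), 1 = NDR (little endian)
    wkb.order(orderMarker == 1 ? ByteOrder.LITTLE_ENDIAN : ByteOrder.BIG_ENDIAN);
    return wkb.getInt(1); // the type code starts right after the order marker
  }

  public static void main(String[] args) {
    // Hand-built header of a little-endian WKB Point (type code 1).
    ByteBuffer header = ByteBuffer.allocate(5).order(ByteOrder.LITTLE_ENDIAN);
    header.put((byte) 1).putInt(1);
    System.out.println(geometryType(header)); // 1
  }
}
```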
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import java.nio.ByteBuffer; + +public class ByteBufUtil { + + // Does not move the reader index of the ByteBuf parameter + public static ByteBuffer toByteBuffer(ByteBuf buffer) { + if (buffer.isDirect()) { + return buffer.nioBuffer(); + } + final byte[] bytes = new byte[buffer.readableBytes()]; + buffer.getBytes(buffer.readerIndex(), bytes); + return ByteBuffer.wrap(bytes); + } + + static ByteBuf toByteBuf(ByteBuffer buffer) { + return Unpooled.wrappedBuffer(buffer); + } + + // read a predefined amount of bytes from the netty buffer and move its readerIndex + public static ByteBuffer readBytes(ByteBuf nettyBuf, int size) { + ByteBuffer res = ByteBuffer.allocate(size); + nettyBuf.readBytes(res); + res.flip(); + return res; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java new file mode 100644 index 00000000000..b6fe05a987c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/BytecodeGraphStatement.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Collections; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.Bytecode; + +/** + * A dedicated statement implementation for implicit traversal execution via a {@link + * DseGraphRemoteConnection}. + * + *
<p>
This is a simplified version of {@link FluentGraphStatement} that exposes the bytecode + * directly instead of the traversal. + * + *
<p>
This class is for internal use only. + */ +public class BytecodeGraphStatement extends GraphStatementBase { + + private final Bytecode bytecode; + + public BytecodeGraphStatement( + Bytecode bytecode, DriverExecutionProfile executionProfile, String executionProfileName) { + this( + bytecode, + null, + null, + null, + Statement.NO_DEFAULT_TIMESTAMP, + executionProfile, + executionProfileName, + Collections.emptyMap(), + null, + null, + null, + null, + null, + null); + } + + private BytecodeGraphStatement( + Bytecode bytecode, + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + super( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + this.bytecode = bytecode; + } + + public Bytecode getBytecode() { + return bytecode; + } + + @Override + protected BytecodeGraphStatement newInstance( + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + return new BytecodeGraphStatement( + bytecode, + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java new file mode 100644 index 00000000000..9c7f773c3a2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousAsyncGraphResultSet.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
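For context, `BytecodeGraphStatement` is what ultimately carries a traversal's bytecode when TinkerPop's fluent API is wired to a session. A sketch of that wiring, following the documented implicit-execution pattern (assumes a reachable cluster with a graph configured):

```java
import com.datastax.dse.driver.api.core.graph.DseGraph;
import com.datastax.oss.driver.api.core.CqlSession;
import java.util.List;
import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Vertex;

public class ImplicitExecutionExample {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // Bind TinkerPop's fluent API to the session: each terminal step
      // (toList(), next()...) ships the traversal's bytecode through
      // DseGraphRemoteConnection, which wraps it in a BytecodeGraphStatement.
      GraphTraversalSource g =
          AnonymousTraversalSource.traversal()
              .withRemote(DseGraph.remoteConnectionBuilder(session).build());

      List<Vertex> people = g.V().hasLabel("person").toList();
      System.out.println(people.size());
    }
  }
}
```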
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.Queue; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe // wraps a mutable queue +public class ContinuousAsyncGraphResultSet implements AsyncGraphResultSet { + + private final CountingIterator iterator; + private final int pageNumber; + private final boolean hasMorePages; + private final ExecutionInfo executionInfo; + private final ContinuousGraphRequestHandler continuousGraphRequestHandler; + private final Iterable currentPage; + + public ContinuousAsyncGraphResultSet( + ExecutionInfo executionInfo, + Queue data, + int pageNumber, + boolean hasMorePages, + ContinuousGraphRequestHandler continuousGraphRequestHandler, + GraphProtocol graphProtocol) { + + this.iterator = new GraphResultIterator(data, graphProtocol); + this.pageNumber = pageNumber; + this.hasMorePages = hasMorePages; + this.executionInfo = executionInfo; + this.continuousGraphRequestHandler = continuousGraphRequestHandler; + this.currentPage = () -> iterator; + } + + @NonNull + @Override + public ExecutionInfo getRequestExecutionInfo() { + return executionInfo; + } + + @NonNull + @Override + @Deprecated + public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { + return GraphExecutionInfoConverter.convert(executionInfo); + } + + @Override + public int remaining() { + return iterator.remaining(); + } + + @NonNull + @Override + public Iterable currentPage() { + return currentPage; + } + + @Override + public boolean hasMorePages() { + return hasMorePages; + } + + @NonNull + @Override + public CompletionStage fetchNextPage() throws IllegalStateException { + if (!hasMorePages()) { + throw new IllegalStateException( + "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); + } + return continuousGraphRequestHandler.fetchNextPage(); + } + + @Override + public void cancel() { + continuousGraphRequestHandler.cancel(); + } + + /** Returns the current page's number. Pages are numbered starting from 1. 
*/ + public int pageNumber() { + return pageNumber; + } + + static AsyncGraphResultSet empty(ExecutionInfo executionInfo) { + + return new AsyncGraphResultSet() { + + @NonNull + @Override + public ExecutionInfo getRequestExecutionInfo() { + return executionInfo; + } + + @NonNull + @Override + @Deprecated + public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { + return GraphExecutionInfoConverter.convert(executionInfo); + } + + @NonNull + @Override + public Iterable currentPage() { + return Collections.emptyList(); + } + + @Override + public int remaining() { + return 0; + } + + @Override + public boolean hasMorePages() { + return false; + } + + @NonNull + @Override + public CompletionStage fetchNextPage() throws IllegalStateException { + throw new IllegalStateException( + "Can't call fetchNextPage() on the last page (use hasMorePages() to check)"); + } + + @Override + public void cancel() { + // noop + } + }; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java new file mode 100644 index 00000000000..07d9e4c84a3 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandler.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
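A sketch of how a caller typically drains pages from an `AsyncGraphResultSet` like the one above, composing `fetchNextPage()` until `hasMorePages()` returns false (this uses only the public interface shown here):

```java
import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet;
import com.datastax.dse.driver.api.core.graph.GraphNode;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

public class PagingExample {
  /** Recursively consumes the current page, then fetches the next one if any. */
  static CompletionStage<Void> drain(AsyncGraphResultSet resultSet) {
    for (GraphNode node : resultSet.currentPage()) {
      System.out.println(node);
    }
    if (resultSet.hasMorePages()) {
      // fetchNextPage() must only be called while hasMorePages() is true.
      return resultSet.fetchNextPage().thenCompose(PagingExample::drain);
    }
    return CompletableFuture.completedFuture(null);
  }
}
```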
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.response.result.Rows; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.ArrayDeque; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import net.jcip.annotations.ThreadSafe; + +/** + * Handles a Graph request that supports multiple response messages (a.k.a. continuous paging + * request). + */ +@ThreadSafe +public class ContinuousGraphRequestHandler + extends ContinuousRequestHandlerBase, AsyncGraphResultSet> { + + private final GraphBinaryModule graphBinaryModule; + private final GraphSupportChecker graphSupportChecker; + private final Duration globalTimeout; + + ContinuousGraphRequestHandler( + @NonNull GraphStatement statement, + @NonNull DefaultSession session, + @NonNull InternalDriverContext context, + @NonNull String sessionLogPrefix, + @NonNull GraphBinaryModule graphBinaryModule, + @NonNull GraphSupportChecker graphSupportChecker) { + super( + statement, + session, + context, + sessionLogPrefix, + AsyncGraphResultSet.class, + true, + DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, + DseSessionMetric.GRAPH_REQUESTS, + DseNodeMetric.GRAPH_MESSAGES); + this.graphBinaryModule = graphBinaryModule; + this.graphSupportChecker = graphSupportChecker; + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + globalTimeout = + MoreObjects.firstNonNull( + statement.getTimeout(), + executionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)); + // NOTE that ordering of the following statement matters. + // We should register this request after all fields have been initialized. 
+ throttler.register(this); + } + + @NonNull + @Override + protected Duration getGlobalTimeout() { + return globalTimeout; + } + + @NonNull + @Override + protected Duration getPageTimeout(@NonNull GraphStatement statement, int pageNumber) { + return Duration.ZERO; + } + + @NonNull + @Override + protected Duration getReviseRequestTimeout(@NonNull GraphStatement statement) { + return Duration.ZERO; + } + + @Override + protected int getMaxEnqueuedPages(@NonNull GraphStatement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return executionProfile.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); + } + + @Override + protected int getMaxPages(@NonNull GraphStatement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return executionProfile.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES); + } + + @NonNull + @Override + protected Message getMessage(@NonNull GraphStatement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + GraphProtocol subProtocol = + graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); + return GraphConversions.createContinuousMessageFromGraphStatement( + statement, subProtocol, executionProfile, context, graphBinaryModule); + } + + @Override + protected boolean isTracingEnabled(@NonNull GraphStatement statement) { + return statement.isTracing(); + } + + @NonNull + @Override + protected Map createPayload(@NonNull GraphStatement statement) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + GraphProtocol subProtocol = + graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); + return GraphConversions.createCustomPayload( + statement, subProtocol, executionProfile, context, graphBinaryModule); + } + + @NonNull + @Override + protected AsyncGraphResultSet createEmptyResultSet(@NonNull ExecutionInfo executionInfo) { + return ContinuousAsyncGraphResultSet.empty(executionInfo); + } + + @NonNull + @Override + protected ContinuousAsyncGraphResultSet createResultSet( + @NonNull GraphStatement statement, + @NonNull Rows rows, + @NonNull ExecutionInfo executionInfo, + @NonNull ColumnDefinitions columnDefinitions) + throws IOException { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + GraphProtocol subProtocol = + graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); + + Queue graphNodes = new ArrayDeque<>(); + for (List row : rows.getData()) { + if (subProtocol.isGraphBinary()) { + graphNodes.offer(GraphConversions.createGraphBinaryGraphNode(row, this.graphBinaryModule)); + } else { + graphNodes.offer(GraphSONUtils.createGraphNode(row, subProtocol)); + } + } + + DseRowsMetadata metadata = (DseRowsMetadata) rows.getMetadata(); + return new ContinuousAsyncGraphResultSet( + executionInfo, + graphNodes, + metadata.continuousPageNumber, + !metadata.isLastContinuousPage, + this, + subProtocol); + } + + @Override + protected int pageNumber(@NonNull AsyncGraphResultSet resultSet) { + return ((ContinuousAsyncGraphResultSet) resultSet).pageNumber(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java new file mode 100644 index 
00000000000..349321da0cf --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/CqlCollectionPredicate.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import java.util.Collection; +import java.util.Map; +import java.util.Objects; +import org.javatuples.Pair; + +/** Predicates that can be used on CQL Collections. */ +public enum CqlCollectionPredicate implements DsePredicate { + contains { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + Preconditions.checkArgument(value instanceof Collection); + return ((Collection) value).contains(condition); + } + }, + + containsKey { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + Preconditions.checkArgument(value instanceof Map); + return ((Map) value).containsKey(condition); + } + }, + + containsValue { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + Preconditions.checkArgument(value instanceof Map); + return ((Map) value).containsValue(condition); + } + }, + + entryEq { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + Preconditions.checkArgument(condition instanceof Pair); + Preconditions.checkArgument(value instanceof Map); + Pair pair = (Pair) condition; + Map map = (Map) value; + return Objects.equals(map.get(pair.getValue0()), pair.getValue1()); + } + }; + + @Override + public boolean isValidCondition(Object condition) { + if (condition instanceof Pair) { + Pair pair = (Pair) condition; + return pair.getValue0() != null && pair.getValue1() != null; + } + return condition != null; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java new file mode 100644 index 00000000000..abc7cc9514e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultAsyncGraphResultSet.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
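The predicate semantics of `CqlCollectionPredicate` can be illustrated directly against the enum; `Pair` here is `org.javatuples.Pair`, as in the imports above. A small illustrative sketch:

```java
import com.datastax.dse.driver.internal.core.graph.CqlCollectionPredicate;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import org.javatuples.Pair;

public class PredicateExample {
  public static void main(String[] args) {
    // contains: the value must be a Collection holding the condition.
    System.out.println(
        CqlCollectionPredicate.contains.test(Arrays.asList("a", "b"), "a")); // true

    Map<String, Integer> scores = Collections.singletonMap("alice", 42);
    System.out.println(CqlCollectionPredicate.containsKey.test(scores, "alice")); // true
    System.out.println(CqlCollectionPredicate.containsValue.test(scores, 42)); // true

    // entryEq: the condition is a (key, expected value) pair.
    System.out.println(
        CqlCollectionPredicate.entryEq.test(scores, Pair.with("alice", 42))); // true
  }
}
```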
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Queue; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe // wraps a mutable queue +public class DefaultAsyncGraphResultSet implements AsyncGraphResultSet { + + private final ExecutionInfo executionInfo; + private final CountingIterator iterator; + private final Iterable currentPage; + + public DefaultAsyncGraphResultSet( + ExecutionInfo executionInfo, Queue data, GraphProtocol graphProtocol) { + this.executionInfo = executionInfo; + this.iterator = new GraphResultIterator(data, graphProtocol); + this.currentPage = () -> iterator; + } + + @NonNull + @Override + public ExecutionInfo getRequestExecutionInfo() { + return executionInfo; + } + + @NonNull + @Override + @Deprecated + public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { + return GraphExecutionInfoConverter.convert(executionInfo); + } + + @Override + public int remaining() { + return iterator.remaining(); + } + + @NonNull + @Override + public Iterable currentPage() { + return currentPage; + } + + @Override + public boolean hasMorePages() { + return false; + } + + @NonNull + @Override + public CompletionStage fetchNextPage() throws IllegalStateException { + throw new IllegalStateException( + "No next page. Use #hasMorePages before calling this method to avoid this error."); + } + + @Override + public void cancel() { + // nothing to do + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java new file mode 100644 index 00000000000..e16287c415d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultBatchGraphStatement.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
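A sketch of how the immutable batch statement defined just below is usually assembled, assuming the `builder()` factory on the public `BatchGraphStatement` interface and the shared `DseGraph.g` traversal source:

```java
import static com.datastax.dse.driver.api.core.graph.DseGraph.g;

import com.datastax.dse.driver.api.core.graph.BatchGraphStatement;

public class BatchExample {
  public static void main(String[] args) {
    BatchGraphStatement batch =
        BatchGraphStatement.builder()
            .addTraversal(g.addV("person").property("name", "alice"))
            .addTraversal(g.addV("person").property("name", "bob"))
            .build();
    // Once built, addTraversal/addTraversals on the statement itself
    // return new immutable instances rather than mutating this one.
    System.out.println(batch.size()); // 2
  }
}
```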
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import net.jcip.annotations.Immutable; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +@Immutable +public class DefaultBatchGraphStatement extends GraphStatementBase + implements BatchGraphStatement { + + private final List traversals; + + public DefaultBatchGraphStatement( + Iterable traversals, + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + super( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + this.traversals = ImmutableList.copyOf(traversals); + } + + @NonNull + @Override + public DefaultBatchGraphStatement addTraversal(@NonNull GraphTraversal newTraversal) { + return new DefaultBatchGraphStatement( + ImmutableList.builder().addAll(traversals).add(newTraversal).build(), + isIdempotent(), + getTimeout(), + getNode(), + getTimestamp(), + getExecutionProfile(), + getExecutionProfileName(), + getCustomPayload(), + getGraphName(), + getTraversalSource(), + getSubProtocol(), + getConsistencyLevel(), + getReadConsistencyLevel(), + getWriteConsistencyLevel()); + } + + @NonNull + @Override + public DefaultBatchGraphStatement addTraversals(@NonNull Iterable newTraversals) { + return new DefaultBatchGraphStatement( + ImmutableList.builder().addAll(traversals).addAll(newTraversals).build(), + isIdempotent(), + getTimeout(), + getNode(), + getTimestamp(), + getExecutionProfile(), + getExecutionProfileName(), + getCustomPayload(), + getGraphName(), + getTraversalSource(), + getSubProtocol(), + getConsistencyLevel(), + getReadConsistencyLevel(), + getWriteConsistencyLevel()); + } + + @Override + public int size() { + return this.traversals.size(); + } + + @Override + protected BatchGraphStatement newInstance( + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + return new DefaultBatchGraphStatement( + traversals, + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @NonNull + @Override + public Iterator iterator() { + return this.traversals.iterator(); + } +} diff --git 
a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java new file mode 100644 index 00000000000..146e8e17ea2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultDseRemoteConnectionBuilder.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.DseGraphRemoteConnectionBuilder; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import net.jcip.annotations.NotThreadSafe; +import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection; + +@NotThreadSafe +public class DefaultDseRemoteConnectionBuilder implements DseGraphRemoteConnectionBuilder { + + private final CqlSession session; + private DriverExecutionProfile executionProfile; + private String executionProfileName; + + public DefaultDseRemoteConnectionBuilder(CqlSession session) { + this.session = session; + } + + @Override + public RemoteConnection build() { + return new DseGraphRemoteConnection(session, executionProfile, executionProfileName); + } + + @Override + public DseGraphRemoteConnectionBuilder withExecutionProfile( + DriverExecutionProfile executionProfile) { + this.executionProfile = executionProfile; + return this; + } + + @Override + public DseGraphRemoteConnectionBuilder withExecutionProfileName(String executionProfileName) { + this.executionProfileName = executionProfileName; + return this; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java new file mode 100644 index 00000000000..0f6f1faabbf --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultFluentGraphStatement.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Map; +import net.jcip.annotations.Immutable; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; + +@Immutable +public class DefaultFluentGraphStatement extends GraphStatementBase + implements FluentGraphStatement { + + private final GraphTraversal traversal; + + public DefaultFluentGraphStatement( + GraphTraversal traversal, + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + super( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + this.traversal = traversal; + } + + @Override + protected FluentGraphStatement newInstance( + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + return new DefaultFluentGraphStatement( + traversal, + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @NonNull + @Override + public GraphTraversal getTraversal() { + return traversal; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java new file mode 100644 index 00000000000..71f79134237 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DefaultScriptGraphStatement.java @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
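A minimal sketch of the explicit (non-remote) execution style that `DefaultFluentGraphStatement` above supports, assuming the `FluentGraphStatement.newInstance` factory and a session with a graph configured:

```java
import static com.datastax.dse.driver.api.core.graph.DseGraph.g;

import com.datastax.dse.driver.api.core.graph.FluentGraphStatement;
import com.datastax.dse.driver.api.core.graph.GraphResultSet;
import com.datastax.oss.driver.api.core.CqlSession;

public class FluentExample {
  static void run(CqlSession session) {
    // The traversal is not executed locally; the driver extracts its bytecode
    // and sends it to the server for execution.
    FluentGraphStatement statement =
        FluentGraphStatement.newInstance(g.V().has("person", "name", "alice"));
    GraphResultSet result = session.execute(statement);
    result.forEach(System.out::println);
  }
}
```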
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultScriptGraphStatement extends GraphStatementBase + implements ScriptGraphStatement { + + private final String script; + private final Boolean isSystemQuery; + private final NullAllowingImmutableMap queryParams; + + public DefaultScriptGraphStatement( + String script, + Map queryParams, + Boolean isSystemQuery, + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + super( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + this.script = script; + this.isSystemQuery = isSystemQuery; + this.queryParams = NullAllowingImmutableMap.copyOf(queryParams); + } + + //// Script GraphStatement level options + + @NonNull + @Override + public String getScript() { + return script; + } + + @NonNull + @Override + public ScriptGraphStatement setSystemQuery(@Nullable Boolean newValue) { + return new DefaultScriptGraphStatement( + script, + queryParams, + newValue, + isIdempotent(), + getTimeout(), + getNode(), + getTimestamp(), + getExecutionProfile(), + getExecutionProfileName(), + getCustomPayload(), + getGraphName(), + getTraversalSource(), + getSubProtocol(), + getConsistencyLevel(), + getReadConsistencyLevel(), + getWriteConsistencyLevel()); + } + + @Nullable + @Override + public Boolean isSystemQuery() { + return isSystemQuery; + } + + @NonNull + @Override + public Map getQueryParams() { + return this.queryParams; + } + + @NonNull + @Override + public ScriptGraphStatement setQueryParam(@NonNull String name, @Nullable Object value) { + NullAllowingImmutableMap.Builder newQueryParamsBuilder = + NullAllowingImmutableMap.builder(); + for (Map.Entry entry : queryParams.entrySet()) { + if (!entry.getKey().equals(name)) { + newQueryParamsBuilder.put(entry.getKey(), entry.getValue()); + } + } + newQueryParamsBuilder.put(name, value); + return setQueryParams(newQueryParamsBuilder.build()); + } + + @NonNull + @Override + public ScriptGraphStatement removeQueryParam(@NonNull String name) { + if (!queryParams.containsKey(name)) { + return this; + } else { + NullAllowingImmutableMap.Builder newQueryParamsBuilder = + NullAllowingImmutableMap.builder(); + for (Map.Entry entry : queryParams.entrySet()) { + if (!entry.getKey().equals(name)) { + newQueryParamsBuilder.put(entry.getKey(), entry.getValue()); + } + } + return setQueryParams(newQueryParamsBuilder.build()); + } + } + + private ScriptGraphStatement setQueryParams(Map newQueryParams) { + return new DefaultScriptGraphStatement( + script, + 
newQueryParams, + isSystemQuery, + isIdempotent(), + getTimeout(), + getNode(), + getTimestamp(), + getExecutionProfile(), + getExecutionProfileName(), + getCustomPayload(), + getGraphName(), + getTraversalSource(), + getSubProtocol(), + getConsistencyLevel(), + getReadConsistencyLevel(), + getWriteConsistencyLevel()); + } + + @Override + protected ScriptGraphStatement newInstance( + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + return new DefaultScriptGraphStatement( + script, + queryParams, + isSystemQuery, + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Override + public String toString() { + return String.format("ScriptGraphStatement['%s', params: %s]", this.script, this.queryParams); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java new file mode 100644 index 00000000000..a5ec0a1d115 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphRemoteConnection.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
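The copy-on-write parameter handling above is easiest to see from the caller's side; a sketch assuming the `newInstance` factory on the public `ScriptGraphStatement` interface:

```java
import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;

public class ScriptExample {
  public static void main(String[] args) {
    ScriptGraphStatement statement =
        ScriptGraphStatement.newInstance("g.V().has('name', userName)")
            .setQueryParam("userName", "alice"); // returns a new immutable instance

    System.out.println(statement.getQueryParams()); // {userName=alice}

    // Removing a parameter also copies; the original statement is untouched.
    ScriptGraphStatement without = statement.removeQueryParam("userName");
    System.out.println(without.getQueryParams()); // {}
  }
}
```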
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import java.util.concurrent.CompletableFuture;
+import net.jcip.annotations.Immutable;
+import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection;
+import org.apache.tinkerpop.gremlin.process.remote.traversal.RemoteTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.Bytecode;
+
+@Immutable
+public class DseGraphRemoteConnection implements RemoteConnection {
+
+  private final CqlSession session;
+  private final DriverExecutionProfile executionProfile;
+  private final String executionProfileName;
+
+  public DseGraphRemoteConnection(
+      CqlSession session, DriverExecutionProfile executionProfile, String executionProfileName) {
+    this.session = session;
+    this.executionProfile = executionProfile;
+    this.executionProfileName = executionProfileName;
+  }
+
+  @Override
+  public <E> CompletableFuture<RemoteTraversal<?, E>> submitAsync(Bytecode bytecode) {
+    return session
+        .executeAsync(new BytecodeGraphStatement(bytecode, executionProfile, executionProfileName))
+        .toCompletableFuture()
+        .thenApply(DseGraphTraversal::new);
+  }
+
+  @Override
+  public void close() throws Exception {
+    // do not close the DseSession here.
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java
new file mode 100644
index 00000000000..e0a5cf2d675
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DseGraphTraversal.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import java.util.Iterator; +import java.util.NoSuchElementException; +import net.jcip.annotations.NotThreadSafe; +import org.apache.tinkerpop.gremlin.process.remote.traversal.AbstractRemoteTraversal; +import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; +import org.apache.tinkerpop.gremlin.process.traversal.Traverser; + +@NotThreadSafe +class DseGraphTraversal extends AbstractRemoteTraversal { + + private final Iterator graphNodeIterator; + + public DseGraphTraversal(AsyncGraphResultSet firstPage) { + this.graphNodeIterator = GraphResultSets.toSync(firstPage).iterator(); + } + + @Override + public boolean hasNext() { + return graphNodeIterator.hasNext(); + } + + @Override + public E next() { + return nextTraverser().get(); + } + + @Override + @SuppressWarnings("unchecked") + public Traverser.Admin nextTraverser() { + if (hasNext()) { + GraphNode nextGraphNode = graphNodeIterator.next(); + + // get the Raw object from the ObjectGraphNode, create a new remote Traverser + // with bulk = 1 because bulk is not supported yet. Casting should be ok + // because we have been able to deserialize into the right type. + return new DefaultRemoteTraverser<>((E) nextGraphNode.as(Object.class), 1); + } else { + // finished iterating/nothing to iterate. Normal behaviour. + throw new NoSuchElementException(); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java new file mode 100644 index 00000000000..b5f8c30fd8c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/DsePredicate.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import java.util.function.BiPredicate; + +/** + * An extension of TinkerPop's {@link BiPredicate} adding simple pre-condition checking methods that + * have to be written in the implementations. 
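+ *
+ * <p>For illustration only (this hypothetical predicate is not part of the patch): an
+ * implementation typically calls {@code preEvaluate} at the top of {@code test} and supplies the
+ * validity check, mirroring the enum-per-predicate pattern used by {@code GeoPredicate}:
+ *
+ * <pre>{@code
+ * enum StartsWithPredicate implements DsePredicate {
+ *   startsWith {
+ *     @Override
+ *     public boolean test(Object value, Object condition) {
+ *       preEvaluate(condition); // fails fast if isValidCondition(condition) returns false
+ *       return value != null && value.toString().startsWith((String) condition);
+ *     }
+ *   };
+ *
+ *   @Override
+ *   public boolean isValidCondition(Object condition) {
+ *     return condition instanceof String;
+ *   }
+ * }
+ * }</pre>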
+ */
+public interface DsePredicate extends BiPredicate<Object, Object> {
+
+  default void preEvaluate(Object condition) {
+    Preconditions.checkArgument(
+        this.isValidCondition(condition), "Invalid condition provided: %s", condition);
+  }
+
+  boolean isValidCondition(Object condition);
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java
new file mode 100644
index 00000000000..5ab836babbf
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/EditDistance.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import com.datastax.oss.driver.shaded.guava.common.base.Objects;
+import com.datastax.oss.driver.shaded.guava.common.base.Preconditions;
+import java.io.Serializable;
+import net.jcip.annotations.Immutable;
+
+/**
+ * A container for a term and maximum edit distance.
+ *
+ * <p>
The context in which this is used determines the semantics of the edit distance. For instance, + * it might indicate single-character edits if used with fuzzy search queries or whole word + * movements if used with phrase proximity queries. + */ +@Immutable +public class EditDistance implements Serializable { + + private static final long serialVersionUID = 1L; + + public static final int DEFAULT_EDIT_DISTANCE = 0; + + public final String query; + public final int distance; + + public EditDistance(String query) { + this(query, DEFAULT_EDIT_DISTANCE); + } + + public EditDistance(String query, int distance) { + Preconditions.checkNotNull(query, "Query cannot be null."); + Preconditions.checkArgument(distance >= 0, "Edit distance cannot be negative."); + this.query = query; + this.distance = distance; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof EditDistance)) { + return false; + } + EditDistance that = (EditDistance) o; + return distance == that.distance && Objects.equal(query, that.query); + } + + @Override + public int hashCode() { + return Objects.hashCode(query, distance); + } + + @Override + public String toString() { + return "EditDistance{" + "query='" + query + '\'' + ", distance=" + distance + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java new file mode 100644 index 00000000000..39949e97198 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoPredicate.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.graph.predicates.Geo; +import com.datastax.dse.driver.internal.core.data.geometry.Distance; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; + +/** + * List of predicates for geolocation usage with DseGraph and Search indexes. Should not be accessed + * directly but through the {@link Geo} static methods. + */ +public enum GeoPredicate implements DsePredicate { + + /** Matches values within the distance specified by the condition over a Haversine geometry. 
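+   *
+   * <p>A rough sketch of the semantics, for illustration (using the helper defined at the bottom
+   * of this enum): a point {@code p} matches a {@code Distance} condition {@code d} when its
+   * haversine distance to the condition's center, expressed in degrees, does not exceed the
+   * radius:
+   *
+   * <pre>{@code
+   * boolean matches = haversineDistanceInDegrees(d.getCenter(), p) <= d.getRadius();
+   * }</pre>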
*/ + inside { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + if (value == null) { + return false; + } + Preconditions.checkArgument(value instanceof Geometry); + Distance distance = (Distance) condition; + if (value instanceof Point) { + return haversineDistanceInDegrees(distance.getCenter(), (Point) value) + <= distance.getRadius(); + } else if (value instanceof Polygon) { + for (Point point : ((Polygon) value).getExteriorRing()) { + if (haversineDistanceInDegrees(distance.getCenter(), point) > distance.getRadius()) { + return false; + } + } + } else if (value instanceof LineString) { + for (Point point : ((LineString) value).getPoints()) { + if (haversineDistanceInDegrees(distance.getCenter(), point) > distance.getRadius()) { + return false; + } + } + } else { + throw new UnsupportedOperationException( + String.format("Value type '%s' unsupported", value.getClass().getName())); + } + + return true; + } + + @Override + public String toString() { + return "inside"; + } + }, + + /** + * Matches values contained in the geometric entity specified by the condition on a 2D Euclidean + * plane. + */ + insideCartesian { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + if (value == null) { + return false; + } + Preconditions.checkArgument(value instanceof Geometry); + return ((Geometry) condition).contains((Geometry) value); + } + + @Override + public String toString() { + return "insideCartesian"; + } + }; + + @Override + public boolean isValidCondition(Object condition) { + return condition != null; + } + + static double haversineDistanceInDegrees(Point p1, Point p2) { + double dLat = Math.toRadians(p2.Y() - p1.Y()); + double dLon = Math.toRadians(p2.X() - p1.X()); + double lat1 = Math.toRadians(p1.Y()); + double lat2 = Math.toRadians(p2.Y()); + + double a = + Math.pow(Math.sin(dLat / 2), 2) + + Math.pow(Math.sin(dLon / 2), 2) * Math.cos(lat1) * Math.cos(lat2); + double c = 2 * Math.asin(Math.sqrt(a)); + return Math.toDegrees(c); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java new file mode 100644 index 00000000000..80d55dac69d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GeoUtils.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +public class GeoUtils { + private static final double DEGREES_TO_RADIANS = Math.PI / 180; + private static final double EARTH_MEAN_RADIUS_KM = 6371.0087714; + private static final double DEG_TO_KM = DEGREES_TO_RADIANS * EARTH_MEAN_RADIUS_KM; + private static final double KM_TO_MILES = 0.621371192; + public static final double KM_TO_DEG = 1 / DEG_TO_KM; + public static final double MILES_TO_KM = 1 / KM_TO_MILES; +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java new file mode 100644 index 00000000000..c95b26b2e26 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphConversions.java @@ -0,0 +1,410 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; +import com.datastax.dse.protocol.internal.request.RawBytesQuery; +import com.datastax.dse.protocol.internal.request.query.ContinuousPagingOptions; +import com.datastax.dse.protocol.internal.request.query.DseQueryOptions; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.ProtocolConstants; 
+import com.datastax.oss.protocol.internal.request.Query; +import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import io.netty.buffer.ByteBuf; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.Traverser; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; + +/** + * Utility class to move boilerplate out of {@link GraphRequestHandler}. + * + *
<p>
We extend {@link Conversions} only for methods that can be directly reused as-is; if something + * needs to be customized, it will be duplicated here instead of making the parent method + * "pluggable". + */ +public class GraphConversions extends Conversions { + + static final String GRAPH_LANG_OPTION_KEY = "graph-language"; + static final String GRAPH_NAME_OPTION_KEY = "graph-name"; + static final String GRAPH_SOURCE_OPTION_KEY = "graph-source"; + static final String GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY = "graph-read-consistency"; + static final String GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY = "graph-write-consistency"; + static final String GRAPH_RESULTS_OPTION_KEY = "graph-results"; + static final String GRAPH_TIMEOUT_OPTION_KEY = "request-timeout"; + static final String GRAPH_BINARY_QUERY_OPTION_KEY = "graph-binary-query"; + + static final String LANGUAGE_GROOVY = "gremlin-groovy"; + static final String LANGUAGE_BYTECODE = "bytecode-json"; + + private static final BufferFactory FACTORY = new DseNettyBufferFactory(); + + @VisibleForTesting static final byte[] EMPTY_STRING_QUERY = "".getBytes(UTF_8); + + public static Message createContinuousMessageFromGraphStatement( + GraphStatement statement, + GraphProtocol subProtocol, + DriverExecutionProfile config, + InternalDriverContext context, + GraphBinaryModule graphBinaryModule) { + + final List encodedQueryParams; + if (!(statement instanceof ScriptGraphStatement) + || ((ScriptGraphStatement) statement).getQueryParams().isEmpty()) { + encodedQueryParams = Collections.emptyList(); + } else { + try { + Map queryParams = ((ScriptGraphStatement) statement).getQueryParams(); + if (subProtocol.isGraphBinary()) { + Buffer graphBinaryParams = graphBinaryModule.serialize(queryParams); + encodedQueryParams = Collections.singletonList(graphBinaryParams.nioBuffer()); + graphBinaryParams.release(); + } else { + encodedQueryParams = + Collections.singletonList( + GraphSONUtils.serializeToByteBuffer(queryParams, subProtocol)); + } + } catch (IOException e) { + throw new UncheckedIOException( + "Couldn't serialize parameters for GraphStatement: " + statement, e); + } + } + + int consistencyLevel = + DefaultConsistencyLevel.valueOf(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .getProtocolCode(); + + long timestamp = statement.getTimestamp(); + if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { + timestamp = context.getTimestampGenerator().next(); + } + + int pageSize = config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE); + int maxPages = config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES); + int maxPagesPerSecond = + config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND); + int maxEnqueuedPages = + config.getInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES); + ContinuousPagingOptions options = + new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages); + + DseQueryOptions queryOptions = + new DseQueryOptions( + consistencyLevel, + encodedQueryParams, + Collections.emptyMap(), // ignored by the DSE Graph server + true, // also ignored + pageSize, + null, + ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL, // also ignored + timestamp, + null, // also ignored + false, // graph CP does not support sizeInBytes + options); + + if (statement instanceof ScriptGraphStatement) { + return new Query(((ScriptGraphStatement) statement).getScript(), queryOptions); + } else { + return new RawBytesQuery(getQueryBytes(statement, subProtocol), queryOptions); + } + } + + static 
Message createMessageFromGraphStatement( + GraphStatement statement, + GraphProtocol subProtocol, + DriverExecutionProfile config, + InternalDriverContext context, + GraphBinaryModule graphBinaryModule) { + + final List encodedQueryParams; + if (!(statement instanceof ScriptGraphStatement) + || ((ScriptGraphStatement) statement).getQueryParams().isEmpty()) { + encodedQueryParams = Collections.emptyList(); + } else { + try { + Map queryParams = ((ScriptGraphStatement) statement).getQueryParams(); + if (subProtocol.isGraphBinary()) { + Buffer graphBinaryParams = graphBinaryModule.serialize(queryParams); + encodedQueryParams = Collections.singletonList(graphBinaryParams.nioBuffer()); + graphBinaryParams.release(); + } else { + encodedQueryParams = + Collections.singletonList( + GraphSONUtils.serializeToByteBuffer(queryParams, subProtocol)); + } + } catch (IOException e) { + throw new UncheckedIOException( + "Couldn't serialize parameters for GraphStatement: " + statement, e); + } + } + + ConsistencyLevel consistency = statement.getConsistencyLevel(); + int consistencyLevel = + (consistency == null) + ? context + .getConsistencyLevelRegistry() + .nameToCode(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + : consistency.getProtocolCode(); + + long timestamp = statement.getTimestamp(); + if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { + timestamp = context.getTimestampGenerator().next(); + } + + DseQueryOptions queryOptions = + new DseQueryOptions( + consistencyLevel, + encodedQueryParams, + Collections.emptyMap(), // ignored by the DSE Graph server + true, // also ignored + 50, // also ignored + null, // also ignored + ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL, // also ignored + timestamp, + null, // also ignored + false, // also ignored + null // also ignored + ); + + if (statement instanceof ScriptGraphStatement) { + return new Query(((ScriptGraphStatement) statement).getScript(), queryOptions); + } else { + return new RawBytesQuery(getQueryBytes(statement, subProtocol), queryOptions); + } + } + + // This method returns either a Bytecode object, or a List if the statement is a + // BatchGraphStatement + @VisibleForTesting + public static Object bytecodeToSerialize(GraphStatement statement) { + Preconditions.checkArgument( + statement instanceof FluentGraphStatement + || statement instanceof BatchGraphStatement + || statement instanceof BytecodeGraphStatement, + "To serialize bytecode the query must be a fluent or batch statement, but was: %s", + statement.getClass()); + + Object toSerialize; + if (statement instanceof FluentGraphStatement) { + toSerialize = ((FluentGraphStatement) statement).getTraversal().asAdmin().getBytecode(); + } else if (statement instanceof BatchGraphStatement) { + // transform the Iterator to List + toSerialize = + ImmutableList.copyOf( + Iterators.transform( + ((BatchGraphStatement) statement).iterator(), + traversal -> traversal.asAdmin().getBytecode())); + } else { + toSerialize = ((BytecodeGraphStatement) statement).getBytecode(); + } + return toSerialize; + } + + private static byte[] getQueryBytes(GraphStatement statement, GraphProtocol graphSubProtocol) { + try { + return graphSubProtocol.isGraphBinary() + // if GraphBinary, the query is encoded in the custom payload, and not in the query field + // see GraphConversions#createCustomPayload() + ? 
EMPTY_STRING_QUERY + : GraphSONUtils.serializeToBytes(bytecodeToSerialize(statement), graphSubProtocol); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public static Map createCustomPayload( + GraphStatement statement, + GraphProtocol subProtocol, + DriverExecutionProfile config, + InternalDriverContext context, + GraphBinaryModule graphBinaryModule) { + + ProtocolVersion protocolVersion = context.getProtocolVersion(); + + NullAllowingImmutableMap.Builder payload = + NullAllowingImmutableMap.builder(); + Map statementOptions = statement.getCustomPayload(); + payload.putAll(statementOptions); + + final String graphLanguage; + + // Don't override anything that's already provided at the statement level + if (!statementOptions.containsKey(GRAPH_LANG_OPTION_KEY)) { + graphLanguage = + statement instanceof ScriptGraphStatement ? LANGUAGE_GROOVY : LANGUAGE_BYTECODE; + payload.put(GRAPH_LANG_OPTION_KEY, TypeCodecs.TEXT.encode(graphLanguage, protocolVersion)); + } else { + graphLanguage = + TypeCodecs.TEXT.decode(statementOptions.get(GRAPH_LANG_OPTION_KEY), protocolVersion); + Preconditions.checkNotNull( + graphLanguage, "A null value was set for the graph-language custom payload key."); + } + + if (!isSystemQuery(statement, config)) { + if (!statementOptions.containsKey(GRAPH_NAME_OPTION_KEY)) { + String graphName = statement.getGraphName(); + if (graphName == null) { + graphName = config.getString(DseDriverOption.GRAPH_NAME, null); + } + if (graphName != null) { + payload.put(GRAPH_NAME_OPTION_KEY, TypeCodecs.TEXT.encode(graphName, protocolVersion)); + } + } + if (!statementOptions.containsKey(GRAPH_SOURCE_OPTION_KEY)) { + String traversalSource = statement.getTraversalSource(); + if (traversalSource == null) { + traversalSource = config.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null); + } + if (traversalSource != null) { + payload.put( + GRAPH_SOURCE_OPTION_KEY, TypeCodecs.TEXT.encode(traversalSource, protocolVersion)); + } + } + } + + // the payload allows null entry values so doing a get directly here and checking for null + final ByteBuffer payloadInitialProtocol = statementOptions.get(GRAPH_RESULTS_OPTION_KEY); + if (payloadInitialProtocol == null) { + Preconditions.checkNotNull(subProtocol); + payload.put( + GRAPH_RESULTS_OPTION_KEY, + TypeCodecs.TEXT.encode(subProtocol.toInternalCode(), protocolVersion)); + } else { + subProtocol = + GraphProtocol.fromString(TypeCodecs.TEXT.decode(payloadInitialProtocol, protocolVersion)); + } + + if (subProtocol.isGraphBinary() && graphLanguage.equals(LANGUAGE_BYTECODE)) { + Object bytecodeQuery = bytecodeToSerialize(statement); + try { + Buffer bytecodeByteBuf = graphBinaryModule.serialize(bytecodeQuery); + payload.put(GRAPH_BINARY_QUERY_OPTION_KEY, bytecodeByteBuf.nioBuffer()); + bytecodeByteBuf.release(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + if (!statementOptions.containsKey(GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) { + ConsistencyLevel readCl = statement.getReadConsistencyLevel(); + String readClString = + readCl != null + ? readCl.name() + : config.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null); + if (readClString != null) { + payload.put( + GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY, + TypeCodecs.TEXT.encode(readClString, protocolVersion)); + } + } + + if (!statementOptions.containsKey(GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) { + ConsistencyLevel writeCl = statement.getWriteConsistencyLevel(); + String writeClString = + writeCl != null + ? 
writeCl.name() + : config.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null); + if (writeClString != null) { + payload.put( + GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY, + TypeCodecs.TEXT.encode(writeClString, protocolVersion)); + } + } + + if (!statementOptions.containsKey(GRAPH_TIMEOUT_OPTION_KEY)) { + Duration timeout = statement.getTimeout(); + if (timeout == null) { + timeout = config.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO); + } + if (timeout != null && !timeout.isZero()) { + payload.put( + GRAPH_TIMEOUT_OPTION_KEY, + TypeCodecs.BIGINT.encode(timeout.toMillis(), protocolVersion)); + } + } + return payload.build(); + } + + private static boolean isSystemQuery(GraphStatement statement, DriverExecutionProfile config) { + if (statement instanceof ScriptGraphStatement) { + Boolean statementValue = ((ScriptGraphStatement) statement).isSystemQuery(); + if (statementValue != null) { + return statementValue; + } + } + return config.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false); + } + + public static GraphNode createGraphBinaryGraphNode( + List data, GraphBinaryModule graphBinaryModule) throws IOException { + // there should be only one column in the given row + Preconditions.checkArgument(data.size() == 1, "Invalid row given to deserialize"); + + Buffer toDeserialize = FACTORY.wrap(data.get(0)); + Object deserializedObject = graphBinaryModule.deserialize(toDeserialize); + toDeserialize.release(); + assert deserializedObject instanceof Traverser + : "Graph protocol error. Received object should be a Traverser but it is not."; + return new ObjectGraphNode(deserializedObject); + } + + public static Duration resolveGraphRequestTimeout( + GraphStatement statement, InternalDriverContext context) { + DriverExecutionProfile executionProfile = resolveExecutionProfile(statement, context); + return statement.getTimeout() != null + ? statement.getTimeout() + : executionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT); + } + + public static GraphProtocol resolveGraphSubProtocol( + GraphStatement statement, + GraphSupportChecker graphSupportChecker, + InternalDriverContext context) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + return graphSupportChecker.inferGraphProtocol(statement, executionProfile, context); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java new file mode 100644 index 00000000000..b6472f690d3 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverter.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.QueryTrace; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.UUID; +import java.util.concurrent.CompletionStage; + +/** + * Handles conversions from / to GraphExecutionInfo and ExecutionInfo since GraphExecutionInfo has + * been deprecated by JAVA-2556. + */ +public class GraphExecutionInfoConverter { + + /** + * Called exclusively from default methods in API interfaces {@link + * com.datastax.dse.driver.api.core.graph.GraphResultSet} and {@link + * com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet}. Graph result set implementations + * do not use this method but rather the other one below. + */ + @SuppressWarnings("deprecation") + public static ExecutionInfo convert( + com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo) { + return new ExecutionInfo() { + + @NonNull + @Override + public Request getRequest() { + return graphExecutionInfo.getStatement(); + } + + @NonNull + @Override + public Statement getStatement() { + throw new ClassCastException("GraphStatement cannot be cast to Statement"); + } + + @Nullable + @Override + public Node getCoordinator() { + return graphExecutionInfo.getCoordinator(); + } + + @Override + public int getSpeculativeExecutionCount() { + return graphExecutionInfo.getSpeculativeExecutionCount(); + } + + @Override + public int getSuccessfulExecutionIndex() { + return graphExecutionInfo.getSuccessfulExecutionIndex(); + } + + @NonNull + @Override + public List> getErrors() { + return graphExecutionInfo.getErrors(); + } + + @Nullable + @Override + public ByteBuffer getPagingState() { + return null; + } + + @NonNull + @Override + public List getWarnings() { + return graphExecutionInfo.getWarnings(); + } + + @NonNull + @Override + public Map getIncomingPayload() { + return graphExecutionInfo.getIncomingPayload(); + } + + @Override + public boolean isSchemaInAgreement() { + return true; + } + + @Nullable + @Override + public UUID getTracingId() { + return null; + } + + @NonNull + @Override + public CompletionStage getQueryTraceAsync() { + return CompletableFutures.failedFuture( + new IllegalStateException("Tracing was disabled for this request")); + } + + @Override + public int getResponseSizeInBytes() { + return -1; + } + + @Override + public int getCompressedResponseSizeInBytes() { + return -1; + } + }; + } + + /** + * Called from graph result set implementations, to convert the original {@link ExecutionInfo} + * produced by request handlers into the (deprecated) type GraphExecutionInfo. 
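+   *
+   * <p>A minimal, illustrative call site (assuming {@code executionInfo} was produced by the
+   * request handler for a graph query):
+   *
+   * <pre>{@code
+   * com.datastax.dse.driver.api.core.graph.GraphExecutionInfo legacyInfo =
+   *     GraphExecutionInfoConverter.convert(executionInfo);
+   * }</pre>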
+ */
+  @SuppressWarnings("deprecation")
+  public static com.datastax.dse.driver.api.core.graph.GraphExecutionInfo convert(
+      ExecutionInfo executionInfo) {
+    return new com.datastax.dse.driver.api.core.graph.GraphExecutionInfo() {
+
+      @Override
+      public GraphStatement<?> getStatement() {
+        return (GraphStatement<?>) executionInfo.getRequest();
+      }
+
+      @Override
+      public Node getCoordinator() {
+        return executionInfo.getCoordinator();
+      }
+
+      @Override
+      public int getSpeculativeExecutionCount() {
+        return executionInfo.getSpeculativeExecutionCount();
+      }
+
+      @Override
+      public int getSuccessfulExecutionIndex() {
+        return executionInfo.getSuccessfulExecutionIndex();
+      }
+
+      @Override
+      public List<Map.Entry<Node, Throwable>> getErrors() {
+        return executionInfo.getErrors();
+      }
+
+      @Override
+      public List<String> getWarnings() {
+        return executionInfo.getWarnings();
+      }
+
+      @Override
+      public Map<String, ByteBuffer> getIncomingPayload() {
+        return executionInfo.getIncomingPayload();
+      }
+    };
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java
new file mode 100644
index 00000000000..6b7a9f4c430
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphProtocol.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public enum GraphProtocol {
+  GRAPHSON_1_0("graphson-1.0"),
+  GRAPHSON_2_0("graphson-2.0"),
+  GRAPH_BINARY_1_0("graph-binary-1.0"),
+  ;
+
+  private static final Map<String, GraphProtocol> BY_CODE;
+
+  static {
+    Map<String, GraphProtocol> tmp = new HashMap<>();
+    for (GraphProtocol value : values()) {
+      tmp.put(value.stringRepresentation, value);
+    }
+    BY_CODE = Collections.unmodifiableMap(tmp);
+  }
+
+  private final String stringRepresentation;
+
+  GraphProtocol(String stringRepresentation) {
+    this.stringRepresentation = stringRepresentation;
+  }
+
+  @NonNull
+  public String toInternalCode() {
+    return stringRepresentation;
+  }
+
+  @NonNull
+  public static GraphProtocol fromString(@Nullable String stringRepresentation) {
+    if (stringRepresentation == null || !BY_CODE.containsKey(stringRepresentation)) {
+      StringBuilder sb =
+          new StringBuilder(
+              String.format(
+                  "Graph protocol used [\"%s\"] unknown.
Possible values are: [ \"%s\"", + stringRepresentation, GraphProtocol.values()[0].toInternalCode())); + for (int i = 1; i < GraphProtocol.values().length; i++) { + sb.append(String.format(", \"%s\"", GraphProtocol.values()[i].toInternalCode())); + } + sb.append("]"); + throw new IllegalArgumentException(sb.toString()); + } + return BY_CODE.get(stringRepresentation); + } + + public boolean isGraphBinary() { + return this == GRAPH_BINARY_1_0; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java new file mode 100644 index 00000000000..050b03c66f4 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestAsyncProcessor.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.ThreadSafe; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; + +@ThreadSafe +public class GraphRequestAsyncProcessor + implements RequestProcessor, CompletionStage> { + + private final GraphBinaryModule graphBinaryModule; + private final GraphSupportChecker graphSupportChecker; + + public GraphRequestAsyncProcessor( + DefaultDriverContext context, GraphSupportChecker graphSupportChecker) { + TypeSerializerRegistry typeSerializerRegistry = + GraphBinaryModule.createDseTypeSerializerRegistry(context); + this.graphBinaryModule = + new GraphBinaryModule( + new GraphBinaryReader(typeSerializerRegistry), + new 
GraphBinaryWriter(typeSerializerRegistry)); + this.graphSupportChecker = graphSupportChecker; + } + + @NonNull + public GraphBinaryModule getGraphBinaryModule() { + return graphBinaryModule; + } + + @Override + public boolean canProcess(Request request, GenericType resultType) { + return (request instanceof ScriptGraphStatement + || request instanceof FluentGraphStatement + || request instanceof BatchGraphStatement + || request instanceof BytecodeGraphStatement) + && resultType.equals(GraphStatement.ASYNC); + } + + @Override + public CompletionStage process( + GraphStatement request, + DefaultSession session, + InternalDriverContext context, + String sessionLogPrefix) { + + if (graphSupportChecker.isPagingEnabled(request, context)) { + return new ContinuousGraphRequestHandler( + request, + session, + context, + sessionLogPrefix, + getGraphBinaryModule(), + graphSupportChecker) + .handle(); + } else { + return new GraphRequestHandler( + request, + session, + context, + sessionLogPrefix, + getGraphBinaryModule(), + graphSupportChecker) + .handle(); + } + } + + @Override + public CompletionStage newFailure(RuntimeException error) { + return CompletableFutures.failedFuture(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java new file mode 100644 index 00000000000..5c9ceb00df2 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandler.java @@ -0,0 +1,871 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.DriverException; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.NodeUnavailableException; +import com.datastax.oss.driver.api.core.RequestThrottlingException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.connection.FrameTooLongException; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; +import com.datastax.oss.driver.api.core.servererrors.ProtocolError; +import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; +import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; +import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; +import com.datastax.oss.driver.api.core.session.throttling.Throttled; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; +import com.datastax.oss.driver.internal.core.channel.DriverChannel; +import com.datastax.oss.driver.internal.core.channel.ResponseCallback; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.cql.DefaultExecutionInfo; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; +import com.datastax.oss.driver.internal.core.tracker.RequestLogger; +import com.datastax.oss.driver.internal.core.util.Loggers; +import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.response.Error; +import com.datastax.oss.protocol.internal.response.Result; +import com.datastax.oss.protocol.internal.response.result.Rows; +import com.datastax.oss.protocol.internal.response.result.Void; +import edu.umd.cs.findbugs.annotations.NonNull; +import 
io.netty.handler.codec.EncoderException; +import io.netty.util.Timeout; +import io.netty.util.Timer; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.AbstractMap; +import java.util.ArrayDeque; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class GraphRequestHandler implements Throttled { + + private static final Logger LOG = LoggerFactory.getLogger(GraphRequestHandler.class); + + private static final long NANOTIME_NOT_MEASURED_YET = -1; + private static final int NO_SUCCESSFUL_EXECUTION = -1; + + private final long startTimeNanos; + private final String logPrefix; + private final GraphStatement initialStatement; + private final DefaultSession session; + private final InternalDriverContext context; + protected final CompletableFuture result; + private final Timer timer; + + /** + * How many speculative executions are currently running (including the initial execution). We + * track this in order to know when to fail the request if all executions have reached the end of + * the query plan. + */ + private final AtomicInteger activeExecutionsCount; + + /** + * How many speculative executions have started (excluding the initial execution), whether they + * have completed or not. We track this in order to fill {@link + * ExecutionInfo#getSpeculativeExecutionCount()}. + */ + private final AtomicInteger startedSpeculativeExecutionsCount; + + private final Timeout scheduledTimeout; + private final List scheduledExecutions; + private final List inFlightCallbacks; + private final RequestThrottler throttler; + private final RequestTracker requestTracker; + private final SessionMetricUpdater sessionMetricUpdater; + private final GraphBinaryModule graphBinaryModule; + private final GraphSupportChecker graphSupportChecker; + + // The errors on the nodes that were already tried (lazily initialized on the first error). + // We don't use a map because nodes can appear multiple times. 
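+  // (A node that is retried contributes one entry per attempt, so errors are accumulated in an
+  // append-only list instead of being keyed by node.)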
+ private volatile List> errors; + + GraphRequestHandler( + @NonNull GraphStatement statement, + @NonNull DefaultSession dseSession, + @NonNull InternalDriverContext context, + @NonNull String sessionLogPrefix, + @NonNull GraphBinaryModule graphBinaryModule, + @NonNull GraphSupportChecker graphSupportChecker) { + this.startTimeNanos = System.nanoTime(); + this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); + LOG.trace("[{}] Creating new Graph request handler for request {}", logPrefix, statement); + this.initialStatement = statement; + this.session = dseSession; + this.context = context; + this.graphSupportChecker = graphSupportChecker; + this.result = new CompletableFuture<>(); + this.result.exceptionally( + t -> { + try { + if (t instanceof CancellationException) { + cancelScheduledTasks(); + context.getRequestThrottler().signalCancel(this); + } + } catch (Throwable t2) { + Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); + } + return null; + }); + this.graphBinaryModule = graphBinaryModule; + this.timer = context.getNettyOptions().getTimer(); + + this.activeExecutionsCount = new AtomicInteger(1); + this.startedSpeculativeExecutionsCount = new AtomicInteger(0); + this.scheduledExecutions = new CopyOnWriteArrayList<>(); + this.inFlightCallbacks = new CopyOnWriteArrayList<>(); + + this.requestTracker = context.getRequestTracker(); + this.sessionMetricUpdater = session.getMetricUpdater(); + + Duration timeout = GraphConversions.resolveGraphRequestTimeout(statement, context); + this.scheduledTimeout = scheduleTimeout(timeout); + + this.throttler = context.getRequestThrottler(); + this.throttler.register(this); + } + + @Override + public void onThrottleReady(boolean wasDelayed) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(initialStatement, context); + if (wasDelayed + // avoid call to nanoTime() if metric is disabled: + && sessionMetricUpdater.isEnabled( + DefaultSessionMetric.THROTTLING_DELAY, executionProfile.getName())) { + sessionMetricUpdater.updateTimer( + DefaultSessionMetric.THROTTLING_DELAY, + executionProfile.getName(), + System.nanoTime() - startTimeNanos, + TimeUnit.NANOSECONDS); + } + Queue queryPlan = + initialStatement.getNode() != null + ? new SimpleQueryPlan(initialStatement.getNode()) + : context + .getLoadBalancingPolicyWrapper() + .newQueryPlan(initialStatement, executionProfile.getName(), session); + sendRequest(initialStatement, null, queryPlan, 0, 0, true); + } + + public CompletionStage handle() { + return result; + } + + private Timeout scheduleTimeout(Duration timeoutDuration) { + if (timeoutDuration != null && timeoutDuration.toNanos() > 0) { + try { + return this.timer.newTimeout( + (Timeout timeout1) -> + setFinalError( + initialStatement, + new DriverTimeoutException("Query timed out after " + timeoutDuration), + null, + NO_SUCCESSFUL_EXECUTION), + timeoutDuration.toNanos(), + TimeUnit.NANOSECONDS); + } catch (IllegalStateException e) { + // If we raced with session shutdown the timer might be closed already, rethrow with a more + // explicit message + result.completeExceptionally( + "cannot be started once stopped".equals(e.getMessage()) + ? new IllegalStateException("Session is closed") + : e); + } + } + return null; + } + + /** + * Sends the request to the next available node. + * + * @param retriedNode if not null, it will be attempted first before the rest of the query plan. 
+   * @param queryPlan the list of nodes to try (shared with all other executions)
+   * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one,
+   *     etc.
+   * @param retryCount the number of times that the retry policy was invoked for this execution
+   *     already (note that some internal retries don't go through the policy, and therefore don't
+   *     increment this counter)
+   * @param scheduleNextExecution whether to schedule the next speculative execution
+   */
+  private void sendRequest(
+      GraphStatement<?> statement,
+      Node retriedNode,
+      Queue<Node> queryPlan,
+      int currentExecutionIndex,
+      int retryCount,
+      boolean scheduleNextExecution) {
+    if (result.isDone()) {
+      return;
+    }
+    Node node = retriedNode;
+    DriverChannel channel = null;
+    if (node == null || (channel = session.getChannel(node, logPrefix)) == null) {
+      while (!result.isDone() && (node = queryPlan.poll()) != null) {
+        channel = session.getChannel(node, logPrefix);
+        if (channel != null) {
+          break;
+        } else {
+          recordError(node, new NodeUnavailableException(node));
+        }
+      }
+    }
+    if (channel == null) {
+      // We've reached the end of the query plan without finding any node to write to
+      if (!result.isDone() && activeExecutionsCount.decrementAndGet() == 0) {
+        // We're the last execution so fail the result
+        setFinalError(
+            statement,
+            AllNodesFailedException.fromErrors(this.errors),
+            null,
+            NO_SUCCESSFUL_EXECUTION);
+      }
+    } else {
+      NodeResponseCallback nodeResponseCallback =
+          new NodeResponseCallback(
+              statement,
+              node,
+              queryPlan,
+              channel,
+              currentExecutionIndex,
+              retryCount,
+              scheduleNextExecution,
+              logPrefix);
+      DriverExecutionProfile executionProfile =
+          Conversions.resolveExecutionProfile(statement, context);
+      GraphProtocol graphSubProtocol =
+          GraphConversions.resolveGraphSubProtocol(statement, graphSupportChecker, context);
+      Message message =
+          GraphConversions.createMessageFromGraphStatement(
+              statement, graphSubProtocol, executionProfile, context, graphBinaryModule);
+      Map<String, ByteBuffer> customPayload =
+          GraphConversions.createCustomPayload(
+              statement, graphSubProtocol, executionProfile, context, graphBinaryModule);
+      channel
+          .write(message, statement.isTracing(), customPayload, nodeResponseCallback)
+          .addListener(nodeResponseCallback);
+    }
+  }
+
+  private void recordError(Node node, Throwable error) {
+    // Use a local variable to do only a single volatile read in the nominal case
+    List<Map.Entry<Node, Throwable>> errorsSnapshot = this.errors;
+    if (errorsSnapshot == null) {
+      synchronized (GraphRequestHandler.this) {
+        errorsSnapshot = this.errors;
+        if (errorsSnapshot == null) {
+          this.errors = errorsSnapshot = new CopyOnWriteArrayList<>();
+        }
+      }
+    }
+    errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error));
+  }
+
+  private void cancelScheduledTasks() {
+    if (this.scheduledTimeout != null) {
+      this.scheduledTimeout.cancel();
+    }
+    if (scheduledExecutions != null) {
+      for (Timeout scheduledExecution : scheduledExecutions) {
+        scheduledExecution.cancel();
+      }
+    }
+    for (NodeResponseCallback callback : inFlightCallbacks) {
+      callback.cancel();
+    }
+  }
+
+  private void setFinalResult(
+      Result resultMessage, Frame responseFrame, NodeResponseCallback callback) {
+    try {
+      ExecutionInfo executionInfo = buildExecutionInfo(callback, responseFrame);
+      DriverExecutionProfile executionProfile =
+          Conversions.resolveExecutionProfile(callback.statement, context);
+      GraphProtocol subProtocol =
+          GraphConversions.resolveGraphSubProtocol(
+              callback.statement, graphSupportChecker, context);
+      Queue<GraphNode> graphNodes = new
ArrayDeque<>(); + for (List row : ((Rows) resultMessage).getData()) { + if (subProtocol.isGraphBinary()) { + graphNodes.offer( + GraphConversions.createGraphBinaryGraphNode( + row, GraphRequestHandler.this.graphBinaryModule)); + } else { + graphNodes.offer(GraphSONUtils.createGraphNode(row, subProtocol)); + } + } + + DefaultAsyncGraphResultSet resultSet = + new DefaultAsyncGraphResultSet(executionInfo, graphNodes, subProtocol); + if (result.complete(resultSet)) { + cancelScheduledTasks(); + throttler.signalSuccess(this); + + // Only call nanoTime() if we're actually going to use it + long completionTimeNanos = NANOTIME_NOT_MEASURED_YET, + totalLatencyNanos = NANOTIME_NOT_MEASURED_YET; + if (!(requestTracker instanceof NoopRequestTracker)) { + completionTimeNanos = System.nanoTime(); + totalLatencyNanos = completionTimeNanos - startTimeNanos; + long nodeLatencyNanos = completionTimeNanos - callback.nodeStartTimeNanos; + requestTracker.onNodeSuccess( + callback.statement, nodeLatencyNanos, executionProfile, callback.node, logPrefix); + requestTracker.onSuccess( + callback.statement, totalLatencyNanos, executionProfile, callback.node, logPrefix); + } + if (sessionMetricUpdater.isEnabled( + DseSessionMetric.GRAPH_REQUESTS, executionProfile.getName())) { + if (completionTimeNanos == NANOTIME_NOT_MEASURED_YET) { + completionTimeNanos = System.nanoTime(); + totalLatencyNanos = completionTimeNanos - startTimeNanos; + } + sessionMetricUpdater.updateTimer( + DseSessionMetric.GRAPH_REQUESTS, + executionProfile.getName(), + totalLatencyNanos, + TimeUnit.NANOSECONDS); + } + } + // log the warnings if they have NOT been disabled + if (!executionInfo.getWarnings().isEmpty() + && executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOG_WARNINGS) + && LOG.isWarnEnabled()) { + logServerWarnings(callback.statement, executionInfo.getWarnings()); + } + } catch (Throwable error) { + setFinalError(callback.statement, error, callback.node, NO_SUCCESSFUL_EXECUTION); + } + } + + private void logServerWarnings(GraphStatement statement, List warnings) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + // use the RequestLogFormatter to format the query + StringBuilder statementString = new StringBuilder(); + context + .getRequestLogFormatter() + .appendRequest( + statement, + executionProfile.getInt( + DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, + RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_QUERY_LENGTH), + executionProfile.getBoolean( + DefaultDriverOption.REQUEST_LOGGER_VALUES, + RequestLogger.DEFAULT_REQUEST_LOGGER_SHOW_VALUES), + executionProfile.getInt( + DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, + RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUES), + executionProfile.getInt( + DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, + RequestLogger.DEFAULT_REQUEST_LOGGER_MAX_VALUE_LENGTH), + statementString); + // log each warning separately + warnings.forEach( + (warning) -> + LOG.warn("Query '{}' generated server side warning(s): {}", statementString, warning)); + } + + private ExecutionInfo buildExecutionInfo(NodeResponseCallback callback, Frame responseFrame) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(callback.statement, context); + return new DefaultExecutionInfo( + callback.statement, + callback.node, + startedSpeculativeExecutionsCount.get(), + callback.execution, + errors, + null, + responseFrame, + true, + session, + context, + executionProfile); + } + + @Override + public void 
onThrottleFailure(@NonNull RequestThrottlingException error) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(initialStatement, context); + sessionMetricUpdater.incrementCounter( + DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); + setFinalError(initialStatement, error, null, NO_SUCCESSFUL_EXECUTION); + } + + private void setFinalError( + GraphStatement statement, Throwable error, Node node, int execution) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(statement, context); + if (error instanceof DriverException) { + ((DriverException) error) + .setExecutionInfo( + new DefaultExecutionInfo( + statement, + node, + startedSpeculativeExecutionsCount.get(), + execution, + errors, + null, + null, + true, + session, + context, + executionProfile)); + } + if (result.completeExceptionally(error)) { + cancelScheduledTasks(); + if (!(requestTracker instanceof NoopRequestTracker)) { + long latencyNanos = System.nanoTime() - startTimeNanos; + requestTracker.onError(statement, error, latencyNanos, executionProfile, node, logPrefix); + } + if (error instanceof DriverTimeoutException) { + throttler.signalTimeout(this); + sessionMetricUpdater.incrementCounter( + DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, executionProfile.getName()); + } else if (!(error instanceof RequestThrottlingException)) { + throttler.signalError(this, error); + } + } + } + + /** + * Handles the interaction with a single node in the query plan. + * + *
<p>
An instance of this class is created each time we (re)try a node. + */ + private class NodeResponseCallback + implements ResponseCallback, GenericFutureListener> { + + private final long nodeStartTimeNanos = System.nanoTime(); + private final GraphStatement statement; + private final Node node; + private final Queue queryPlan; + private final DriverChannel channel; + // The identifier of the current execution (0 for the initial execution, 1 for the first + // speculative execution, etc.) + private final int execution; + // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for + // the first attempt of each execution). + private final int retryCount; + private final boolean scheduleNextExecution; + private final String logPrefix; + private final DriverExecutionProfile executionProfile; + + private NodeResponseCallback( + GraphStatement statement, + Node node, + Queue queryPlan, + DriverChannel channel, + int execution, + int retryCount, + boolean scheduleNextExecution, + String logPrefix) { + this.statement = statement; + this.node = node; + this.queryPlan = queryPlan; + this.channel = channel; + this.execution = execution; + this.retryCount = retryCount; + this.scheduleNextExecution = scheduleNextExecution; + this.logPrefix = logPrefix + "|" + execution; + this.executionProfile = Conversions.resolveExecutionProfile(statement, context); + } + + // this gets invoked once the write completes. + @Override + public void operationComplete(Future future) { + if (!future.isSuccess()) { + Throwable error = future.cause(); + if (error instanceof EncoderException + && error.getCause() instanceof FrameTooLongException) { + trackNodeError(node, error.getCause(), NANOTIME_NOT_MEASURED_YET); + setFinalError(statement, error.getCause(), node, execution); + } else { + LOG.trace( + "[{}] Failed to send request on {}, trying next node (cause: {})", + logPrefix, + channel, + error); + recordError(node, error); + trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); + ((DefaultNode) node) + .getMetricUpdater() + .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); + sendRequest( + statement, + null, + queryPlan, + execution, + retryCount, + scheduleNextExecution); // try next node + } + } else { + LOG.trace("[{}] Request sent on {}", logPrefix, channel); + if (result.isDone()) { + // If the handler completed since the last time we checked, cancel directly because we + // don't know if cancelScheduledTasks() has run yet + cancel(); + } else { + inFlightCallbacks.add(this); + if (scheduleNextExecution + && Conversions.resolveIdempotence(statement, executionProfile)) { + int nextExecution = execution + 1; + long nextDelay; + try { + nextDelay = + Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) + .nextExecution(node, null, statement, nextExecution); + } catch (Throwable cause) { + // This is a bug in the policy, but not fatal since we have at least one other + // execution already running. Don't fail the whole request. 
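+ // (Returning below merely skips the scheduling of further speculative executions; the
+ // request that was already written on this channel still proceeds normally.)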
+ LOG.error( + "[{}] Unexpected error while invoking the speculative execution policy", + logPrefix, + cause); + return; + } + if (nextDelay >= 0) { + scheduleSpeculativeExecution(nextExecution, nextDelay); + } else { + LOG.trace( + "[{}] Speculative execution policy returned {}, no next execution", + logPrefix, + nextDelay); + } + } + } + } + } + + private void scheduleSpeculativeExecution(int index, long delay) { + LOG.trace("[{}] Scheduling speculative execution {} in {} ms", logPrefix, index, delay); + try { + scheduledExecutions.add( + timer.newTimeout( + (Timeout timeout1) -> { + if (!result.isDone()) { + LOG.trace( + "[{}] Starting speculative execution {}", + GraphRequestHandler.this.logPrefix, + index); + activeExecutionsCount.incrementAndGet(); + startedSpeculativeExecutionsCount.incrementAndGet(); + // Note that `node` is the first node of the execution, it might not be the + // "slow" one if there were retries, but in practice retries are rare. + ((DefaultNode) node) + .getMetricUpdater() + .incrementCounter( + DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); + sendRequest(statement, null, queryPlan, index, 0, true); + } + }, + delay, + TimeUnit.MILLISECONDS)); + } catch (IllegalStateException e) { + // If we're racing with session shutdown, the timer might be stopped already. We don't want + // to schedule more executions anyway, so swallow the error. + if (!"cannot be started once stopped".equals(e.getMessage())) { + Loggers.warnWithException( + LOG, "[{}] Error while scheduling speculative execution", logPrefix, e); + } + } + } + + @Override + public void onResponse(Frame responseFrame) { + long nodeResponseTimeNanos = NANOTIME_NOT_MEASURED_YET; + NodeMetricUpdater nodeMetricUpdater = ((DefaultNode) node).getMetricUpdater(); + if (nodeMetricUpdater.isEnabled(DseNodeMetric.GRAPH_MESSAGES, executionProfile.getName())) { + nodeResponseTimeNanos = System.nanoTime(); + long nodeLatency = System.nanoTime() - nodeStartTimeNanos; + nodeMetricUpdater.updateTimer( + DseNodeMetric.GRAPH_MESSAGES, + executionProfile.getName(), + nodeLatency, + TimeUnit.NANOSECONDS); + } + inFlightCallbacks.remove(this); + if (result.isDone()) { + return; + } + try { + Message responseMessage = responseFrame.message; + if (responseMessage instanceof Result) { + LOG.trace("[{}] Got result, completing", logPrefix); + setFinalResult((Result) responseMessage, responseFrame, this); + } else if (responseMessage instanceof Error) { + LOG.trace("[{}] Got error response, processing", logPrefix); + processErrorResponse((Error) responseMessage); + } else { + trackNodeError( + node, + new IllegalStateException("Unexpected response " + responseMessage), + nodeResponseTimeNanos); + setFinalError( + statement, + new IllegalStateException("Unexpected response " + responseMessage), + node, + execution); + } + } catch (Throwable t) { + trackNodeError(node, t, nodeResponseTimeNanos); + setFinalError(statement, t, node, execution); + } + } + + private void processErrorResponse(Error errorMessage) { + CoordinatorException error = Conversions.toThrowable(node, errorMessage, context); + NodeMetricUpdater metricUpdater = ((DefaultNode) node).getMetricUpdater(); + if (error instanceof BootstrappingException) { + LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); + recordError(node, error); + trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); + sendRequest(statement, null, queryPlan, execution, retryCount, false); + } else if (error instanceof QueryValidationException + || error 
instanceof FunctionFailureException + || error instanceof ProtocolError) { + LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); + metricUpdater.incrementCounter(DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); + trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); + setFinalError(statement, error, node, execution); + } else { + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + RetryVerdict verdict; + if (error instanceof ReadTimeoutException) { + ReadTimeoutException readTimeout = (ReadTimeoutException) error; + verdict = + retryPolicy.onReadTimeoutVerdict( + statement, + readTimeout.getConsistencyLevel(), + readTimeout.getBlockFor(), + readTimeout.getReceived(), + readTimeout.wasDataPresent(), + retryCount); + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.READ_TIMEOUTS, + DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, + DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); + } else if (error instanceof WriteTimeoutException) { + WriteTimeoutException writeTimeout = (WriteTimeoutException) error; + verdict = + Conversions.resolveIdempotence(statement, executionProfile) + ? retryPolicy.onWriteTimeoutVerdict( + statement, + writeTimeout.getConsistencyLevel(), + writeTimeout.getWriteType(), + writeTimeout.getBlockFor(), + writeTimeout.getReceived(), + retryCount) + : RetryVerdict.RETHROW; + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.WRITE_TIMEOUTS, + DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, + DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); + } else if (error instanceof UnavailableException) { + UnavailableException unavailable = (UnavailableException) error; + verdict = + retryPolicy.onUnavailableVerdict( + statement, + unavailable.getConsistencyLevel(), + unavailable.getRequired(), + unavailable.getAlive(), + retryCount); + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.UNAVAILABLES, + DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, + DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); + } else { + verdict = + Conversions.resolveIdempotence(statement, executionProfile) + ? 
retryPolicy.onErrorResponseVerdict(statement, error, retryCount) + : RetryVerdict.RETHROW; + updateErrorMetrics( + metricUpdater, + verdict, + DefaultNodeMetric.OTHER_ERRORS, + DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, + DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); + } + processRetryVerdict(verdict, error); + } + } + + private void processRetryVerdict(RetryVerdict verdict, Throwable error) { + LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); + switch (verdict.getRetryDecision()) { + case RETRY_SAME: + recordError(node, error); + trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); + sendRequest( + verdict.getRetryRequest(statement), + node, + queryPlan, + execution, + retryCount + 1, + false); + break; + case RETRY_NEXT: + recordError(node, error); + trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); + sendRequest( + verdict.getRetryRequest(statement), + null, + queryPlan, + execution, + retryCount + 1, + false); + break; + case RETHROW: + trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); + setFinalError(statement, error, node, execution); + break; + case IGNORE: + setFinalResult(Void.INSTANCE, null, this); + break; + } + } + + private void updateErrorMetrics( + NodeMetricUpdater metricUpdater, + RetryVerdict verdict, + DefaultNodeMetric error, + DefaultNodeMetric retriesOnError, + DefaultNodeMetric ignoresOnError) { + metricUpdater.incrementCounter(error, executionProfile.getName()); + switch (verdict.getRetryDecision()) { + case RETRY_SAME: + case RETRY_NEXT: + metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); + metricUpdater.incrementCounter(retriesOnError, executionProfile.getName()); + break; + case IGNORE: + metricUpdater.incrementCounter(DefaultNodeMetric.IGNORES, executionProfile.getName()); + metricUpdater.incrementCounter(ignoresOnError, executionProfile.getName()); + break; + case RETHROW: + // nothing do do + } + } + + @Override + public void onFailure(Throwable error) { + inFlightCallbacks.remove(this); + if (result.isDone()) { + return; + } + LOG.trace("[{}] Request failure, processing: {}", logPrefix, error); + RetryVerdict verdict; + if (!Conversions.resolveIdempotence(statement, executionProfile) + || error instanceof FrameTooLongException) { + verdict = RetryVerdict.RETHROW; + } else { + try { + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); + } catch (Throwable cause) { + setFinalError( + statement, + new IllegalStateException("Unexpected error while invoking the retry policy", cause), + node, + NO_SUCCESSFUL_EXECUTION); + return; + } + } + processRetryVerdict(verdict, error); + updateErrorMetrics( + ((DefaultNode) node).getMetricUpdater(), + verdict, + DefaultNodeMetric.ABORTED_REQUESTS, + DefaultNodeMetric.RETRIES_ON_ABORTED, + DefaultNodeMetric.IGNORES_ON_ABORTED); + } + + void cancel() { + try { + if (!channel.closeFuture().isDone()) { + this.channel.cancel(this); + } + } catch (Throwable t) { + Loggers.warnWithException(LOG, "[{}] Error cancelling", logPrefix, t); + } + } + + /** + * @param nodeResponseTimeNanos the time we received the response, if it's already been + * measured. 
If {@link #NANOTIME_NOT_MEASURED_YET}, it hasn't and we need to measure it now + * (this is to avoid unnecessary calls to System.nanoTime) + */ + private void trackNodeError(Node node, Throwable error, long nodeResponseTimeNanos) { + if (requestTracker instanceof NoopRequestTracker) { + return; + } + if (nodeResponseTimeNanos == NANOTIME_NOT_MEASURED_YET) { + nodeResponseTimeNanos = System.nanoTime(); + } + long latencyNanos = nodeResponseTimeNanos - this.nodeStartTimeNanos; + requestTracker.onNodeError(statement, error, latencyNanos, executionProfile, node, logPrefix); + } + + @Override + public String toString() { + return logPrefix; + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java new file mode 100644 index 00000000000..bc2381482a8 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphRequestSyncProcessor.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; +import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class GraphRequestSyncProcessor + implements RequestProcessor, GraphResultSet> { + + private final GraphRequestAsyncProcessor asyncProcessor; + + public GraphRequestSyncProcessor(GraphRequestAsyncProcessor asyncProcessor) { + this.asyncProcessor = asyncProcessor; + } + + @Override + public boolean canProcess(Request request, GenericType resultType) { + return (request instanceof ScriptGraphStatement + || request instanceof FluentGraphStatement + || request instanceof BatchGraphStatement + || request instanceof BytecodeGraphStatement) + && resultType.equals(GraphStatement.SYNC); + } + + @Override + public GraphResultSet process( + GraphStatement request, + DefaultSession session, + InternalDriverContext context, + String sessionLogPrefix) { + BlockingOperation.checkNotDriverThread(); + AsyncGraphResultSet firstPage = + CompletableFutures.getUninterruptibly( + asyncProcessor.process(request, session, context, sessionLogPrefix)); + return GraphResultSets.toSync(firstPage); + } + + @Override + public GraphResultSet newFailure(RuntimeException error) { + throw error; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java new file mode 100644 index 00000000000..7e9043affec --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultIterator.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import java.util.Queue; +import net.jcip.annotations.NotThreadSafe; +import org.apache.tinkerpop.gremlin.process.traversal.Traverser; + +@NotThreadSafe // wraps a mutable queue +class GraphResultIterator extends CountingIterator { + + private final Queue data; + private final GraphProtocol graphProtocol; + + // Sometimes a traversal can yield the same result multiple times consecutively. To avoid + // duplicating the data, DSE graph sends it only once with a counter indicating how many times + // it's repeated. + private long repeat = 0; + private GraphNode lastGraphNode = null; + + GraphResultIterator(Queue data, GraphProtocol graphProtocol) { + super(data.size()); + this.data = data; + this.graphProtocol = graphProtocol; + } + + @Override + protected GraphNode computeNext() { + if (repeat > 1) { + repeat -= 1; + // Note that we don't make a defensive copy, we assume the client won't mutate the node + return lastGraphNode; + } + + GraphNode container = data.poll(); + if (container == null) { + return endOfData(); + } + + if (graphProtocol.isGraphBinary()) { + // results are contained in a Traverser object and not a Map if the protocol + // is GraphBinary + Preconditions.checkState( + container.as(Object.class) instanceof Traverser, + "Graph protocol error. Received object should be a Traverser but it is not."); + Traverser t = container.as(Traverser.class); + this.repeat = t.bulk(); + this.lastGraphNode = new ObjectGraphNode(t.get()); + return lastGraphNode; + } else { + // The repeat counter is called "bulk" in the JSON payload + GraphNode b = container.getByKey("bulk"); + if (b != null) { + this.repeat = b.asLong(); + } + + lastGraphNode = container.getByKey("result"); + return lastGraphNode; + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java new file mode 100644 index 00000000000..fb21f857cfa --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphResultSets.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphResultSet; + +public class GraphResultSets { + + public static GraphResultSet toSync(AsyncGraphResultSet firstPage) { + if (firstPage.hasMorePages()) { + return new MultiPageGraphResultSet(firstPage); + } else { + return new SinglePageGraphResultSet(firstPage); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java new file mode 100644 index 00000000000..f880bca3764 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON1SerdeTP.java @@ -0,0 +1,346 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.net.InetAddresses; +import java.io.IOException; +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.util.List; +import java.util.Map; +import org.apache.tinkerpop.shaded.jackson.core.JsonGenerator; +import org.apache.tinkerpop.shaded.jackson.core.JsonParseException; +import org.apache.tinkerpop.shaded.jackson.core.JsonParser; +import org.apache.tinkerpop.shaded.jackson.core.Version; +import org.apache.tinkerpop.shaded.jackson.databind.DeserializationContext; +import org.apache.tinkerpop.shaded.jackson.databind.JsonDeserializer; +import org.apache.tinkerpop.shaded.jackson.databind.JsonSerializer; +import org.apache.tinkerpop.shaded.jackson.databind.SerializerProvider; +import org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer; +import org.apache.tinkerpop.shaded.jackson.databind.module.SimpleModule; +import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; + +public class GraphSON1SerdeTP { + + //////////////////////// DESERIALIZERS //////////////////////// + + /** + * Default deserializer used by the driver for {@link InetAddress} instances. The actual subclass + * returned by this deserializer depends on the type of address: {@link Inet4Address IPV4} or + * {@link Inet6Address IPV6}. 
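+ * <p>For example, the literal "127.0.0.1" deserializes to an {@link Inet4Address} and "::1" to
+ * an {@link Inet6Address}; requesting the wrong subclass fails with a {@code JsonParseException}.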
+ */ + static class DefaultInetAddressDeserializer extends StdDeserializer { + + private static final long serialVersionUID = 1L; + + private final Class inetClass; + + DefaultInetAddressDeserializer(Class inetClass) { + super(inetClass); + this.inetClass = inetClass; + } + + @Override + public T deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { + String ip = parser.readValueAs(String.class); + try { + InetAddress inet = InetAddresses.forString(ip); + return inetClass.cast(inet); + } catch (ClassCastException e) { + throw new JsonParseException( + parser, + String.format("Inet address cannot be cast to %s: %s", inetClass.getSimpleName(), ip), + e); + } catch (IllegalArgumentException e) { + throw new JsonParseException(parser, String.format("Expected inet address, got %s", ip), e); + } + } + } + + /** + * Default deserializer used by the driver for geospatial types. It deserializes such types into + * {@link Geometry} instances. The actual subclass depends on the type being deserialized. + */ + static class DefaultGeometryDeserializer extends StdDeserializer { + + private static final long serialVersionUID = 1L; + + private final Class geometryClass; + + DefaultGeometryDeserializer(Class geometryClass) { + super(geometryClass); + this.geometryClass = geometryClass; + } + + @Override + public T deserialize(JsonParser parser, DeserializationContext ctx) throws IOException { + String wkt = parser.readValueAs(String.class); + Geometry geometry; + if (wkt.startsWith("POINT")) geometry = Point.fromWellKnownText(wkt); + else if (wkt.startsWith("LINESTRING")) geometry = LineString.fromWellKnownText(wkt); + else if (wkt.startsWith("POLYGON")) geometry = Polygon.fromWellKnownText(wkt); + else throw new JsonParseException(parser, "Unknown geometry type: " + wkt); + return geometryClass.cast(geometry); + } + } + + /** Base class for serializing the {@code java.time.*} types to ISO-8061 formats. */ + abstract static class AbstractJavaTimeSerializer extends StdSerializer { + + private static final long serialVersionUID = 1L; + + AbstractJavaTimeSerializer(final Class clazz) { + super(clazz); + } + + @Override + public void serialize( + final T value, final JsonGenerator gen, final SerializerProvider serializerProvider) + throws IOException { + gen.writeString(value.toString()); + } + } + + /** Base class for deserializing the {@code java.time.*} types from ISO-8061 formats. 
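+ * <p>For example, the {@code java.time.Duration} subclass parses strings such as "PT20.345S",
+ * and the {@code java.time.Instant} subclass parses "2011-12-03T10:15:30Z".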
*/ + abstract static class AbstractJavaTimeJacksonDeserializer extends StdDeserializer { + + private static final long serialVersionUID = 1L; + + AbstractJavaTimeJacksonDeserializer(final Class clazz) { + super(clazz); + } + + abstract T parse(final String val); + + @Override + public T deserialize( + final JsonParser jsonParser, final DeserializationContext deserializationContext) + throws IOException { + return parse(jsonParser.getText()); + } + } + + static final class DurationJacksonSerializer + extends AbstractJavaTimeSerializer { + + private static final long serialVersionUID = 1L; + + DurationJacksonSerializer() { + super(java.time.Duration.class); + } + } + + static final class DurationJacksonDeserializer + extends AbstractJavaTimeJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + DurationJacksonDeserializer() { + super(java.time.Duration.class); + } + + @Override + public java.time.Duration parse(final String val) { + return java.time.Duration.parse(val); + } + } + + static final class InstantJacksonSerializer + extends AbstractJavaTimeSerializer { + + private static final long serialVersionUID = 1L; + + InstantJacksonSerializer() { + super(java.time.Instant.class); + } + } + + static final class InstantJacksonDeserializer + extends AbstractJavaTimeJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + InstantJacksonDeserializer() { + super(java.time.Instant.class); + } + + @Override + public java.time.Instant parse(final String val) { + return java.time.Instant.parse(val); + } + } + + static final class LocalDateJacksonSerializer + extends AbstractJavaTimeSerializer { + + private static final long serialVersionUID = 1L; + + LocalDateJacksonSerializer() { + super(java.time.LocalDate.class); + } + } + + static final class LocalDateJacksonDeserializer + extends AbstractJavaTimeJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + LocalDateJacksonDeserializer() { + super(java.time.LocalDate.class); + } + + @Override + public java.time.LocalDate parse(final String val) { + return java.time.LocalDate.parse(val); + } + } + + static final class LocalTimeJacksonSerializer + extends AbstractJavaTimeSerializer { + + private static final long serialVersionUID = 1L; + + LocalTimeJacksonSerializer() { + super(java.time.LocalTime.class); + } + } + + static final class LocalTimeJacksonDeserializer + extends AbstractJavaTimeJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + LocalTimeJacksonDeserializer() { + super(java.time.LocalTime.class); + } + + @Override + public java.time.LocalTime parse(final String val) { + return java.time.LocalTime.parse(val); + } + } + + //////////////////////// SERIALIZERS //////////////////////// + + /** Default serializer used by the driver for {@link LegacyGraphNode} instances. */ + static class DefaultGraphNodeSerializer extends StdSerializer { + + private static final long serialVersionUID = 1L; + + DefaultGraphNodeSerializer() { + super(LegacyGraphNode.class); + } + + @Override + public void serialize( + LegacyGraphNode value, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) + throws IOException { + jsonGenerator.writeTree(value.getDelegate()); + } + } + + /** + * Default serializer used by the driver for geospatial types. It serializes {@link Geometry} + * instances into their Well-Known Text (WKT) equivalent. 
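+ * <p>For example, a point at coordinates (1, 2) is written as its WKT form, "POINT (1 2)".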
+ */ + static class DefaultGeometrySerializer extends StdSerializer { + + private static final long serialVersionUID = 1L; + + DefaultGeometrySerializer() { + super(Geometry.class); + } + + @Override + public void serialize( + Geometry value, JsonGenerator jsonGenerator, SerializerProvider serializers) + throws IOException { + jsonGenerator.writeString(value.asWellKnownText()); + } + } + + /** The default Jackson module used by DSE Graph. */ + static class GraphSON1DefaultModule extends SimpleModule { + + private static final long serialVersionUID = 1L; + + GraphSON1DefaultModule(String name, Version version) { + super(name, version, createDeserializers(), createSerializers()); + } + + private static Map, JsonDeserializer> createDeserializers() { + + return ImmutableMap., JsonDeserializer>builder() + + // Inet (there is no built-in deserializer for InetAddress and subclasses) + .put(InetAddress.class, new DefaultInetAddressDeserializer<>(InetAddress.class)) + .put(Inet4Address.class, new DefaultInetAddressDeserializer<>(Inet4Address.class)) + .put(Inet6Address.class, new DefaultInetAddressDeserializer<>(Inet6Address.class)) + + // Geospatial types + .put(Geometry.class, new DefaultGeometryDeserializer<>(Geometry.class)) + .put(Point.class, new DefaultGeometryDeserializer<>(Point.class)) + .put(LineString.class, new DefaultGeometryDeserializer<>(LineString.class)) + .put(Polygon.class, new DefaultGeometryDeserializer<>(Polygon.class)) + .build(); + } + + private static List> createSerializers() { + return ImmutableList.>builder() + .add(new DefaultGraphNodeSerializer()) + .add(new DefaultGeometrySerializer()) + .build(); + } + } + + /** Serializers and deserializers for JSR 310 {@code java.time.*}. */ + static class GraphSON1JavaTimeModule extends SimpleModule { + + private static final long serialVersionUID = 1L; + + GraphSON1JavaTimeModule(String name, Version version) { + super(name, version, createDeserializers(), createSerializers()); + } + + private static Map, JsonDeserializer> createDeserializers() { + + return ImmutableMap., JsonDeserializer>builder() + .put(java.time.Duration.class, new DurationJacksonDeserializer()) + .put(java.time.Instant.class, new InstantJacksonDeserializer()) + .put(java.time.LocalDate.class, new LocalDateJacksonDeserializer()) + .put(java.time.LocalTime.class, new LocalTimeJacksonDeserializer()) + .build(); + } + + private static List> createSerializers() { + return ImmutableList.>builder() + .add(new DurationJacksonSerializer()) + .add(new InstantJacksonSerializer()) + .add(new LocalDateJacksonSerializer()) + .add(new LocalTimeJacksonSerializer()) + .build(); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java new file mode 100644 index 00000000000..d79afc71822 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSON2SerdeTP.java @@ -0,0 +1,430 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.graph.predicates.Geo; +import com.datastax.dse.driver.api.core.graph.predicates.Search; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; +import com.datastax.dse.driver.internal.core.data.geometry.Distance; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.apache.tinkerpop.gremlin.process.traversal.util.AndP; +import org.apache.tinkerpop.gremlin.process.traversal.util.ConnectiveP; +import org.apache.tinkerpop.gremlin.process.traversal.util.OrP; +import org.apache.tinkerpop.gremlin.structure.io.graphson.AbstractObjectDeserializer; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; +import org.apache.tinkerpop.gremlin.structure.io.graphson.TinkerPopJacksonModule; +import org.apache.tinkerpop.shaded.jackson.core.JsonGenerator; +import org.apache.tinkerpop.shaded.jackson.core.JsonParser; +import org.apache.tinkerpop.shaded.jackson.databind.DeserializationContext; +import org.apache.tinkerpop.shaded.jackson.databind.SerializerProvider; +import org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer; +import org.apache.tinkerpop.shaded.jackson.databind.jsontype.TypeSerializer; +import org.apache.tinkerpop.shaded.jackson.databind.module.SimpleModule; +import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdScalarSerializer; +import org.apache.tinkerpop.shaded.jackson.databind.ser.std.StdSerializer; + +public class GraphSON2SerdeTP { + + /** + * A Jackson Module to use for TinkerPop serialization/deserialization. It extends {@link + * org.apache.tinkerpop.gremlin.structure.io.graphson.TinkerPopJacksonModule} because of the + * specific typing format used in GraphSON. 
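+ * <p>With this module registered, values are expected to be written with the "dse" type
+ * namespace declared below, e.g. a point as {@code {"@type": "dse:Point", "@value": "POINT (1 2)"}}.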
+ */ + public static class DseGraphModule extends TinkerPopJacksonModule { + + private static final long serialVersionUID = 1L; + + public DseGraphModule() { + super("dse-driver-2.0"); + addSerializer(DefaultPoint.class, new PointGeometrySerializer()); + addSerializer(DefaultLineString.class, new LineStringGeometrySerializer()); + addSerializer(DefaultPolygon.class, new PolygonGeometrySerializer()); + addSerializer(Distance.class, new DistanceGeometrySerializer()); + // override TinkerPop's P predicates because of DSE's Search and Geo predicates + addSerializer(P.class, new DsePJacksonSerializer()); + addSerializer(EditDistance.class, new EditDistanceSerializer()); + + addDeserializer(DefaultLineString.class, new LineStringGeometryDeserializer()); + addDeserializer(DefaultPoint.class, new PointGeometryDeserializer()); + addDeserializer(DefaultPolygon.class, new PolygonGeometryDeserializer()); + addDeserializer(Distance.class, new DistanceGeometryDeserializer()); + // override TinkerPop's P predicates because of DSE's Search and Geo predicates + addDeserializer(P.class, new DsePJacksonDeserializer()); + } + + @SuppressWarnings("rawtypes") + @Override + public Map getTypeDefinitions() { + Map definitions = new HashMap<>(); + definitions.put(DefaultLineString.class, "LineString"); + definitions.put(DefaultPoint.class, "Point"); + definitions.put(DefaultPolygon.class, "Polygon"); + definitions.put(byte[].class, "Blob"); + definitions.put(Distance.class, "Distance"); + definitions.put(P.class, "P"); + return definitions; + } + + @Override + public String getTypeNamespace() { + return "dse"; + } + + abstract static class AbstractGeometryJacksonDeserializer + extends StdDeserializer { + + private static final long serialVersionUID = 1L; + + AbstractGeometryJacksonDeserializer(final Class clazz) { + super(clazz); + } + + public abstract T parse(final String val); + + @Override + public T deserialize( + final JsonParser jsonParser, final DeserializationContext deserializationContext) + throws IOException { + return parse(jsonParser.getText()); + } + } + + abstract static class AbstractGeometryJacksonSerializer + extends StdScalarSerializer { + + private static final long serialVersionUID = 1L; + + AbstractGeometryJacksonSerializer(final Class clazz) { + super(clazz); + } + + @Override + public void serialize( + final T value, final JsonGenerator gen, final SerializerProvider serializerProvider) + throws IOException { + gen.writeString(value.asWellKnownText()); + } + } + + public static class LineStringGeometrySerializer + extends AbstractGeometryJacksonSerializer { + + private static final long serialVersionUID = 1L; + + LineStringGeometrySerializer() { + super(LineString.class); + } + } + + public static class LineStringGeometryDeserializer + extends AbstractGeometryJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + LineStringGeometryDeserializer() { + super(DefaultLineString.class); + } + + @Override + public DefaultLineString parse(final String val) { + return (DefaultLineString) LineString.fromWellKnownText(val); + } + } + + public static class PolygonGeometrySerializer + extends AbstractGeometryJacksonSerializer { + + private static final long serialVersionUID = 1L; + + PolygonGeometrySerializer() { + super(Polygon.class); + } + } + + public static class PolygonGeometryDeserializer + extends AbstractGeometryJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + PolygonGeometryDeserializer() { + super(DefaultPolygon.class); + } + + @Override + 
public DefaultPolygon parse(final String val) { + return (DefaultPolygon) Polygon.fromWellKnownText(val); + } + } + + public static class PointGeometrySerializer extends AbstractGeometryJacksonSerializer { + + private static final long serialVersionUID = 1L; + + PointGeometrySerializer() { + super(Point.class); + } + } + + public static class PointGeometryDeserializer + extends AbstractGeometryJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + PointGeometryDeserializer() { + super(DefaultPoint.class); + } + + @Override + public DefaultPoint parse(final String val) { + return (DefaultPoint) Point.fromWellKnownText(val); + } + } + + public static class DistanceGeometrySerializer + extends AbstractGeometryJacksonSerializer { + + private static final long serialVersionUID = 1L; + + DistanceGeometrySerializer() { + super(Distance.class); + } + } + + public static class DistanceGeometryDeserializer + extends AbstractGeometryJacksonDeserializer { + + private static final long serialVersionUID = 1L; + + DistanceGeometryDeserializer() { + super(Distance.class); + } + + @Override + public Distance parse(final String val) { + return Distance.fromWellKnownText(val); + } + } + + @SuppressWarnings("rawtypes") + static final class DsePJacksonSerializer extends StdScalarSerializer
<P>
{ + + private static final long serialVersionUID = 1L; + + DsePJacksonSerializer() { + super(P.class); + } + + @Override + public void serialize( + final P p, final JsonGenerator jsonGenerator, final SerializerProvider serializerProvider) + throws IOException { + jsonGenerator.writeStartObject(); + jsonGenerator.writeStringField("predicateType", getPredicateType(p)); + jsonGenerator.writeStringField( + GraphSONTokens.PREDICATE, + p instanceof ConnectiveP + ? p instanceof AndP ? GraphSONTokens.AND : GraphSONTokens.OR + : p.getBiPredicate().toString()); + if (p instanceof ConnectiveP) { + jsonGenerator.writeArrayFieldStart(GraphSONTokens.VALUE); + for (final P predicate : ((ConnectiveP) p).getPredicates()) { + jsonGenerator.writeObject(predicate); + } + jsonGenerator.writeEndArray(); + } else { + if (p.getValue() instanceof Collection) { + jsonGenerator.writeArrayFieldStart(GraphSONTokens.VALUE); + for (final Object object : (Collection) p.getValue()) { + jsonGenerator.writeObject(object); + } + jsonGenerator.writeEndArray(); + } else { + jsonGenerator.writeObjectField(GraphSONTokens.VALUE, p.getValue()); + } + } + jsonGenerator.writeEndObject(); + } + + private String getPredicateType(P p) { + if (p.getBiPredicate() instanceof SearchPredicate) { + return Search.class.getSimpleName(); + } else if (p.getBiPredicate() instanceof GeoPredicate) { + return Geo.class.getSimpleName(); + } else { + return P.class.getSimpleName(); + } + } + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + static final class DsePJacksonDeserializer extends AbstractObjectDeserializer
<P>
{ + + private static final long serialVersionUID = 1L; + + DsePJacksonDeserializer() { + super(P.class); + } + + @Override + public P createObject(final Map data) { + final String predicate = (String) data.get(GraphSONTokens.PREDICATE); + final String predicateType = (String) data.get("predicateType"); + final Object value = data.get(GraphSONTokens.VALUE); + if (predicate.equals(GraphSONTokens.AND) || predicate.equals(GraphSONTokens.OR)) { + return predicate.equals(GraphSONTokens.AND) + ? new AndP((List
<P>
) value) + : new OrP((List
<P>
) value); + } else { + try { + if (value instanceof Collection) { + if (predicate.equals("between")) { + return P.between(((List) value).get(0), ((List) value).get(1)); + } else if (predicateType.equals(P.class.getSimpleName()) + && predicate.equals("inside")) { + return P.between(((List) value).get(0), ((List) value).get(1)); + } else if (predicate.equals("outside")) { + return P.outside(((List) value).get(0), ((List) value).get(1)); + } else if (predicate.equals("within")) { + return P.within((Collection) value); + } else if (predicate.equals("without")) { + return P.without((Collection) value); + } else { + return (P) + P.class.getMethod(predicate, Collection.class).invoke(null, (Collection) value); + } + } else { + if (predicate.equals(SearchPredicate.prefix.name())) { + return Search.prefix((String) value); + } else if (predicate.equals(SearchPredicate.tokenPrefix.name())) { + return Search.tokenPrefix((String) value); + } else if (predicate.equals(SearchPredicate.regex.name())) { + return Search.regex((String) value); + } else if (predicate.equals(SearchPredicate.tokenRegex.name())) { + return Search.tokenRegex((String) value); + } else if (predicate.equals(SearchPredicate.token.name())) { + return Search.token((String) value); + } else if (predicate.equals(SearchPredicate.fuzzy.name())) { + Map arguments = (Map) value; + return Search.fuzzy( + (String) arguments.get("query"), (int) arguments.get("distance")); + } else if (predicate.equals(SearchPredicate.tokenFuzzy.name())) { + Map arguments = (Map) value; + return Search.tokenFuzzy( + (String) arguments.get("query"), (int) arguments.get("distance")); + } else if (predicate.equals(SearchPredicate.phrase.name())) { + Map arguments = (Map) value; + return Search.phrase( + (String) arguments.get("query"), (int) arguments.get("distance")); + } else if (predicateType.equals(Geo.class.getSimpleName()) + && predicate.equals(GeoPredicate.inside.name())) { + return Geo.inside( + ((Distance) value).getCenter(), + ((Distance) value).getRadius(), + Geo.Unit.DEGREES); + } else if (predicateType.equals(Geo.class.getSimpleName()) + && predicate.equals(GeoPredicate.insideCartesian.name())) { + return Geo.inside(((Distance) value).getCenter(), ((Distance) value).getRadius()); + } else { + return (P) P.class.getMethod(predicate, Object.class).invoke(null, value); + } + } + } catch (final Exception e) { + throw new IllegalStateException(e.getMessage(), e); + } + } + } + } + + public static class EditDistanceSerializer extends StdSerializer { + + private static final long serialVersionUID = 1L; + + EditDistanceSerializer() { + super(EditDistance.class); + } + + @Override + public void serialize( + EditDistance editDistance, JsonGenerator generator, SerializerProvider provider) + throws IOException { + generator.writeObject( + ImmutableMap.of("query", editDistance.query, "distance", editDistance.distance)); + } + + @Override + public void serializeWithType( + EditDistance editDistance, + JsonGenerator generator, + SerializerProvider provider, + TypeSerializer serializer) + throws IOException { + serialize(editDistance, generator, provider); + } + } + } + + public static class DriverObjectsModule extends SimpleModule { + + private static final long serialVersionUID = 1L; + + public DriverObjectsModule() { + super("datastax-driver-module"); + addSerializer(ObjectGraphNode.class, new ObjectGraphNodeGraphSON2Serializer()); + } + + static final class ObjectGraphNodeGraphSON2Serializer extends StdSerializer { + + private static final long serialVersionUID = 
1L; + + protected ObjectGraphNodeGraphSON2Serializer() { + super(ObjectGraphNode.class); + } + + @Override + public void serialize( + ObjectGraphNode objectGraphNode, + JsonGenerator jsonGenerator, + SerializerProvider serializerProvider) + throws IOException { + jsonGenerator.writeObject(objectGraphNode.as(Object.class)); + } + + @Override + public void serializeWithType( + ObjectGraphNode objectGraphNode, + JsonGenerator jsonGenerator, + SerializerProvider serializerProvider, + TypeSerializer typeSerializer) + throws IOException { + serialize(objectGraphNode, jsonGenerator, serializerProvider); + } + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java new file mode 100644 index 00000000000..02b35f7ee36 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSONUtils.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.shaded.guava.common.base.Suppliers; +import com.datastax.oss.driver.shaded.guava.common.base.Throwables; +import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; +import com.datastax.oss.driver.shaded.guava.common.cache.CacheLoader; +import com.datastax.oss.driver.shaded.guava.common.cache.LoadingCache; +import com.datastax.oss.protocol.internal.util.Bytes; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.function.Supplier; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONMapper; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONReader; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONVersion; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONXModuleV2d0; +import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV2d0; +import org.apache.tinkerpop.shaded.jackson.core.Version; +import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper; + +public class GraphSONUtils { + + private static final LoadingCache OBJECT_MAPPERS = + CacheBuilder.newBuilder() + .build( + new CacheLoader() { + @Override + public ObjectMapper load(@NonNull GraphProtocol graphSubProtocol) throws Exception { + switch (graphSubProtocol) { + case GRAPHSON_1_0: + com.datastax.oss.driver.api.core.Version driverVersion = + CqlSession.OSS_DRIVER_COORDINATES.getVersion(); + Version driverJacksonVersion = + new Version( + driverVersion.getMajor(), + driverVersion.getMinor(), + driverVersion.getPatch(), + driverVersion.getPreReleaseLabels() != null + && driverVersion.getPreReleaseLabels().contains("SNAPSHOT") + ? 
"SNAPSHOT" + : null, + "com.datastax.dse", + "dse-java-driver-core"); + + ObjectMapper mapper = + GraphSONMapper.build() + .version(GraphSONVersion.V1_0) + .create() + .createMapper(); + mapper.registerModule( + new GraphSON1SerdeTP.GraphSON1DefaultModule( + "graph-graphson1default", driverJacksonVersion)); + mapper.registerModule( + new GraphSON1SerdeTP.GraphSON1JavaTimeModule( + "graph-graphson1javatime", driverJacksonVersion)); + + return mapper; + case GRAPHSON_2_0: + return GraphSONMapper.build() + .version(GraphSONVersion.V2_0) + .addCustomModule(GraphSONXModuleV2d0.build().create(false)) + .addRegistry(TinkerIoRegistryV2d0.instance()) + .addCustomModule(new GraphSON2SerdeTP.DseGraphModule()) + .addCustomModule(new GraphSON2SerdeTP.DriverObjectsModule()) + .create() + .createMapper(); + + default: + throw new IllegalStateException( + String.format("GraphSON sub-protocol unknown: {%s}", graphSubProtocol)); + } + } + }); + + static final Supplier GRAPHSON1_READER = + Suppliers.memoize( + () -> + GraphSONReader.build() + .mapper(GraphSONMapper.build().version(GraphSONVersion.V1_0).create()) + .create()); + + public static ByteBuffer serializeToByteBuffer(Object object, GraphProtocol graphSubProtocol) + throws IOException { + return ByteBuffer.wrap(serializeToBytes(object, graphSubProtocol)); + } + + static byte[] serializeToBytes(Object object, GraphProtocol graphSubProtocol) throws IOException { + try { + return OBJECT_MAPPERS.get(graphSubProtocol).writeValueAsBytes(object); + } catch (ExecutionException e) { + Throwables.throwIfUnchecked(e); + throw new RuntimeException(e); + } + } + + public static GraphNode createGraphNode(List data, GraphProtocol graphSubProtocol) + throws IOException { + try { + ObjectMapper mapper = OBJECT_MAPPERS.get(graphSubProtocol); + switch (graphSubProtocol) { + case GRAPHSON_1_0: + return new LegacyGraphNode(mapper.readTree(Bytes.getArray(data.get(0))), mapper); + case GRAPHSON_2_0: + return new ObjectGraphNode(mapper.readValue(Bytes.getArray(data.get(0)), Object.class)); + default: + // Should already be caught when we lookup in the cache + throw new AssertionError( + String.format("Unknown GraphSON sub-protocol: {%s}", graphSubProtocol)); + } + } catch (ExecutionException e) { + Throwables.throwIfUnchecked(e); + throw new RuntimeException(e); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java new file mode 100644 index 00000000000..b8baa2f5e49 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBase.java @@ -0,0 +1,413 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public abstract class GraphStatementBase> + implements GraphStatement { + private final Boolean isIdempotent; + private final Duration timeout; + private final Node node; + private final long timestamp; + private final DriverExecutionProfile executionProfile; + private final String executionProfileName; + private final Map customPayload; + private final String graphName; + private final String traversalSource; + private final String subProtocol; + private final ConsistencyLevel consistencyLevel; + private final ConsistencyLevel readConsistencyLevel; + private final ConsistencyLevel writeConsistencyLevel; + + protected GraphStatementBase( + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel) { + this.isIdempotent = isIdempotent; + this.timeout = timeout; + this.node = node; + this.timestamp = timestamp; + this.executionProfile = executionProfile; + this.executionProfileName = executionProfileName; + this.customPayload = customPayload; + this.graphName = graphName; + this.traversalSource = traversalSource; + this.subProtocol = subProtocol; + this.consistencyLevel = consistencyLevel; + this.readConsistencyLevel = readConsistencyLevel; + this.writeConsistencyLevel = writeConsistencyLevel; + } + + protected abstract SelfT newInstance( + Boolean isIdempotent, + Duration timeout, + Node node, + long timestamp, + DriverExecutionProfile executionProfile, + String executionProfileName, + Map customPayload, + String graphName, + String traversalSource, + String subProtocol, + ConsistencyLevel consistencyLevel, + ConsistencyLevel readConsistencyLevel, + ConsistencyLevel writeConsistencyLevel); + + @Override + public Boolean isIdempotent() { + return isIdempotent; + } + + @NonNull + @Override + public SelfT setIdempotent(@Nullable Boolean newIdempotence) { + return newInstance( + newIdempotence, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public Duration getTimeout() { + return timeout; + } + + @NonNull + @Override + public SelfT setTimeout(@Nullable Duration newTimeout) { + return newInstance( + isIdempotent, + newTimeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public Node getNode() { + return node; + } + + @NonNull + @Override + public SelfT setNode(@Nullable Node newNode) { + return newInstance( + isIdempotent, + timeout, + newNode, + timestamp, + executionProfile, + 
executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Override + public long getTimestamp() { + return this.timestamp; + } + + @NonNull + @Override + public SelfT setTimestamp(long newTimestamp) { + return newInstance( + isIdempotent, + timeout, + node, + newTimestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public DriverExecutionProfile getExecutionProfile() { + return executionProfile; + } + + @NonNull + @Override + public SelfT setExecutionProfile(@Nullable DriverExecutionProfile newExecutionProfile) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + newExecutionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public String getExecutionProfileName() { + return executionProfileName; + } + + @NonNull + @Override + public SelfT setExecutionProfileName(@Nullable String newExecutionProfileName) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + newExecutionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @NonNull + @Override + public Map getCustomPayload() { + return customPayload; + } + + @NonNull + @Override + public SelfT setCustomPayload(@NonNull Map newCustomPayload) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + newCustomPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public String getGraphName() { + return graphName; + } + + @NonNull + @Override + public SelfT setGraphName(@Nullable String newGraphName) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + newGraphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public String getTraversalSource() { + return traversalSource; + } + + @NonNull + @Override + public SelfT setTraversalSource(@Nullable String newTraversalSource) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + newTraversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public String getSubProtocol() { + return subProtocol; + } + + @NonNull + @Override + public SelfT setSubProtocol(@Nullable String newSubProtocol) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + newSubProtocol, + consistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public ConsistencyLevel getConsistencyLevel() { + return consistencyLevel; + } + + @Override + public SelfT setConsistencyLevel(@Nullable ConsistencyLevel newConsistencyLevel) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + 
executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + newConsistencyLevel, + readConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public ConsistencyLevel getReadConsistencyLevel() { + return readConsistencyLevel; + } + + @NonNull + @Override + public SelfT setReadConsistencyLevel(@Nullable ConsistencyLevel newReadConsistencyLevel) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + newReadConsistencyLevel, + writeConsistencyLevel); + } + + @Nullable + @Override + public ConsistencyLevel getWriteConsistencyLevel() { + return writeConsistencyLevel; + } + + @NonNull + @Override + public SelfT setWriteConsistencyLevel(@Nullable ConsistencyLevel newWriteConsistencyLevel) { + return newInstance( + isIdempotent, + timeout, + node, + timestamp, + executionProfile, + executionProfileName, + customPayload, + graphName, + traversalSource, + subProtocol, + consistencyLevel, + readConsistencyLevel, + newWriteConsistencyLevel); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java new file mode 100644 index 00000000000..6e586bbcf3f --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/GraphSupportChecker.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
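
Editorial aside: a short, untested sketch (not part of this patch) of the copy-on-write behavior that `newInstance()` gives every setter in `GraphStatementBase` above. It assumes the public `ScriptGraphStatement` API from this driver.

```java
import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
import java.time.Duration;

public class ImmutableGraphStatementSketch {
  public static void main(String[] args) {
    ScriptGraphStatement base = ScriptGraphStatement.newInstance("g.V().count()");
    // Each setter copies every other field and swaps in the new value:
    ScriptGraphStatement tuned =
        base.setTimeout(Duration.ofSeconds(10)).setGraphName("demo_graph");
    // "base" is untouched; "tuned" carries the overrides.
    System.out.println(base.getGraphName());  // expected: null
    System.out.println(tuned.getGraphName()); // expected: demo_graph
  }
}
```

Because every mutation returns a new instance, statements can be cached and shared across threads without defensive copying.
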
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.PagingEnabledOptions; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.dse.driver.internal.core.DseProtocolFeature; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collection; +import java.util.Objects; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GraphSupportChecker { + + private static final Logger LOG = LoggerFactory.getLogger(GraphSupportChecker.class); + + /** + * The minimum DSE version supporting both graph paging and the GraphBinary sub-protocol is DSE + * 6.8. + */ + private static final Version MIN_DSE_VERSION_GRAPH_BINARY_AND_PAGING = + Objects.requireNonNull(Version.parse("6.8.0")); + + private volatile Boolean contextGraphPagingEnabled; + private volatile Boolean isDse68OrAbove; + + /** + * Checks whether graph paging is available. + * + *

+ * <p>Graph paging is available if:
+ *
+ * <ol>
+ *   <li>Continuous paging is generally available (this implies protocol version {@link
+ *       com.datastax.dse.driver.api.core.DseProtocolVersion#DSE_V1 DSE_V1} or higher);
+ *   <li>Graph paging is set to ENABLED or AUTO in the configuration with {@link
+ *       DseDriverOption#GRAPH_PAGING_ENABLED};
+ *   <li>If graph paging is set to AUTO, then a check will be performed to verify that all hosts
+ *       are running DSE 6.8+; if that is the case, then graph paging will be assumed to be
+ *       available.
+ * </ol>
+ * + * Note that the hosts check will be done only once, then memoized; if other hosts join the + * cluster later and do not support graph paging, the user has to manually disable graph paging. + */ + public boolean isPagingEnabled( + @NonNull GraphStatement graphStatement, @NonNull InternalDriverContext context) { + DriverExecutionProfile driverExecutionProfile = + Conversions.resolveExecutionProfile(graphStatement, context); + PagingEnabledOptions pagingEnabledOptions = + PagingEnabledOptions.valueOf( + driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)); + if (LOG.isTraceEnabled()) { + LOG.trace("GRAPH_PAGING_ENABLED: {}", pagingEnabledOptions); + } + if (pagingEnabledOptions == PagingEnabledOptions.DISABLED) { + return false; + } else if (pagingEnabledOptions == PagingEnabledOptions.ENABLED) { + return true; + } else { + return isContextGraphPagingEnabled(context); + } + } + + /** + * Infers the {@link GraphProtocol} to use to execute the given statement. + * + *

+ * <p>The graph protocol is computed as follows:
+ *
+ * <ol>
+ *   <li>If the statement declares the protocol to use with {@link
+ *       GraphStatement#getSubProtocol()}, then that protocol is returned.
+ *   <li>If the driver configuration explicitly defines the protocol to use (see {@link
+ *       DseDriverOption#GRAPH_SUB_PROTOCOL} and reference.conf), then that protocol is returned.
+ *   <li>Otherwise, the graph protocol to use is determined by the DSE version of hosts in the
+ *       cluster. If any host has DSE version 6.7.x or lower, the default graph protocol is {@link
+ *       GraphProtocol#GRAPHSON_2_0}. If all hosts have DSE version 6.8.0 or higher, the default
+ *       graph protocol is {@link GraphProtocol#GRAPH_BINARY_1_0}.
+ * </ol>
+ *
+ * <p>Note that the hosts check will be done only once, then memoized; if other hosts join the
+ * cluster later and do not support the computed graph protocol, the user has to manually set the
+ * graph protocol to use.
+ *
+ * <p>
Also note that GRAPH_BINARY_1_0 can only be used with "core" graph engines; if + * you are targeting a "classic" graph engine instead, the user has to manually set the graph + * protocol to something else. + */ + @NonNull + public GraphProtocol inferGraphProtocol( + @NonNull GraphStatement statement, + @NonNull DriverExecutionProfile config, + @NonNull InternalDriverContext context) { + String graphProtocol = statement.getSubProtocol(); + if (graphProtocol == null) { + // use the protocol specified in configuration, otherwise get the default from the context + graphProtocol = + config.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL) + ? config.getString(DseDriverOption.GRAPH_SUB_PROTOCOL) + : getDefaultGraphProtocol(context).toInternalCode(); + } + // should not be null because we call config.getString() with a default value + Objects.requireNonNull( + graphProtocol, + "Could not determine the graph protocol for the query. This is a bug, please report."); + + return GraphProtocol.fromString(graphProtocol); + } + + private boolean isContextGraphPagingEnabled(InternalDriverContext context) { + if (contextGraphPagingEnabled == null) { + ProtocolVersion protocolVersion = context.getProtocolVersion(); + if (!context + .getProtocolVersionRegistry() + .supports(protocolVersion, DseProtocolFeature.CONTINUOUS_PAGING)) { + contextGraphPagingEnabled = false; + } else { + if (isDse68OrAbove == null) { + isDse68OrAbove = checkIsDse68OrAbove(context); + } + contextGraphPagingEnabled = isDse68OrAbove; + } + } + return contextGraphPagingEnabled; + } + + /** + * Determines the default {@link GraphProtocol} for the given context. + * + * @return The default GraphProtocol to used based on the provided context. + */ + @VisibleForTesting + GraphProtocol getDefaultGraphProtocol(@NonNull InternalDriverContext context) { + if (isDse68OrAbove == null) { + isDse68OrAbove = checkIsDse68OrAbove(context); + } + // if the DSE version can't be determined, default to GraphSON 2.0 + return isDse68OrAbove ? GraphProtocol.GRAPH_BINARY_1_0 : GraphProtocol.GRAPHSON_2_0; + } + + private boolean checkIsDse68OrAbove(@NonNull InternalDriverContext context) { + Collection nodes = context.getMetadataManager().getMetadata().getNodes().values(); + + for (Node node : nodes) { + Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); + if (dseVersion == null || dseVersion.compareTo(MIN_DSE_VERSION_GRAPH_BINARY_AND_PAGING) < 0) { + return false; + } + } + return true; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java new file mode 100644 index 00000000000..1749bf00873 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/LegacyGraphNode.java @@ -0,0 +1,323 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
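
Editorial aside: an untested sketch (not part of this patch) of overriding `GraphSupportChecker`'s inference through driver configuration. The literal option values `"graphson-2.0"` and `"DISABLED"` are assumptions based on the enum codes used in this patch.

```java
import com.datastax.dse.driver.api.core.config.DseDriverOption;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

public class GraphConfigSketch {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            // Force the GraphSON 2.0 sub-protocol instead of letting the driver
            // pick one based on the DSE version of the hosts:
            .withString(DseDriverOption.GRAPH_SUB_PROTOCOL, "graphson-2.0")
            // Turn graph paging off regardless of continuous paging support:
            .withString(DseDriverOption.GRAPH_PAGING_ENABLED, "DISABLED")
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // ... execute graph statements with the overridden options ...
    }
  }
}
```
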
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.shaded.guava.common.base.Objects; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; +import java.util.Set; +import net.jcip.annotations.Immutable; +import org.apache.tinkerpop.gremlin.process.traversal.Path; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Property; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONTokens; +import org.apache.tinkerpop.gremlin.structure.util.Attachable; +import org.apache.tinkerpop.shaded.jackson.core.JsonParser; +import org.apache.tinkerpop.shaded.jackson.databind.JavaType; +import org.apache.tinkerpop.shaded.jackson.databind.JsonNode; +import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper; + +/** + * Legacy implementation for GraphSON 1 results. + * + *
+ * <p>
The server returns plain JSON with no type information. The driver works with the JSON + * representation directly. + */ +@Immutable +public class LegacyGraphNode implements GraphNode { + private static final String TYPE = "type"; + private static final String VERTEX_TYPE = "vertex"; + private static final String EDGE_TYPE = "edge"; + + private static final GenericType> LIST_TYPE = GenericType.listOf(Object.class); + private static final GenericType> MAP_TYPE = + GenericType.mapOf(String.class, Object.class); + + private final JsonNode delegate; + private final ObjectMapper objectMapper; + + public LegacyGraphNode(JsonNode delegate, ObjectMapper objectMapper) { + Preconditions.checkNotNull(delegate); + Preconditions.checkNotNull(objectMapper); + this.delegate = delegate; + this.objectMapper = objectMapper; + } + + /** + * The underlying JSON representation. + * + *
+ * <p>
This is an implementation detail, it's only exposed through the internal API. + */ + public JsonNode getDelegate() { + return delegate; + } + + /** + * The object mapper used to deserialize results in {@link #as(Class)} and {@link + * #as(GenericType)}. + * + *
+ * <p>
This is an implementation detail, it's only exposed through the internal API. + */ + public ObjectMapper getObjectMapper() { + return objectMapper; + } + + @Override + public boolean isNull() { + return delegate.isNull(); + } + + @Override + public boolean isMap() { + return delegate.isObject(); + } + + @Override + public Iterable keys() { + return (Iterable) delegate::fieldNames; + } + + @Override + public LegacyGraphNode getByKey(Object key) { + if (!(key instanceof String)) { + return null; + } + JsonNode node = delegate.get(((String) key)); + if (node == null) { + return null; + } + return new LegacyGraphNode(node, objectMapper); + } + + @Override + @SuppressWarnings("unchecked") + public Map asMap() { + return (Map) as(MAP_TYPE); + } + + @Override + public boolean isList() { + return delegate.isArray(); + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public LegacyGraphNode getByIndex(int index) { + JsonNode node = delegate.get(index); + if (node == null) { + return null; + } + return new LegacyGraphNode(node, objectMapper); + } + + @Override + @SuppressWarnings("unchecked") + public List asList() { + return (List) as(LIST_TYPE); + } + + @Override + public boolean isValue() { + return delegate.isValueNode(); + } + + @Override + public int asInt() { + return delegate.asInt(); + } + + @Override + public boolean asBoolean() { + return delegate.asBoolean(); + } + + @Override + public long asLong() { + return delegate.asLong(); + } + + @Override + public double asDouble() { + return delegate.asDouble(); + } + + @Override + public String asString() { + return delegate.asText(); + } + + @Override + public boolean isVertex() { + return isType(VERTEX_TYPE); + } + + @Override + public Vertex asVertex() { + try { + return GraphSONUtils.GRAPHSON1_READER + .get() + .readVertex( + new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), + null, + null, + null); + } catch (IOException e) { + throw new UncheckedIOException("Could not deserialize node as Vertex.", e); + } + } + + @Override + public boolean isEdge() { + return isType(EDGE_TYPE); + } + + @Override + public Edge asEdge() { + try { + return GraphSONUtils.GRAPHSON1_READER + .get() + .readEdge( + new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), + Attachable::get); + } catch (IOException e) { + throw new UncheckedIOException("Could not deserialize node as Edge.", e); + } + } + + @Override + public boolean isPath() { + return false; + } + + @Override + public Path asPath() { + throw new UnsupportedOperationException( + "GraphSON1 does not support Path, use another Graph sub-protocol such as GraphSON2."); + } + + @Override + public boolean isProperty() { + return delegate.has(GraphSONTokens.KEY) && delegate.has(GraphSONTokens.VALUE); + } + + @Override + @SuppressWarnings("unchecked") + public Property asProperty() { + try { + return GraphSONUtils.GRAPHSON1_READER + .get() + .readProperty( + new ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), + Attachable::get); + } catch (IOException e) { + throw new UncheckedIOException("Could not deserialize node as Property.", e); + } + } + + @Override + public boolean isVertexProperty() { + return delegate.has(GraphSONTokens.ID) + && delegate.has(GraphSONTokens.VALUE) + && delegate.has(GraphSONTokens.LABEL); + } + + @Override + @SuppressWarnings("unchecked") + public VertexProperty asVertexProperty() { + try { + return GraphSONUtils.GRAPHSON1_READER + .get() + .readVertexProperty( + new 
ByteArrayInputStream(delegate.toString().getBytes(StandardCharsets.UTF_8)), + Attachable::get); + } catch (IOException e) { + throw new UncheckedIOException("Could not deserialize node as VertexProperty.", e); + } + } + + @Override + public boolean isSet() { + return false; + } + + @Override + public Set asSet() { + throw new UnsupportedOperationException( + "GraphSON1 does not support Set, use another Graph sub-protocol such as GraphSON2."); + } + + @Override + public ResultT as(Class clazz) { + try { + return objectMapper.treeToValue(delegate, clazz); + } catch (IOException e) { + throw new UncheckedIOException("Could not deserialize node as: " + clazz, e); + } + } + + @Override + public ResultT as(GenericType type) { + try { + JsonParser parser = objectMapper.treeAsTokens(delegate); + JavaType javaType = objectMapper.constructType(type.__getToken().getType()); + return objectMapper.readValue(parser, javaType); + } catch (IOException e) { + throw new UncheckedIOException("Could not deserialize node as: " + type, e); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof LegacyGraphNode)) { + return false; + } + LegacyGraphNode that = (LegacyGraphNode) o; + return Objects.equal(delegate, that.delegate); + } + + @Override + public int hashCode() { + return Objects.hashCode(delegate); + } + + @Override + public String toString() { + return delegate.toString(); + } + + private boolean isType(String expectedTypeName) { + JsonNode type = delegate.get(TYPE); + return type != null && expectedTypeName.equals(type.asText()); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java new file mode 100644 index 00000000000..fe81d73ba00 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/MultiPageGraphResultSet.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
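
Editorial aside: an untested sketch (not part of this patch) of typical `GraphNode` navigation as implemented by `LegacyGraphNode` above; the `inspect()` helper is hypothetical.

```java
import com.datastax.dse.driver.api.core.graph.GraphNode;
import org.apache.tinkerpop.gremlin.structure.Vertex;

public class GraphNodeSketch {
  static void inspect(GraphNode node) {
    if (node.isVertex()) {
      // GraphSON 1.0 nodes are re-parsed through the memoized GRAPHSON1_READER:
      Vertex vertex = node.asVertex();
      System.out.println("vertex label: " + vertex.label());
    } else if (node.isMap()) {
      for (String key : node.keys()) {
        System.out.println(key + " -> " + node.getByKey(key));
      }
    } else if (node.isList()) {
      for (int i = 0; i < node.size(); i++) {
        System.out.println(node.getByIndex(i));
      }
    } else {
      System.out.println(node.asString());
    }
  }
}
```
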
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +public class MultiPageGraphResultSet implements GraphResultSet { + private final RowIterator iterator; + private final List executionInfos = new ArrayList<>(); + + public MultiPageGraphResultSet(AsyncGraphResultSet firstPage) { + iterator = new RowIterator(firstPage); + executionInfos.add(firstPage.getRequestExecutionInfo()); + } + + @Override + public void cancel() { + iterator.cancel(); + } + + @NonNull + @Override + public ExecutionInfo getRequestExecutionInfo() { + return executionInfos.get(executionInfos.size() - 1); + } + + @NonNull + @Override + @Deprecated + public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { + return GraphExecutionInfoConverter.convert(getRequestExecutionInfo()); + } + + /** + * The execution information for all the queries that have been performed so far to assemble this + * iterable. + * + *
+ * <p>
This will have multiple elements if the query is paged, since the driver performs blocking + * background queries to fetch additional pages transparently as the result set is being iterated. + */ + @NonNull + public List getRequestExecutionInfos() { + return executionInfos; + } + + /** @deprecated use {@link #getRequestExecutionInfos()} instead. */ + @NonNull + @Deprecated + public List getExecutionInfos() { + return Lists.transform(executionInfos, GraphExecutionInfoConverter::convert); + } + + @NonNull + @Override + public Iterator iterator() { + return iterator; + } + + public class RowIterator extends CountingIterator { + private AsyncGraphResultSet currentPage; + private Iterator currentRows; + private boolean cancelled = false; + + private RowIterator(AsyncGraphResultSet firstPage) { + super(firstPage.remaining()); + currentPage = firstPage; + currentRows = firstPage.currentPage().iterator(); + } + + @Override + protected GraphNode computeNext() { + maybeMoveToNextPage(); + return currentRows.hasNext() ? currentRows.next() : endOfData(); + } + + private void maybeMoveToNextPage() { + if (!cancelled && !currentRows.hasNext() && currentPage.hasMorePages()) { + BlockingOperation.checkNotDriverThread(); + AsyncGraphResultSet nextPage = + CompletableFutures.getUninterruptibly(currentPage.fetchNextPage()); + currentPage = nextPage; + remaining += currentPage.remaining(); + currentRows = nextPage.currentPage().iterator(); + executionInfos.add(nextPage.getRequestExecutionInfo()); + } + } + + private void cancel() { + currentPage.cancel(); + cancelled = true; + } + + public boolean isCancelled() { + return cancelled; + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java new file mode 100644 index 00000000000..56123799fdd --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/ObjectGraphNode.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.shaded.guava.common.base.Objects; +import java.util.List; +import java.util.Map; +import java.util.Set; +import net.jcip.annotations.Immutable; +import org.apache.tinkerpop.gremlin.process.traversal.Path; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Property; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; + +/** + * Modern implementation for GraphSON 2+ results. + * + *
+ * <p>
The server returns results with type information. The driver works with the decoded objects + * directly. + */ +@Immutable +public class ObjectGraphNode implements GraphNode { + + private final Object delegate; + + public ObjectGraphNode(Object delegate) { + this.delegate = delegate; + } + + @Override + public boolean isNull() { + return delegate == null; + } + + @Override + public boolean isMap() { + return delegate instanceof Map; + } + + @Override + public Iterable keys() { + return ((Map) delegate).keySet(); + } + + @Override + public GraphNode getByKey(Object key) { + if (!isMap()) { + return null; + } + Map map = asMap(); + if (map.containsKey(key)) { + return new ObjectGraphNode(map.get(key)); + } + return null; + } + + @Override + @SuppressWarnings("unchecked") + public Map asMap() { + return (Map) delegate; + } + + @Override + public boolean isList() { + return delegate instanceof List; + } + + @Override + public int size() { + if (isList()) { + return asList().size(); + } else if (isMap()) { + return asMap().size(); + } else if (isSet()) { + return asSet().size(); + } else { + return 0; + } + } + + @Override + public GraphNode getByIndex(int index) { + if (!isList() || index < 0 || index >= size()) { + return null; + } + return new ObjectGraphNode(asList().get(index)); + } + + @Override + @SuppressWarnings("unchecked") + public List asList() { + return (List) delegate; + } + + @Override + public boolean isValue() { + return !(isList() + || isMap() + || isSet() + || isVertex() + || isEdge() + || isPath() + || isProperty() + || isVertexProperty()); + } + + @Override + public boolean isVertexProperty() { + return delegate instanceof VertexProperty; + } + + @Override + public boolean isProperty() { + return delegate instanceof Property; + } + + @Override + public boolean isPath() { + return delegate instanceof Path; + } + + @Override + public int asInt() { + return (Integer) delegate; + } + + @Override + public boolean asBoolean() { + return (Boolean) delegate; + } + + @Override + public long asLong() { + return (Long) delegate; + } + + @Override + public double asDouble() { + return (Double) delegate; + } + + @Override + public String asString() { + return (String) delegate; + } + + @Override + @SuppressWarnings("unchecked") + public T as(Class clazz) { + return (T) delegate; + } + + @Override + @SuppressWarnings("unchecked") + public T as(GenericType type) { + return (T) delegate; + } + + @Override + public boolean isVertex() { + return delegate instanceof Vertex; + } + + @Override + public Vertex asVertex() { + return (Vertex) delegate; + } + + @Override + public boolean isEdge() { + return delegate instanceof Edge; + } + + @Override + public Edge asEdge() { + return (Edge) delegate; + } + + @Override + public Path asPath() { + return (Path) delegate; + } + + @Override + @SuppressWarnings("unchecked") + public Property asProperty() { + return (Property) delegate; + } + + @Override + @SuppressWarnings("unchecked") + public VertexProperty asVertexProperty() { + return (VertexProperty) delegate; + } + + @Override + public boolean isSet() { + return delegate instanceof Set; + } + + @Override + @SuppressWarnings("unchecked") + public Set asSet() { + return (Set) delegate; + } + + @Override + public String toString() { + return this.delegate.toString(); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + // Compare each others' delegates. 
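+ // (The instanceof test below also rejects null, and Guava's Objects.equal
+ // tolerates null delegates on either side.)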
+ return other instanceof ObjectGraphNode + && Objects.equal(this.delegate, ((ObjectGraphNode) other).delegate); + } + + @Override + public int hashCode() { + return Objects.hashCode(delegate); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java new file mode 100644 index 00000000000..b69c3a59cf0 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchPredicate.java @@ -0,0 +1,301 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.oss.driver.shaded.guava.common.collect.Sets; +import java.util.List; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * List of predicates for geolocation usage with DseGraph and Search indexes. Should not be accessed + * directly but through the {@link com.datastax.dse.driver.api.core.graph.predicates.Search} static + * methods. + */ +public enum SearchPredicate implements DsePredicate { + /** Whether the text contains a given term as a token in the text (case insensitive). */ + token { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + return value != null && evaluate(value.toString(), (String) condition); + } + + boolean evaluate(String value, String terms) { + Set tokens = Sets.newHashSet(tokenize(value.toLowerCase())); + terms = terms.trim(); + List tokenTerms = tokenize(terms.toLowerCase()); + if (!terms.isEmpty() && tokenTerms.isEmpty()) { + return false; + } + for (String term : tokenTerms) { + if (!tokens.contains(term)) { + return false; + } + } + return true; + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null && isNotBlank((String) condition); + } + + @Override + public String toString() { + return "token"; + } + }, + + /** Whether the text contains a token that starts with a given term (case insensitive). */ + tokenPrefix { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + return value != null && evaluate(value.toString(), (String) condition); + } + + boolean evaluate(String value, String prefix) { + for (String token : tokenize(value.toLowerCase())) { + if (token.startsWith(prefix.toLowerCase().trim())) { + return true; + } + } + return false; + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null; + } + + @Override + public String toString() { + return "tokenPrefix"; + } + }, + + /** Whether the text contains a token that matches a regular expression (case insensitive). 
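+ * (Illustrative note, not in the original Javadoc: {@code tokenRegex} with the condition {@code
+ * "gr.*ph"} matches the text "a graph database", because the token "graph" matches the pattern.)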
*/ + tokenRegex { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + return value != null && evaluate(value.toString(), (String) condition); + } + + boolean evaluate(String value, String regex) { + Pattern compiled = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); + for (String token : tokenize(value.toLowerCase())) { + if (compiled.matcher(token).matches()) { + return true; + } + } + return false; + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null && isNotBlank((String) condition); + } + + @Override + public String toString() { + return "tokenRegex"; + } + }, + + /** + * Whether some token in the text is within a given edit distance from the given term (case + * insensitive). + */ + tokenFuzzy { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + if (value == null) { + return false; + } + + EditDistance fuzzyCondition = (EditDistance) condition; + + for (String token : tokenize(value.toString().toLowerCase())) { + if (SearchUtils.getOptimalStringAlignmentDistance(token, fuzzyCondition.query.toLowerCase()) + <= fuzzyCondition.distance) { + return true; + } + } + + return false; + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null; + } + + @Override + public String toString() { + return "tokenFuzzy"; + } + }, + + /** Whether the text starts with a given prefix (case sensitive). */ + prefix { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + return value != null && value.toString().startsWith(((String) condition).trim()); + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null; + } + + @Override + public String toString() { + return "prefix"; + } + }, + + /** Whether the text matches a regular expression (case sensitive). */ + regex { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + return value != null + && Pattern.compile((String) condition, Pattern.DOTALL) + .matcher(value.toString()) + .matches(); + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null && isNotBlank((String) condition); + } + + @Override + public String toString() { + return "regex"; + } + }, + + /** Whether the text is within a given edit distance from the given term (case sensitive). */ + fuzzy { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + if (value == null) { + return false; + } + EditDistance fuzzyCondition = (EditDistance) condition; + return SearchUtils.getOptimalStringAlignmentDistance(value.toString(), fuzzyCondition.query) + <= fuzzyCondition.distance; + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null; + } + + @Override + public String toString() { + return "fuzzy"; + } + }, + + /** + * Whether tokenized text contains a given phrase, optionally within a given proximity (case + * insensitive). 
+ */ + phrase { + @Override + public boolean test(Object value, Object condition) { + preEvaluate(condition); + if (value == null) { + return false; + } + + EditDistance phraseCondition = (EditDistance) condition; + + List valueTokens = tokenize(value.toString().toLowerCase()); + List phraseTokens = tokenize(phraseCondition.query.toLowerCase()); + + int valuePosition = 0; + int phrasePosition = 0; + int distance = 0; + + // Look for matches while phrase/value tokens and distance budget remain + while (phrasePosition < phraseTokens.size() + && valuePosition < valueTokens.size() + && distance <= phraseCondition.distance) { + + if (phraseTokens.get(phrasePosition).equals(valueTokens.get(valuePosition))) { + // Early return-true when we've matched the whole phrase (within the specified distance) + if (phrasePosition == phraseTokens.size() - 1) { + return true; + } + phrasePosition++; + } else if (0 < phrasePosition) { + // We've previously found at least one matching token in the input string, + // but the current token does not match the phrase. Increment distance. + distance++; + } + + valuePosition++; + } + + return false; + } + + @Override + public boolean isValidCondition(Object condition) { + return condition != null; + } + + @Override + public String toString() { + return "phrase"; + } + }; + + private static boolean isNotBlank(String str) { + if (str == null || str.isEmpty()) { + return false; + } + int strLen = str.length(); + for (int i = 0; i < strLen; i++) { + if (!Character.isWhitespace(str.charAt(i))) { + return true; + } + } + return false; + } + + // Match anything that is not either: + // 1) a unicode letter, regardless of subcategory (same as Character.isLetter), or + // 2) a unicode decimal digit number (same as Character.isDigit) + private static final Pattern TOKEN_SPLIT_PATTERN = Pattern.compile("[^\\p{L}\\p{Nd}]"); + + static List tokenize(String str) { + String[] rawTokens = TOKEN_SPLIT_PATTERN.split(str); // could contain empty strings + return Stream.of(rawTokens).filter(t -> 0 < t.length()).collect(Collectors.toList()); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java new file mode 100644 index 00000000000..3440c40e87a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SearchUtils.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +public class SearchUtils { + + /** + * Finds the Optimal + * string alignment distance – also referred to as the Damerau-Levenshtein distance – between + * two strings. + * + *

+ * <p>This is the number of changes needed to change one string into another (insertions,
+ * deletions or substitutions of a single character, or transpositions of two adjacent
+ * characters).
+ *
+ * <p>This implementation is based on the Apache Commons Lang implementation of the Levenshtein
+ * distance, only adding support for transpositions.
+ *
+ * <p>
Note that this is the distance used in Lucene for {@code FuzzyTermsEnum}. Lucene itself has + * an implementation of this algorithm, but it is much less efficient in terms of space (also note + * that Lucene's implementation does not return the distance, but a similarity score based on it). + * + * @param s the first string, must not be {@code null}. + * @param t the second string, must not be {@code null}. + * @return The Optimal string alignment distance between the two strings. + * @throws IllegalArgumentException if either String input is {@code null}. + * @see org.apache.commons.lang.StringUtils#getLevenshteinDistance(String, String) + * @see + * LuceneLevenshteinDistance + */ + public static int getOptimalStringAlignmentDistance(String s, String t) { + + /* + * Code adapted from https://github.com/apache/commons-lang/blob/LANG_2_6/src/main/java/org/apache/commons/lang/StringUtils.java + * which was originally released under the Apache 2.0 license with the following copyright: + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + if (s == null || t == null) { + throw new IllegalArgumentException("Strings must not be null"); + } + + int n = s.length(); // length of s + int m = t.length(); // length of t + + if (n == 0) { + return m; + } else if (m == 0) { + return n; + } + + if (n > m) { + // swap the input strings to consume less memory + String tmp = s; + s = t; + t = tmp; + n = m; + m = t.length(); + } + + // instead of maintaining the full matrix in memory, + // we use a sliding window containing 3 lines: + // the current line being written to, and + // the two previous ones. + + int d[] = new int[n + 1]; // current line in the cost matrix + int p1[] = new int[n + 1]; // first line above the current one in the cost matrix + int p2[] = new int[n + 1]; // second line above the current one in the cost matrix + int _d[]; // placeholder to assist in swapping p1, p2 and d + + // indexes into strings s and t + int i; // iterates through s + int j; // iterates through t + + for (i = 0; i <= n; i++) { + p1[i] = i; + } + + for (j = 1; j <= m; j++) { + + // jth character of t + char t_j = t.charAt(j - 1); + d[0] = j; + + for (i = 1; i <= n; i++) { + + char s_i = s.charAt(i - 1); + int cost = s_i == t_j ? 
0 : 1; + + int deletion = d[i - 1] + 1; // cell to the left + 1 + int insertion = p1[i] + 1; // cell to the top + 1 + int substitution = p1[i - 1] + cost; // cell diagonally left and up + cost + + d[i] = Math.min(Math.min(deletion, insertion), substitution); + + // transposition + if (i > 1 && j > 1 && s_i == t.charAt(j - 2) && s.charAt(i - 2) == t_j) { + d[i] = Math.min(d[i], p2[i - 2] + cost); + } + } + + // swap arrays + _d = p2; + p2 = p1; + p1 = d; + d = _d; + } + + // our last action in the above loop was to switch d and p1, so p1 now + // actually has the most recent cost counts + return p1[n]; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java new file mode 100644 index 00000000000..ff1d984d745 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/SinglePageGraphResultSet.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Iterator; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe +public class SinglePageGraphResultSet implements GraphResultSet { + + private final AsyncGraphResultSet onlyPage; + + public SinglePageGraphResultSet(AsyncGraphResultSet onlyPage) { + this.onlyPage = onlyPage; + assert !onlyPage.hasMorePages(); + } + + @NonNull + @Override + public ExecutionInfo getRequestExecutionInfo() { + return onlyPage.getRequestExecutionInfo(); + } + + @NonNull + @Override + @Deprecated + public com.datastax.dse.driver.api.core.graph.GraphExecutionInfo getExecutionInfo() { + return onlyPage.getExecutionInfo(); + } + + @NonNull + @Override + public Iterator iterator() { + return onlyPage.currentPage().iterator(); + } + + @Override + public void cancel() { + onlyPage.cancel(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java new file mode 100644 index 00000000000..5650d904350 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/TinkerpopBufferUtil.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
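
Editorial aside: a minimal, untested sketch (not part of this patch) of the expected behavior of `SearchUtils.getOptimalStringAlignmentDistance` above; it assumes the same internal package so the class resolves. Unlike plain Levenshtein distance, a transposition of two adjacent characters costs 1, not 2.

```java
package com.datastax.dse.driver.internal.core.graph;

public class OsaDistanceSketch {
  public static void main(String[] args) {
    // Classic Levenshtein example; no transpositions involved:
    System.out.println(SearchUtils.getOptimalStringAlignmentDistance("kitten", "sitting")); // 3
    // One adjacent transposition ("bc" -> "cb") counts as a single edit:
    System.out.println(SearchUtils.getOptimalStringAlignmentDistance("abc", "acb")); // 1
    // Identical strings:
    System.out.println(SearchUtils.getOptimalStringAlignmentDistance("graph", "graph")); // 0
  }
}
```
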
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import java.nio.ByteBuffer; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; + +/** Mirror of {@link ByteBufUtil} for Tinkerpop Buffer's */ +public class TinkerpopBufferUtil { + + public static ByteBuffer readBytes(Buffer tinkerBuff, int size) { + ByteBuffer res = ByteBuffer.allocate(size); + tinkerBuff.readBytes(res); + res.flip(); + return res; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java new file mode 100644 index 00000000000..649f5310c5d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractDynamicGraphBinaryCustomSerializer.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.binary; + +import java.io.IOException; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; + +/** + * Convenience class for dynamic types implemented as Custom types in GraphBinary. This class will + * take care of handling {value_length} automatically for implementing classes. {@link + * #writeDynamicCustomValue(Object, Buffer, GraphBinaryWriter)} and {@link + * #readDynamicCustomValue(Buffer, GraphBinaryReader)} only need to handle writing the internal + * components of the custom type. + * + * @param the java type the implementing classes will encode and decode. 
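+ *     (Illustrative note, not in the original Javadoc: a subclass for a hypothetical {@code
+ *     Point} type would write only the point's coordinates in {@code writeDynamicCustomValue};
+ *     the {value_length} framing is prepended automatically by {@code writeCustomValue} below.
+ *     A full sketch of such a subclass appears at the end of this section.)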
+ */ +public abstract class AbstractDynamicGraphBinaryCustomSerializer + extends AbstractSimpleGraphBinaryCustomSerializer { + protected abstract void writeDynamicCustomValue(T value, Buffer buffer, GraphBinaryWriter context) + throws IOException; + + protected abstract T readDynamicCustomValue(Buffer buffer, GraphBinaryReader context) + throws IOException; + + @Override + protected T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context) + throws IOException { + int initialIndex = buffer.readerIndex(); + + // read actual custom value + T read = readDynamicCustomValue(buffer, context); + + // make sure we didn't read more than what was input as {value_length} + checkValueSize(valueLength, (buffer.readerIndex() - initialIndex)); + + return read; + } + + @Override + protected void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context) + throws IOException { + // Store the current writer index + final int valueLengthIndex = buffer.writerIndex(); + + // Write a dummy length that will be overwritten at the end of this method + buffer.writeInt(0); + + // Custom type's writer logic + writeDynamicCustomValue(value, buffer, context); + + // value_length = diff written - 4 bytes for the dummy length + final int valueLength = buffer.writerIndex() - valueLengthIndex - GraphBinaryUtils.sizeOfInt(); + + // Go back, write the {value_length} and then reset back the writer index + buffer.markWriterIndex().writerIndex(valueLengthIndex).writeInt(valueLength).resetWriterIndex(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java new file mode 100644 index 00000000000..6dd149707e8 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/AbstractSimpleGraphBinaryCustomSerializer.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.binary; + +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import java.io.IOException; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.DataType; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.apache.tinkerpop.gremlin.structure.io.binary.types.CustomTypeSerializer; + +/** + * A base custom type serializer for DSE types that handles most of the boiler plate code associated + * with GraphBinary's custom types. + * + *
+ * <p>The full format of a custom type in GraphBinary is the following:
+ *
+ * <p>{type_code}{custom_type_name}{custom_type_info_length}{custom_type_info_bytes}{value_flag}{value_length}{value_bytes}
+ *
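+ * <p>For example, a {@code driver.core.Duration} value of 1 month, 2 days and 3 nanoseconds
+ * would be laid out as follows (an illustrative sketch based on {@link CqlDurationSerializer},
+ * not a capture of real traffic):
+ *
+ * <pre>
+ * 0x00                     {type_code}: custom type
+ * "driver.core.Duration"   {custom_type_name}: length-prefixed UTF-8 string
+ * 0x00000000               {custom_type_info_length}: always 0 for DSE types
+ * 0x00                     {value_flag}: value is not null
+ * 0x00000010               {value_length}: 16 bytes
+ * 0x00000001 0x00000002 0x0000000000000003   {value_bytes}: months, days, nanoseconds
+ * </pre>
+ *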
+ * <p>This class is made to handle
+ * {type_code}{custom_type_name}{custom_type_info_length}{custom_type_info_bytes}{value_flag} for
+ * DSE types.
+ *
+ * <p>Implementing classes are still in charge of writing {value_length}{value_bytes} in their
+ * {@link #writeCustomValue(Object, Buffer, GraphBinaryWriter)} implementations, and of reading
+ * {value_bytes} in {@link #readCustomValue(int, Buffer, GraphBinaryReader)}.
+ *
+ * <p>Implementing classes must override {@link CustomTypeSerializer#getTypeName()} with their own
+ * type name.
+ *
+ * @param <T> the java type the implementing classes will encode and decode.
+ */
+abstract class AbstractSimpleGraphBinaryCustomSerializer<T> implements CustomTypeSerializer<T> {
+  AbstractSimpleGraphBinaryCustomSerializer() {
+    super();
+  }
+
+  protected static final String INCORRECT_VALUE_LENGTH_ERROR_MESSAGE =
+      "{value_length} read for this value does not correspond to the size of a '%s' value. [%s] bytes required but got [%s]";
+
+  protected abstract T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context)
+      throws IOException;
+
+  protected abstract void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException;
+
+  protected void checkValueSize(int lengthRequired, int lengthFound) {
+    Preconditions.checkArgument(
+        lengthFound == lengthRequired,
+        INCORRECT_VALUE_LENGTH_ERROR_MESSAGE,
+        getTypeName(),
+        lengthRequired,
+        lengthFound);
+  }
+
+  @Override
+  public DataType getDataType() {
+    return DataType.CUSTOM;
+  }
+
+  @Override
+  public T read(Buffer buffer, GraphBinaryReader context) throws IOException {
+    // the type serializer registry will take care of deserializing {custom_type_name}
+    // read {custom_type_info_length} and verify it is 0.
+    // See #write(Object, Buffer, GraphBinaryWriter) for why it is set to 0
+    if (context.readValue(buffer, Integer.class, false) != 0) {
+      throw new IOException("{custom_type_info} should not be provided for this custom type");
+    }
+
+    return readValue(buffer, context, true);
+  }
+
+  @Override
+  public T readValue(Buffer buffer, GraphBinaryReader context, boolean nullable)
+      throws IOException {
+    if (nullable) {
+      // read {value_flag}
+      final byte valueFlag = buffer.readByte();
+
+      // if the value is flagged as null and the value is nullable
+      if ((valueFlag & 1) == 1) {
+        return null;
+      }
+      // Note: we don't error out if valueFlag == "value is null" while nullable == false,
+      // because the serializer should have errored out at write time if that was the case.
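+      // (see writeValue below: it throws an IOException when value == null and nullable == false)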
+    }
+
+    // Read the byte length of the value bytes
+    final int valueLength = buffer.readInt();
+
+    if (valueLength <= 0) {
+      throw new IOException(String.format("Unexpected value length: %d", valueLength));
+    }
+
+    if (valueLength > buffer.readableBytes()) {
+      throw new IOException(
+          String.format(
+              "Not enough readable bytes: %d bytes required for value (%d bytes available)",
+              valueLength, buffer.readableBytes()));
+    }
+
+    // subclasses are responsible for reading {value_bytes}
+    return readCustomValue(valueLength, buffer, context);
+  }
+
+  @Override
+  public void write(final T value, final Buffer buffer, final GraphBinaryWriter context)
+      throws IOException {
+    // the type serializer registry will take care of serializing {custom_type_name}
+    // write {custom_type_info_length} as 0 because we don't need it for the DSE types
+    context.writeValue(0, buffer, false);
+    writeValue(value, buffer, context, true);
+  }
+
+  @Override
+  public void writeValue(
+      final T value, final Buffer buffer, final GraphBinaryWriter context, final boolean nullable)
+      throws IOException {
+    if (value == null) {
+      if (!nullable) {
+        throw new IOException("Unexpected null value when nullable is false");
+      }
+
+      // writes {value_flag} as "1", which means "the value is null"
+      context.writeValueFlagNull(buffer);
+      return;
+    }
+
+    if (nullable) {
+      // writes {value_flag} as "0", which means "the value is not null"
+      context.writeValueFlagNone(buffer);
+    }
+
+    // subclasses are responsible for writing {value_length} and {value_bytes}
+    writeCustomValue(value, buffer, context);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java
new file mode 100644
index 00000000000..bec3c78743a
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/ComplexTypeSerializerUtil.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.internal.core.graph.TinkerpopBufferUtil;
+import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory;
+import com.datastax.dse.driver.internal.core.protocol.TinkerpopBufferPrimitiveCodec;
+import com.datastax.oss.driver.api.core.data.GettableByIndex;
+import com.datastax.oss.driver.api.core.data.SettableByIndex;
+import com.datastax.oss.driver.api.core.type.CustomType;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.ListType;
+import com.datastax.oss.driver.api.core.type.MapType;
+import com.datastax.oss.driver.api.core.type.SetType;
+import com.datastax.oss.driver.api.core.type.TupleType;
+import com.datastax.oss.driver.api.core.type.UserDefinedType;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import com.datastax.oss.driver.internal.core.type.DataTypeHelper;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.datastax.oss.protocol.internal.PrimitiveCodec;
+import com.datastax.oss.protocol.internal.ProtocolConstants;
+import com.datastax.oss.protocol.internal.response.result.RawType;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.nio.BufferUnderflowException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+
+class ComplexTypeSerializerUtil {
+
+  private static final PrimitiveCodec<Buffer> codec =
+      new TinkerpopBufferPrimitiveCodec(new DseNettyBufferFactory());
+
+  static void encodeTypeDefinition(
+      DataType type, Buffer buffer, DefaultDriverContext driverContext) {
+    RawType protocolType = toProtocolSpec(type);
+    protocolType.encode(buffer, codec, driverContext.getProtocolVersion().getCode());
+  }
+
+  static DataType decodeTypeDefinition(Buffer buffer, DefaultDriverContext driverContext) {
+    RawType type = RawType.decode(buffer, codec, driverContext.getProtocolVersion().getCode());
+    return DataTypeHelper.fromProtocolSpec(type, driverContext);
+  }
+
+  /* Tinkerpop-based encoding of UDT and tuple values, based on the UdtCodec.encode() method, but
+  using Tinkerpop buffers directly to avoid unnecessary NIO ByteBuffer copies. */
+  static void encodeValue(@Nullable GettableByIndex value, Buffer tinkerBuff) {
+    if (value == null) {
+      return;
+    }
+
+    // each field is written as a 4-byte length (-1 for null) followed by the field's raw bytes
+    for (int i = 0; i < value.size(); i++) {
+      ByteBuffer fieldBuffer = value.getBytesUnsafe(i);
+      if (fieldBuffer == null) {
+        tinkerBuff.writeInt(-1);
+      } else {
+        tinkerBuff.writeInt(fieldBuffer.remaining());
+        tinkerBuff.writeBytes(fieldBuffer.duplicate());
+      }
+    }
+  }
+
+  /* This method moves the given Tinkerpop buffer forward as the UDT value is read. The content of
+  the method is roughly equivalent to UdtCodec.decode(), but uses Tinkerpop buffers directly to
+  avoid unnecessary NIO ByteBuffer copies. */
+  static <T extends SettableByIndex<T>> T decodeValue(Buffer tinkerBuff, T val, int size) {
+    try {
+      for (int i = 0; i < size; i++) {
+        int fieldSize = tinkerBuff.readInt();
+        if (fieldSize >= 0) {
+          // the reassignment is to shut down the error-prone warning about ignoring return values
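+          // (a negative fieldSize means the field was written as null: no bytes follow, so the
+          // field is simply skipped and left unset)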
+          val = val.setBytesUnsafe(i, TinkerpopBufferUtil.readBytes(tinkerBuff, fieldSize));
+        }
+      }
+      return val;
+    } catch (BufferUnderflowException e) {
+      throw new IllegalArgumentException("Not enough bytes to deserialize a UDT value", e);
+    }
+  }
+
+  private static RawType toProtocolSpec(DataType dataType) {
+    int id = dataType.getProtocolCode();
+    RawType type = RawType.PRIMITIVES.get(id);
+    if (type != null) {
+      return type;
+    }
+
+    switch (id) {
+      case ProtocolConstants.DataType.CUSTOM:
+        CustomType customType = ((CustomType) dataType);
+        type = new RawType.RawCustom(customType.getClassName());
+        break;
+      case ProtocolConstants.DataType.LIST:
+        ListType listType = ((ListType) dataType);
+        type = new RawType.RawList(toProtocolSpec(listType.getElementType()));
+        break;
+      case ProtocolConstants.DataType.SET:
+        SetType setType = ((SetType) dataType);
+        type = new RawType.RawSet(toProtocolSpec(setType.getElementType()));
+        break;
+      case ProtocolConstants.DataType.MAP:
+        MapType mapType = ((MapType) dataType);
+        type =
+            new RawType.RawMap(
+                toProtocolSpec(mapType.getKeyType()), toProtocolSpec(mapType.getValueType()));
+        break;
+      case ProtocolConstants.DataType.TUPLE:
+        TupleType tupleType = ((TupleType) dataType);
+        ImmutableList.Builder<RawType> subTypesList =
+            ImmutableList.builderWithExpectedSize(tupleType.getComponentTypes().size());
+        for (int i = 0; i < tupleType.getComponentTypes().size(); i++) {
+          subTypesList.add(toProtocolSpec(tupleType.getComponentTypes().get(i)));
+        }
+        type = new RawType.RawTuple(subTypesList.build());
+        break;
+      case ProtocolConstants.DataType.UDT:
+        UserDefinedType userDefinedType = ((UserDefinedType) dataType);
+        ImmutableMap.Builder<String, RawType> subTypesMap =
+            ImmutableMap.builderWithExpectedSize(userDefinedType.getFieldNames().size());
+        for (int i = 0; i < userDefinedType.getFieldTypes().size(); i++) {
+          subTypesMap.put(
+              userDefinedType.getFieldNames().get(i).asInternal(),
+              toProtocolSpec(userDefinedType.getFieldTypes().get(i)));
+        }
+        type =
+            new RawType.RawUdt(
+                Objects.requireNonNull(userDefinedType.getKeyspace()).asInternal(),
+                userDefinedType.getName().asInternal(),
+                subTypesMap.build());
+        break;
+      default:
+        throw new IllegalArgumentException("Unsupported type: " + dataType.asCql(true, true));
+    }
+    return type;
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java
new file mode 100644
index 00000000000..1ac97de0ef4
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/CqlDurationSerializer.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.oss.driver.api.core.data.CqlDuration;
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+
+public class CqlDurationSerializer
+    extends AbstractSimpleGraphBinaryCustomSerializer<CqlDuration> {
+
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_DURATION_TYPE_NAME;
+  }
+
+  @Override
+  protected CqlDuration readCustomValue(
+      final int valueLength, final Buffer buffer, final GraphBinaryReader context)
+      throws IOException {
+    checkValueSize(GraphBinaryUtils.sizeOfDuration(), valueLength);
+    return CqlDuration.newInstance(
+        context.readValue(buffer, Integer.class, false),
+        context.readValue(buffer, Integer.class, false),
+        context.readValue(buffer, Long.class, false));
+  }
+
+  @Override
+  protected void writeCustomValue(CqlDuration value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    context.writeValue(GraphBinaryUtils.sizeOfDuration(), buffer, false);
+    context.writeValue(value.getMonths(), buffer, false);
+    context.writeValue(value.getDays(), buffer, false);
+    context.writeValue(value.getNanoseconds(), buffer, false);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java
new file mode 100644
index 00000000000..9e281b2b84a
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/DistanceSerializer.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.api.core.data.geometry.Point;
+import com.datastax.dse.driver.internal.core.data.geometry.Distance;
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+
+public class DistanceSerializer extends AbstractSimpleGraphBinaryCustomSerializer<Distance> {
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_DISTANCE_TYPE_NAME;
+  }
+
+  @Override
+  protected Distance readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context)
+      throws IOException {
+    Point p = context.readValue(buffer, Point.class, false);
+    checkValueSize(GraphBinaryUtils.sizeOfDistance(p), valueLength);
+    return new Distance(p, context.readValue(buffer, Double.class, false));
+  }
+
+  @Override
+  protected void writeCustomValue(Distance value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    context.writeValue(GraphBinaryUtils.sizeOfDistance(value.getCenter()), buffer, false);
+    context.writeValue(value.getCenter(), buffer, false);
+    context.writeValue(value.getRadius(), buffer, false);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java
new file mode 100644
index 00000000000..b2831040123
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/EditDistanceSerializer.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.internal.core.graph.EditDistance;
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+
+public class EditDistanceSerializer
+    extends AbstractSimpleGraphBinaryCustomSerializer<EditDistance> {
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_EDIT_DISTANCE_TYPE_NAME;
+  }
+
+  @Override
+  protected EditDistance readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context)
+      throws IOException {
+    int distance = context.readValue(buffer, Integer.class, false);
+    String query = context.readValue(buffer, String.class, false);
+    checkValueSize(GraphBinaryUtils.sizeOfEditDistance(query), valueLength);
+
+    return new EditDistance(query, distance);
+  }
+
+  @Override
+  protected void writeCustomValue(EditDistance value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    context.writeValue(GraphBinaryUtils.sizeOfEditDistance(value.query), buffer, false);
+    context.writeValue(value.distance, buffer, false);
+    context.writeValue(value.query, buffer, false);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java
new file mode 100644
index 00000000000..996e79c7693
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GeometrySerializer.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.api.core.data.geometry.Geometry;
+import com.datastax.dse.driver.internal.core.graph.TinkerpopBufferUtil;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+
+public abstract class GeometrySerializer<T extends Geometry>
+    extends AbstractSimpleGraphBinaryCustomSerializer<T> {
+  public abstract T fromWellKnownBinary(ByteBuffer buffer);
+
+  @Override
+  protected T readCustomValue(int valueLength, Buffer buffer, GraphBinaryReader context)
+      throws IOException {
+    return fromWellKnownBinary(TinkerpopBufferUtil.readBytes(buffer, valueLength));
+  }
+
+  @Override
+  protected void writeCustomValue(T value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    ByteBuffer bb = value.asWellKnownBinary();
+
+    // writing the {value_length}
+    context.writeValue(bb.remaining(), buffer, false);
+    buffer.writeBytes(bb);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java
new file mode 100644
index 00000000000..59f966a34c2
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryModule.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.api.core.data.geometry.LineString;
+import com.datastax.dse.driver.api.core.data.geometry.Point;
+import com.datastax.dse.driver.api.core.data.geometry.Polygon;
+import com.datastax.dse.driver.internal.core.data.geometry.Distance;
+import com.datastax.dse.driver.internal.core.graph.EditDistance;
+import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory;
+import com.datastax.oss.driver.api.core.data.CqlDuration;
+import com.datastax.oss.driver.api.core.data.TupleValue;
+import com.datastax.oss.driver.api.core.data.UdtValue;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.UnpooledByteBufAllocator;
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.BufferFactory;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry;
+import org.javatuples.Pair;
+
+public class GraphBinaryModule {
+  public static final UnpooledByteBufAllocator ALLOCATOR = new UnpooledByteBufAllocator(false);
+  private static final BufferFactory<ByteBuf> FACTORY = new DseNettyBufferFactory();
+
+  static final String GRAPH_BINARY_POINT_TYPE_NAME = "driver.dse.geometry.Point";
+  static final String GRAPH_BINARY_LINESTRING_TYPE_NAME = "driver.dse.geometry.LineString";
+  static final String GRAPH_BINARY_POLYGON_TYPE_NAME = "driver.dse.geometry.Polygon";
+  static final String GRAPH_BINARY_DISTANCE_TYPE_NAME = "driver.dse.geometry.Distance";
+  static final String GRAPH_BINARY_DURATION_TYPE_NAME = "driver.core.Duration";
+  static final String GRAPH_BINARY_EDIT_DISTANCE_TYPE_NAME = "driver.dse.search.EditDistance";
+  static final String GRAPH_BINARY_TUPLE_VALUE_TYPE_NAME = "driver.core.TupleValue";
+  static final String GRAPH_BINARY_UDT_VALUE_TYPE_NAME = "driver.core.UDTValue";
+  static final String GRAPH_BINARY_PAIR_TYPE_NAME = "org.javatuples.Pair";
+
+  private final GraphBinaryReader reader;
+  private final GraphBinaryWriter writer;
+
+  public GraphBinaryModule(GraphBinaryReader reader, GraphBinaryWriter writer) {
+    this.reader = reader;
+    this.writer = writer;
+  }
+
+  public static TypeSerializerRegistry createDseTypeSerializerRegistry(
+      DefaultDriverContext driverContext) {
+    return TypeSerializerRegistry.build()
+        .addCustomType(CqlDuration.class, new CqlDurationSerializer())
+        .addCustomType(Point.class, new PointSerializer())
+        .addCustomType(LineString.class, new LineStringSerializer())
+        .addCustomType(Polygon.class, new PolygonSerializer())
+        .addCustomType(Distance.class, new DistanceSerializer())
+        .addCustomType(EditDistance.class, new EditDistanceSerializer())
+        .addCustomType(TupleValue.class, new TupleValueSerializer(driverContext))
+        .addCustomType(UdtValue.class, new UdtValueSerializer(driverContext))
+        .addCustomType(Pair.class, new PairSerializer())
+        .create();
+  }
+
+  @SuppressWarnings("TypeParameterUnusedInFormals")
+  public <T> T deserialize(final Buffer buffer) throws IOException {
+    return reader.read(buffer);
+  }
+
+  public <T> Buffer serialize(final T value) throws IOException {
+    return serialize(value, FACTORY.create(ALLOCATOR.heapBuffer()));
+  }
+
+  public <T> Buffer serialize(final T value, final Buffer buffer) throws IOException {
+    try {
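+      // write into the caller-provided buffer; on failure, release it so the allocation isn't leaked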
writer.write(value, buffer); + return buffer; + } catch (Exception e) { + buffer.release(); + throw e; + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java new file mode 100644 index 00000000000..42283cd5167 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/GraphBinaryUtils.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.binary; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import java.nio.charset.StandardCharsets; + +class GraphBinaryUtils { + static int sizeOfInt() { + return 4; + } + + static int sizeOfLong() { + return 8; + } + + static int sizeOfDouble() { + return 8; + } + + static int sizeOfPoint(Point point) { + return point.asWellKnownBinary().remaining(); + } + + /* assumes UTF8 */ + static int sizeOfString(String s) { + // length + data length + return sizeOfInt() + s.getBytes(StandardCharsets.UTF_8).length; + } + + static int sizeOfDuration() { + return sizeOfInt() + sizeOfInt() + sizeOfLong(); + } + + static int sizeOfDistance(Point point) { + return sizeOfPoint(point) + sizeOfDouble(); + } + + static int sizeOfEditDistance(String s) { + return sizeOfInt() + sizeOfString(s); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java new file mode 100644 index 00000000000..4dfa8f8f0f1 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/LineStringSerializer.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.api.core.data.geometry.LineString;
+import java.nio.ByteBuffer;
+
+public class LineStringSerializer extends GeometrySerializer<LineString> {
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_LINESTRING_TYPE_NAME;
+  }
+
+  @Override
+  public LineString fromWellKnownBinary(ByteBuffer buffer) {
+    return LineString.fromWellKnownBinary(buffer);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java
new file mode 100644
index 00000000000..3f13dd5b3a0
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PairSerializer.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+import org.javatuples.Pair;
+
+public class PairSerializer extends AbstractDynamicGraphBinaryCustomSerializer<Pair> {
+
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_PAIR_TYPE_NAME;
+  }
+
+  @Override
+  protected Pair readDynamicCustomValue(Buffer buffer, GraphBinaryReader context)
+      throws IOException {
+    return new Pair<>(context.read(buffer), context.read(buffer));
+  }
+
+  @Override
+  protected void writeDynamicCustomValue(Pair value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    context.write(value.getValue0(), buffer);
+    context.write(value.getValue1(), buffer);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java
new file mode 100644
index 00000000000..2204b0da073
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PointSerializer.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.api.core.data.geometry.Point;
+import java.nio.ByteBuffer;
+
+public class PointSerializer extends GeometrySerializer<Point> {
+
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_POINT_TYPE_NAME;
+  }
+
+  @Override
+  public Point fromWellKnownBinary(ByteBuffer buffer) {
+    return Point.fromWellKnownBinary(buffer);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java
new file mode 100644
index 00000000000..8e3bc67838a
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/PolygonSerializer.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.dse.driver.api.core.data.geometry.Polygon;
+import java.nio.ByteBuffer;
+
+public class PolygonSerializer extends GeometrySerializer<Polygon> {
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_POLYGON_TYPE_NAME;
+  }
+
+  @Override
+  public Polygon fromWellKnownBinary(ByteBuffer buffer) {
+    return Polygon.fromWellKnownBinary(buffer);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java
new file mode 100644
index 00000000000..b7c6fc2098d
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/TupleValueSerializer.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.oss.driver.api.core.data.TupleValue;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.TupleType;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+
+public class TupleValueSerializer extends AbstractDynamicGraphBinaryCustomSerializer<TupleValue> {
+
+  private final DefaultDriverContext driverContext;
+
+  public TupleValueSerializer(DefaultDriverContext driverContext) {
+    this.driverContext = driverContext;
+  }
+
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_TUPLE_VALUE_TYPE_NAME;
+  }
+
+  @Override
+  public TupleValue readDynamicCustomValue(Buffer buffer, GraphBinaryReader context)
+      throws IOException {
+    // read the type first
+    DataType type = ComplexTypeSerializerUtil.decodeTypeDefinition(buffer, driverContext);
+
+    assert type instanceof TupleType
+        : "GraphBinary TupleValue deserializer was called on a value that is not encoded as a TupleValue.";
+
+    TupleType tupleType = (TupleType) type;
+    TupleValue value = tupleType.newValue();
+
+    // then decode the values from the buffer
+    return ComplexTypeSerializerUtil.decodeValue(
+        buffer, value, tupleType.getComponentTypes().size());
+  }
+
+  @Override
+  public void writeDynamicCustomValue(TupleValue value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    // write the type definition first, in native protocol format
+    ComplexTypeSerializerUtil.encodeTypeDefinition(value.getType(), buffer, driverContext);
+
+    // write the value after
+    ComplexTypeSerializerUtil.encodeValue(value, buffer);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java
new file mode 100644
index 00000000000..3e617ebf926
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/UdtValueSerializer.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary;
+
+import com.datastax.oss.driver.api.core.data.UdtValue;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.UserDefinedType;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import java.io.IOException;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader;
+import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter;
+
+public class UdtValueSerializer extends AbstractDynamicGraphBinaryCustomSerializer<UdtValue> {
+  private final DefaultDriverContext driverContext;
+
+  public UdtValueSerializer(DefaultDriverContext driverContext) {
+    this.driverContext = driverContext;
+  }
+
+  @Override
+  public String getTypeName() {
+    return GraphBinaryModule.GRAPH_BINARY_UDT_VALUE_TYPE_NAME;
+  }
+
+  @Override
+  public UdtValue readDynamicCustomValue(Buffer buffer, GraphBinaryReader context)
+      throws IOException {
+    // read the type definition first
+    DataType driverType = ComplexTypeSerializerUtil.decodeTypeDefinition(buffer, driverContext);
+
+    assert driverType instanceof UserDefinedType
+        : "GraphBinary UdtValue deserializer was called on a value that is not encoded as a UdtValue.";
+
+    UserDefinedType userDefinedType = (UserDefinedType) driverType;
+    UdtValue value = userDefinedType.newValue();
+
+    // then read the values
+    return ComplexTypeSerializerUtil.decodeValue(
+        buffer, value, userDefinedType.getFieldTypes().size());
+  }
+
+  @Override
+  public void writeDynamicCustomValue(UdtValue value, Buffer buffer, GraphBinaryWriter context)
+      throws IOException {
+    // write the type definition first, in native protocol format
+    ComplexTypeSerializerUtil.encodeTypeDefinition(value.getType(), buffer, driverContext);
+    // write the value after
+    ComplexTypeSerializerUtil.encodeValue(value, buffer);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java
new file mode 100644
index 00000000000..590ac2e9be2
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBuffer.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.binary.buffer;
+
+import io.netty.buffer.ByteBuf;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import org.apache.tinkerpop.gremlin.structure.io.Buffer;
+
+/**
+ * Internal impl of Tinkerpop Buffers.
We implement an internal type here to allow for this class to + * use shaded Netty types (without bringing all of Tinkerpop into the shaded JAR). The impl is based + * on the initial impl of {@link NettyBuffer} but we don't guarantee that this class will mirror + * changes to that class over time. + */ +final class DseNettyBuffer implements Buffer { + private final ByteBuf buffer; + + /** + * Creates a new instance. + * + * @param buffer The buffer to wrap. + */ + DseNettyBuffer(ByteBuf buffer) { + if (buffer == null) { + throw new IllegalArgumentException("buffer can't be null"); + } + + this.buffer = buffer; + } + + @Override + public int readableBytes() { + return this.buffer.readableBytes(); + } + + @Override + public int readerIndex() { + return this.buffer.readerIndex(); + } + + @Override + public Buffer readerIndex(final int readerIndex) { + this.buffer.readerIndex(readerIndex); + return this; + } + + @Override + public int writerIndex() { + return this.buffer.writerIndex(); + } + + @Override + public Buffer writerIndex(final int writerIndex) { + this.buffer.writerIndex(writerIndex); + return this; + } + + @Override + public Buffer markWriterIndex() { + this.buffer.markWriterIndex(); + return this; + } + + @Override + public Buffer resetWriterIndex() { + this.buffer.resetWriterIndex(); + return this; + } + + @Override + public int capacity() { + return this.buffer.capacity(); + } + + @Override + public boolean isDirect() { + return this.buffer.isDirect(); + } + + @Override + public boolean readBoolean() { + return this.buffer.readBoolean(); + } + + @Override + public byte readByte() { + return this.buffer.readByte(); + } + + @Override + public short readShort() { + return this.buffer.readShort(); + } + + @Override + public int readInt() { + return this.buffer.readInt(); + } + + @Override + public long readLong() { + return this.buffer.readLong(); + } + + @Override + public float readFloat() { + return this.buffer.readFloat(); + } + + @Override + public double readDouble() { + return this.buffer.readDouble(); + } + + @Override + public Buffer readBytes(final byte[] destination) { + this.buffer.readBytes(destination); + return this; + } + + @Override + public Buffer readBytes(final byte[] destination, final int dstIndex, final int length) { + this.buffer.readBytes(destination, dstIndex, length); + return this; + } + + @Override + public Buffer readBytes(final ByteBuffer dst) { + this.buffer.readBytes(dst); + return this; + } + + @Override + public Buffer readBytes(final OutputStream out, final int length) throws IOException { + this.buffer.readBytes(out, length); + return this; + } + + @Override + public Buffer writeBoolean(final boolean value) { + this.buffer.writeBoolean(value); + return this; + } + + @Override + public Buffer writeByte(final int value) { + this.buffer.writeByte(value); + return this; + } + + @Override + public Buffer writeShort(final int value) { + this.buffer.writeShort(value); + return this; + } + + @Override + public Buffer writeInt(final int value) { + this.buffer.writeInt(value); + return this; + } + + @Override + public Buffer writeLong(final long value) { + this.buffer.writeLong(value); + return this; + } + + @Override + public Buffer writeFloat(final float value) { + this.buffer.writeFloat(value); + return this; + } + + @Override + public Buffer writeDouble(final double value) { + this.buffer.writeDouble(value); + return this; + } + + @Override + public Buffer writeBytes(final byte[] src) { + this.buffer.writeBytes(src); + return this; + } + + @Override + 
public Buffer writeBytes(final ByteBuffer src) { + this.buffer.writeBytes(src); + return this; + } + + @Override + public Buffer writeBytes(byte[] src, final int srcIndex, final int length) { + this.buffer.writeBytes(src, srcIndex, length); + return this; + } + + @Override + public boolean release() { + return this.buffer.release(); + } + + @Override + public Buffer retain() { + this.buffer.retain(); + return this; + } + + @Override + public int referenceCount() { + return this.buffer.refCnt(); + } + + @Override + public ByteBuffer[] nioBuffers() { + return this.buffer.nioBuffers(); + } + + @Override + public ByteBuffer nioBuffer() { + return this.buffer.nioBuffer(); + } + + @Override + public ByteBuffer nioBuffer(final int index, final int length) { + return this.buffer.nioBuffer(index, length); + } + + @Override + public ByteBuffer[] nioBuffers(final int index, final int length) { + return this.buffer.nioBuffers(index, length); + } + + @Override + public int nioBufferCount() { + return this.buffer.nioBufferCount(); + } + + @Override + public Buffer getBytes(final int index, final byte[] dst) { + this.buffer.getBytes(index, dst); + return this; + } + + /** Returns the underlying buffer. */ + public ByteBuf getUnderlyingBuffer() { + return this.buffer; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java new file mode 100644 index 00000000000..57ee3cb1a9d --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/binary/buffer/DseNettyBufferFactory.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.binary.buffer; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import java.nio.ByteBuffer; +import java.util.function.Supplier; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.BufferFactory; + +/** + * Internal BufferFactory impl for creation of Tinkerpop buffers. We implement an internal type here + * to allow for this class to use shaded Netty types (without bringing all of Tinkerpop into the + * shaded JAR). The impl is based on the initial impl of {@code + * org.apache.tinkerpop.gremlin.driver.ser.NettyBufferFactory} but we don't guarantee that this + * class will mirror changes to that class over time. 
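 *
 * <p>A typical use (a sketch; the heap allocation below is just one of this factory's methods):
 * <pre>{@code
 * DseNettyBufferFactory factory = new DseNettyBufferFactory();
 * Buffer buffer = factory.heap();
 * buffer.writeInt(42);
 * }</pre>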
+ */
+public class DseNettyBufferFactory implements BufferFactory<ByteBuf> {
+
+  private static final ByteBufAllocator DEFAULT_ALLOCATOR = new UnpooledByteBufAllocator(false);
+
+  private final ByteBufAllocator allocator;
+
+  public DseNettyBufferFactory() {
+    this.allocator = DEFAULT_ALLOCATOR;
+  }
+
+  public DseNettyBufferFactory(ByteBufAllocator allocator) {
+    this.allocator = allocator;
+  }
+
+  @Override
+  public Buffer create(final ByteBuf value) {
+    return new DseNettyBuffer(value);
+  }
+
+  @Override
+  public Buffer wrap(final ByteBuffer value) {
+    return create(Unpooled.wrappedBuffer(value));
+  }
+
+  public Buffer heap() {
+    return create(allocator.heapBuffer());
+  }
+
+  public Buffer heap(int initialSize) {
+    return create(allocator.heapBuffer(initialSize));
+  }
+
+  public Buffer heap(int initialSize, int maxSize) {
+    return create(allocator.heapBuffer(initialSize, maxSize));
+  }
+
+  public Buffer io() {
+    return create(allocator.ioBuffer());
+  }
+
+  public Buffer io(int initialSize) {
+    return create(allocator.ioBuffer(initialSize));
+  }
+
+  public Buffer io(int initialSize, int maxSize) {
+    return create(allocator.ioBuffer(initialSize, maxSize));
+  }
+
+  public Buffer direct() {
+    return create(allocator.directBuffer());
+  }
+
+  public Buffer direct(int initialSize) {
+    return create(allocator.directBuffer(initialSize));
+  }
+
+  public Buffer direct(int initialSize, int maxSize) {
+    return create(allocator.directBuffer(initialSize, maxSize));
+  }
+
+  public Buffer composite(ByteBuf... components) {
+    CompositeByteBuf buff = allocator.compositeBuffer(components.length);
+    buff.addComponents(components);
+    return create(buff);
+  }
+
+  public Buffer composite(Buffer... components) {
+    ByteBuf[] nettyBufs = new ByteBuf[components.length];
+    for (int i = 0; i < components.length; ++i) {
+      if (!(components[i] instanceof DseNettyBuffer)) {
+        throw new IllegalArgumentException("Can only concatenate DseNettyBuffer instances");
+      }
+      nettyBufs[i] = ((DseNettyBuffer) components[i]).getUnderlyingBuffer();
+    }
+    return composite(nettyBufs);
+  }
+
+  public Buffer withBytes(int... bytes) {
+    return withBytes(this::heap, bytes);
+  }
+
+  public Buffer withBytes(Supplier<Buffer> supplier, int... bytes) {
+    Buffer buff = supplier.get();
+    for (int val : bytes) {
+      buff.writeByte(val);
+    }
+    return buff;
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java
new file mode 100644
index 00000000000..fda0eed5333
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphNode.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; +import java.util.Map; +import java.util.Set; +import net.jcip.annotations.NotThreadSafe; +import org.apache.tinkerpop.gremlin.process.traversal.Path; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Property; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; + +@NotThreadSafe +class DefaultReactiveGraphNode implements ReactiveGraphNode { + + private final GraphNode graphNode; + private final ExecutionInfo executionInfo; + + DefaultReactiveGraphNode(@NonNull GraphNode graphNode, @NonNull ExecutionInfo executionInfo) { + this.graphNode = graphNode; + this.executionInfo = executionInfo; + } + + @NonNull + @Override + public ExecutionInfo getExecutionInfo() { + return executionInfo; + } + + @Override + public boolean isNull() { + return graphNode.isNull(); + } + + @Override + public boolean isMap() { + return graphNode.isMap(); + } + + @Override + public Iterable keys() { + return graphNode.keys(); + } + + @Override + public GraphNode getByKey(Object key) { + return graphNode.getByKey(key); + } + + @Override + public Map asMap() { + return graphNode.asMap(); + } + + @Override + public boolean isList() { + return graphNode.isList(); + } + + @Override + public int size() { + return graphNode.size(); + } + + @Override + public GraphNode getByIndex(int index) { + return graphNode.getByIndex(index); + } + + @Override + public List asList() { + return graphNode.asList(); + } + + @Override + public boolean isValue() { + return graphNode.isValue(); + } + + @Override + public int asInt() { + return graphNode.asInt(); + } + + @Override + public boolean asBoolean() { + return graphNode.asBoolean(); + } + + @Override + public long asLong() { + return graphNode.asLong(); + } + + @Override + public double asDouble() { + return graphNode.asDouble(); + } + + @Override + public String asString() { + return graphNode.asString(); + } + + @Override + public ResultT as(Class clazz) { + return graphNode.as(clazz); + } + + @Override + public ResultT as(GenericType type) { + return graphNode.as(type); + } + + @Override + public boolean isVertex() { + return graphNode.isVertex(); + } + + @Override + public Vertex asVertex() { + return graphNode.asVertex(); + } + + @Override + public boolean isEdge() { + return graphNode.isEdge(); + } + + @Override + public Edge asEdge() { + return graphNode.asEdge(); + } + + @Override + public boolean isPath() { + return graphNode.isPath(); + } + + @Override + public Path asPath() { + return graphNode.asPath(); + } + + @Override + public boolean isProperty() { + return graphNode.isProperty(); + } + + @Override + public Property asProperty() { + return graphNode.asProperty(); + } + + @Override + public boolean isVertexProperty() { + return graphNode.isVertexProperty(); + } + + @Override + public VertexProperty asVertexProperty() { + return graphNode.asVertexProperty(); + } + + @Override + public boolean isSet() { + return graphNode.isSet(); + } + + @Override + public Set asSet() { + return graphNode.asSet(); + } + + @Override + public String toString() { + return 
"DefaultReactiveGraphNode{graphNode=" + + graphNode + + ", executionInfo=" + + executionInfo + + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java new file mode 100644 index 00000000000..137e44e4d95 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/DefaultReactiveGraphResultSet.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; +import com.datastax.dse.driver.internal.core.cql.reactive.EmptySubscription; +import com.datastax.dse.driver.internal.core.cql.reactive.SimpleUnicastProcessor; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicBoolean; +import net.jcip.annotations.ThreadSafe; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; + +@ThreadSafe +public class DefaultReactiveGraphResultSet implements ReactiveGraphResultSet { + + private final Callable> firstPage; + + private final AtomicBoolean alreadySubscribed = new AtomicBoolean(false); + + private final SimpleUnicastProcessor executionInfosPublisher = + new SimpleUnicastProcessor<>(); + + public DefaultReactiveGraphResultSet(Callable> firstPage) { + this.firstPage = firstPage; + } + + @Override + public void subscribe(@NonNull Subscriber subscriber) { + // As per rule 1.9, we need to throw an NPE if subscriber is null + Objects.requireNonNull(subscriber, "Subscriber cannot be null"); + // As per rule 1.11, this publisher is allowed to support only one subscriber. + if (alreadySubscribed.compareAndSet(false, true)) { + ReactiveGraphResultSetSubscription subscription = + new ReactiveGraphResultSetSubscription(subscriber, executionInfosPublisher); + try { + subscriber.onSubscribe(subscription); + // must be done after onSubscribe + subscription.start(firstPage); + } catch (Throwable t) { + // As per rule 2.13: In the case that this rule is violated, + // any associated Subscription to the Subscriber MUST be considered as + // cancelled, and the caller MUST raise this error condition in a fashion + // that is adequate for the runtime environment. 
+ subscription.doOnError( + new IllegalStateException( + subscriber + + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", + t)); + } + } else { + subscriber.onSubscribe(EmptySubscription.INSTANCE); + subscriber.onError( + new IllegalStateException("This publisher does not support multiple subscriptions")); + } + // As per 2.13, this method must return normally (i.e. not throw) + } + + @NonNull + @Override + public Publisher getExecutionInfos() { + return executionInfosPublisher; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java new file mode 100644 index 00000000000..45bbd8c62b0 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/FailedReactiveGraphResultSet.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; +import com.datastax.dse.driver.internal.core.cql.reactive.FailedPublisher; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.reactivestreams.Publisher; + +public class FailedReactiveGraphResultSet extends FailedPublisher + implements ReactiveGraphResultSet { + + public FailedReactiveGraphResultSet(Throwable error) { + super(error); + } + + @NonNull + @Override + public Publisher getExecutionInfos() { + return new FailedPublisher<>(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java new file mode 100644 index 00000000000..ed2cd28926c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessor.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet; +import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class ReactiveGraphRequestProcessor + implements RequestProcessor, ReactiveGraphResultSet> { + + public static final GenericType REACTIVE_GRAPH_RESULT_SET = + GenericType.of(ReactiveGraphResultSet.class); + + private final GraphRequestAsyncProcessor asyncGraphProcessor; + + public ReactiveGraphRequestProcessor(@NonNull GraphRequestAsyncProcessor asyncGraphProcessor) { + this.asyncGraphProcessor = asyncGraphProcessor; + } + + @Override + public boolean canProcess(Request request, GenericType resultType) { + return request instanceof GraphStatement && resultType.equals(REACTIVE_GRAPH_RESULT_SET); + } + + @Override + public ReactiveGraphResultSet process( + GraphStatement request, + DefaultSession session, + InternalDriverContext context, + String sessionLogPrefix) { + return new DefaultReactiveGraphResultSet( + () -> asyncGraphProcessor.process(request, session, context, sessionLogPrefix)); + } + + @Override + public ReactiveGraphResultSet newFailure(RuntimeException error) { + return new FailedReactiveGraphResultSet(error); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java new file mode 100644 index 00000000000..c3234d74ebc --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphResultSetSubscription.java @@ -0,0 +1,475 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph.reactive; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode; +import com.datastax.dse.driver.internal.core.cql.reactive.ReactiveOperators; +import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Iterator; +import java.util.Objects; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import net.jcip.annotations.ThreadSafe; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class is very similar to {@link + * com.datastax.dse.driver.internal.core.cql.reactive.ReactiveResultSetSubscription}. It exists + * merely because {@link AsyncGraphResultSet} is not a subtype of {@link + * com.datastax.oss.driver.api.core.AsyncPagingIterable} and thus it would be difficult to re-use + * ReactiveResultSetSubscription for graph result sets. + */ +@ThreadSafe +public class ReactiveGraphResultSetSubscription implements Subscription { + + private static final Logger LOG = + LoggerFactory.getLogger(ReactiveGraphResultSetSubscription.class); + + private static final int MAX_ENQUEUED_PAGES = 4; + + /** Tracks the number of items requested by the subscriber. */ + private final AtomicLong requested = new AtomicLong(0); + + /** The pages received so far, with a maximum of MAX_ENQUEUED_PAGES elements. */ + private final BoundedConcurrentQueue pages = + new BoundedConcurrentQueue<>(MAX_ENQUEUED_PAGES); + + /** + * Used to signal that a thread is currently draining, i.e., emitting items to the subscriber. + * When it is zero, that means there is no ongoing emission. This mechanism effectively serializes + * access to the drain() method, and also keeps track of missed attempts to enter it, since each + * thread that attempts to drain will increment this counter. + * + * @see #drain() + */ + private final AtomicInteger draining = new AtomicInteger(0); + + /** + * Waited upon by the driver and completed when the subscriber requests its first item. + * + *
<p>
Used to hold off emitting results until the subscriber issues its first request for items.
+   * Since this future is only completed from {@link #request(long)}, this effectively defers
+   * enqueueing the first page until the subscriber's first request has been received.
+   *
+   *
<p>
This mechanism avoids sending terminal signals before a request is made when the stream is
+   * empty. Note that as per 2.9, "a Subscriber MUST be prepared to receive an onComplete signal
+   * with or without a preceding Subscription.request(long n) call." However, the TCK considers
+   * this unfair behavior.
+   *
+   * @see #start(Callable)
+   * @see #request(long)
+   */
+  private final CompletableFuture<Void> firstSubscriberRequestArrived = new CompletableFuture<>();
+
+  /** Non-final because it has to be de-referenced; see {@link #clear()}. */
+  private volatile Subscriber<? super ReactiveGraphNode> mainSubscriber;
+
+  private volatile Subscriber<? super ExecutionInfo> executionInfosSubscriber;
+
+  /**
+   * Set to true when the subscription is cancelled, which happens when an error is encountered,
+   * when the result set is fully consumed and the subscription terminates, or when the subscriber
+   * manually calls {@link #cancel()}.
+   */
+  private volatile boolean cancelled = false;
+
+  ReactiveGraphResultSetSubscription(
+      @NonNull Subscriber<? super ReactiveGraphNode> mainSubscriber,
+      @NonNull Subscriber<? super ExecutionInfo> executionInfosSubscriber) {
+    this.mainSubscriber = mainSubscriber;
+    this.executionInfosSubscriber = executionInfosSubscriber;
+  }
+
+  /**
+   * Starts the query execution.
+   *
+   *
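+   * <p>For illustration, the expected handshake is sketched below; the names ({@code subscriber},
+   * {@code executionInfosPublisher}, {@code firstPage}) are those used by {@code
+   * DefaultReactiveGraphResultSet#subscribe}, which performs exactly these steps:
+   *
+   * <pre>{@code
+   * ReactiveGraphResultSetSubscription subscription =
+   *     new ReactiveGraphResultSetSubscription(subscriber, executionInfosPublisher);
+   * subscriber.onSubscribe(subscription);
+   * subscription.start(firstPage); // see the ordering constraint in the next paragraph
+   * }</pre>
+   *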
<p>
Must be called immediately after creating the subscription, but after {@link
+   * Subscriber#onSubscribe(Subscription)}.
+   *
+   * @param firstPage A callable that, when invoked, starts the request and produces the future of
+   *     the first page.
+   */
+  void start(@NonNull Callable<CompletionStage<AsyncGraphResultSet>> firstPage) {
+    firstSubscriberRequestArrived.thenAccept(
+        (aVoid) -> fetchNextPageAndEnqueue(new Page(firstPage)));
+  }
+
+  @Override
+  public void request(long n) {
+    // As per 3.6: after the Subscription is cancelled, additional
+    // calls to request() MUST be NOPs.
+    if (!cancelled) {
+      if (n < 1) {
+        // Validate request as per rule 3.9
+        doOnError(
+            new IllegalArgumentException(
+                mainSubscriber
+                    + " violated the Reactive Streams rule 3.9 by requesting a non-positive number of elements."));
+      } else {
+        // As per rule 3.17, when demand overflows Long.MAX_VALUE
+        // it can be treated as "effectively unbounded"
+        ReactiveOperators.addCap(requested, n);
+        // Complete the first future if not done yet.
+        // This will make the first page of results ready for consumption,
+        // see start().
+        // As per 2.7 it is the subscriber's responsibility to provide
+        // external synchronization when calling request(),
+        // so the check-then-act idiom below is good enough
+        // (and besides, complete() is idempotent).
+        if (!firstSubscriberRequestArrived.isDone()) {
+          firstSubscriberRequestArrived.complete(null);
+        }
+        drain();
+      }
+    }
+  }
+
+  @Override
+  public void cancel() {
+    // As per 3.5: Subscription.cancel() MUST respect the responsiveness of
+    // its caller by returning in a timely manner, MUST be idempotent and
+    // MUST be thread-safe.
+    if (!cancelled) {
+      cancelled = true;
+      if (draining.getAndIncrement() == 0) {
+        // If nobody is draining, clear now;
+        // otherwise, the draining thread will notice
+        // that the cancelled flag was set
+        // and will clear for us.
+        clear();
+      }
+    }
+  }
+
+  /**
+   * Attempts to drain available items, i.e. emit them to the subscriber.
+   *
+   *
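+   * <p>In miniature, the serialization idiom used by this method is the following (a condensed
+   * sketch of the body below, not additional behavior):
+   *
+   * <pre>{@code
+   * if (draining.getAndIncrement() != 0) return; // another thread is draining and will loop for us
+   * int missed = 1;
+   * for (;;) {
+   *   // ... emit as many items as requested and readily available ...
+   *   missed = draining.addAndGet(-missed);
+   *   if (missed == 0) break; // nobody attempted to drain while we were busy
+   * }
+   * }</pre>
+   *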
<p>
Access to this method is serialized by the field {@link #draining}: only one thread at a
+   * time can drain, but threads that attempt to drain while another thread is already draining
+   * increment that field; the draining thread, before finishing its work, checks for such missed
+   * attempts and triggers another round of draining if necessary.
+   *
+   *
<p>
The loop is interrupted when 1) the requested amount has been met, 2) there are no more items
+   * readily available, or 3) the subscription has been cancelled.
+   *
+   *
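+   * <p>Items emitted in a round are subtracted from the {@code requested} counter (via {@code
+   * ReactiveOperators.subCap}), so any unmet demand is carried over to the next round.
+   *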
<p>
The loop also checks for stream exhaustion and emits a terminal {@code onComplete} signal in + * this case. + * + *
<p>
This method may run on a driver IO thread when invoked from {@link
+   * #fetchNextPageAndEnqueue(Page)}, or on a subscriber thread when invoked from {@link
+   * #request(long)}.
+   */
+  @SuppressWarnings("ConditionalBreakInInfiniteLoop")
+  private void drain() {
+    // As per 3.4: this method SHOULD respect the responsiveness
+    // of its caller by returning in a timely manner.
+    // We accomplish this with a wait-free implementation.
+    if (draining.getAndIncrement() != 0) {
+      // Someone else is already draining, so do nothing;
+      // the other thread will notice that we attempted to drain.
+      // This also allows us to abide by rule 3.3 and avoid
+      // cycles such as request() -> onNext() -> request(), etc.
+      return;
+    }
+    int missed = 1;
+    // Note: when termination is detected inside this loop,
+    // we MUST call clear() manually.
+    for (; ; ) {
+      // The requested number of items at this point
+      long r = requested.get();
+      // The number of items emitted thus far
+      long emitted = 0L;
+      while (emitted != r) {
+        if (cancelled) {
+          clear();
+          return;
+        }
+        Object result;
+        try {
+          result = tryNext();
+        } catch (Throwable t) {
+          doOnError(t);
+          clear();
+          return;
+        }
+        if (result == null) {
+          break;
+        }
+        if (result instanceof Throwable) {
+          doOnError((Throwable) result);
+          clear();
+          return;
+        }
+        doOnNext((ReactiveGraphNode) result);
+        emitted++;
+      }
+      if (isExhausted()) {
+        doOnComplete();
+        clear();
+        return;
+      }
+      if (cancelled) {
+        clear();
+        return;
+      }
+      if (emitted != 0) {
+        // if any items were emitted, adjust the requested field
+        ReactiveOperators.subCap(requested, emitted);
+      }
+      // if another thread tried to call drain() while we were busy,
+      // then we should do another drain round.
+      missed = draining.addAndGet(-missed);
+      if (missed == 0) {
+        break;
+      }
+    }
+  }
+
+  /**
+   * Tries to return the next item, if one is readily available, and returns {@code null}
+   * otherwise.
+   *
+   *
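+   * <p>Callers in {@link #drain()} dispatch on the returned value: a {@link Throwable} means the
+   * current page carries an error, any other non-null object is the next {@code
+   * ReactiveGraphNode}, and {@code null} means no item is readily available yet.
+   *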
<p>
Cannot run concurrently due to the {@link #draining} field. + */ + @Nullable + private Object tryNext() { + Page current = pages.peek(); + if (current != null) { + if (current.hasMoreRows()) { + return current.nextRow(); + } else if (current.hasMorePages()) { + // Discard current page as it is consumed. + // Don't discard the last page though as we need it + // to test isExhausted(). It will be GC'ed when a terminal signal + // is issued anyway, so that's no big deal. + if (pages.poll() == null) { + throw new AssertionError("Queue is empty, this should not happen"); + } + current = pages.peek(); + // if the next page is readily available, + // serve its first row now, no need to wait + // for the next drain. + if (current != null && current.hasMoreRows()) { + return current.nextRow(); + } + } + } + // No items available right now. + return null; + } + + /** + * Returns {@code true} when the entire stream has been consumed and no more items can be emitted. + * When that is the case, a terminal signal is sent. + * + *
<p>
Cannot run concurrently due to the draining field.
+   */
+  private boolean isExhausted() {
+    Page current = pages.peek();
+    // Note: current can only be null when:
+    // 1) we are waiting for the first page and it hasn't arrived yet;
+    // 2) we just discarded the current page, but the next page hasn't arrived yet.
+    // In any case, a null here means it is not the last page, since the last page
+    // stays in the queue until the very end of the operation.
+    return current != null && !current.hasMoreRows() && !current.hasMorePages();
+  }
+
+  /**
+   * Runs on a subscriber thread initially, see {@link #start(Callable)}. Subsequent executions run
+   * on the thread that completes the pair of futures [current.fetchNextPage, pages.offer] and
+   * enqueues. This can be a driver IO thread or a subscriber thread; in both cases, it cannot run
+   * concurrently, because the next page can only be fetched once the current one has arrived and
+   * been enqueued.
+   */
+  private void fetchNextPageAndEnqueue(@NonNull Page current) {
+    current
+        .fetchNextPage()
+        // as soon as the response arrives,
+        // create the new page
+        .handle(
+            (rs, t) -> {
+              Page page;
+              if (t == null) {
+                page = toPage(rs);
+                executionInfosSubscriber.onNext(rs.getRequestExecutionInfo());
+                if (!page.hasMorePages()) {
+                  executionInfosSubscriber.onComplete();
+                }
+              } else {
+                // Unwrap CompletionExceptions created by combined futures
+                if (t instanceof CompletionException) {
+                  t = t.getCause();
+                }
+                page = toErrorPage(t);
+                executionInfosSubscriber.onError(t);
+              }
+              return page;
+            })
+        .thenCompose(pages::offer)
+        .thenAccept(
+            page -> {
+              if (page.hasMorePages() && !cancelled) {
+                // preemptively fetch the next page, if available
+                fetchNextPageAndEnqueue(page);
+              }
+              drain();
+            });
+  }
+
+  private void doOnNext(@NonNull ReactiveGraphNode result) {
+    try {
+      mainSubscriber.onNext(result);
+    } catch (Throwable t) {
+      LOG.error(
+          mainSubscriber
+              + " violated the Reactive Streams rule 2.13 by throwing an exception from onNext.",
+          t);
+      cancel();
+    }
+  }
+
+  private void doOnComplete() {
+    try {
+      // Then we signal onComplete as per rules 1.2 and 1.5
+      mainSubscriber.onComplete();
+    } catch (Throwable t) {
+      LOG.error(
+          mainSubscriber
+              + " violated the Reactive Streams rule 2.13 by throwing an exception from onComplete.",
+          t);
+    }
+    // We need to consider this Subscription as cancelled as per rule 1.6
+    cancel();
+  }
+
+  // package-private because it can be invoked by the publisher if the subscription handshake
+  // process fails.
+  void doOnError(@NonNull Throwable error) {
+    try {
+      // Then we signal the error downstream, as per rules 1.2 and 1.4.
+      mainSubscriber.onError(error);
+    } catch (Throwable t) {
+      t.addSuppressed(error);
+      LOG.error(
+          mainSubscriber
+              + " violated the Reactive Streams rule 2.13 by throwing an exception from onError.",
+          t);
+    }
+    // We need to consider this Subscription as cancelled as per rule 1.6
+    cancel();
+  }
+
+  private void clear() {
+    // We don't need these pages anymore and should not hold references
+    // to them.
+    pages.clear();
+    // As per 3.13, Subscription.cancel() MUST request the Publisher to
+    // eventually drop any references to the corresponding subscriber.
+    // Our own publishers do not keep references to this subscription,
+    // but downstream processors might do so, which is why we need to
+    // defensively clear the subscriber reference when we are done.
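+    // (Both subscriber fields are volatile, so these writes are visible to any thread that
+    // reads them afterwards.)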
+ mainSubscriber = null; + executionInfosSubscriber = null; + } + + /** + * Converts the received result object into a {@link Page}. + * + * @param rs the result object to convert. + * @return a new page. + */ + @NonNull + private Page toPage(@NonNull AsyncGraphResultSet rs) { + ExecutionInfo executionInfo = rs.getRequestExecutionInfo(); + Iterator results = + Iterators.transform( + rs.currentPage().iterator(), + row -> new DefaultReactiveGraphNode(Objects.requireNonNull(row), executionInfo)); + return new Page(results, rs.hasMorePages() ? rs::fetchNextPage : null); + } + + /** Converts the given error into a {@link Page}, containing the error as its only element. */ + @NonNull + private Page toErrorPage(@NonNull Throwable t) { + return new Page(Iterators.singletonIterator(t), null); + } + + /** + * A page object comprises an iterator over the page's results, and a future pointing to the next + * page (or {@code null}, if it's the last page). + */ + static class Page { + + @NonNull final Iterator iterator; + + // A pointer to the next page, or null if this is the last page. + @Nullable final Callable> nextPage; + + /** called only from start() */ + Page(@NonNull Callable> nextPage) { + this.iterator = Collections.emptyIterator(); + this.nextPage = nextPage; + } + + Page( + @NonNull Iterator iterator, + @Nullable Callable> nextPage) { + this.iterator = iterator; + this.nextPage = nextPage; + } + + boolean hasMorePages() { + return nextPage != null; + } + + @NonNull + CompletionStage fetchNextPage() { + try { + return Objects.requireNonNull(nextPage).call(); + } catch (Exception e) { + // This is a synchronous failure in the driver. + // It can happen in rare cases when the driver throws an exception instead of returning a + // failed future; e.g. if someone tries to execute a continuous paging request but the + // protocol version in use does not support it. + // We treat it as a failed future. + return CompletableFutures.failedFuture(e); + } + } + + boolean hasMoreRows() { + return iterator.hasNext(); + } + + @NonNull + Object nextRow() { + return iterator.next(); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java new file mode 100644 index 00000000000..cecc951a3ab --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/AddressFormatter.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights; + +import java.net.InetAddress; +import java.net.InetSocketAddress; + +class AddressFormatter { + + static String nullSafeToString(Object address) { + if (address instanceof InetAddress) { + return nullSafeToString((InetAddress) address); + } else if (address instanceof InetSocketAddress) { + return nullSafeToString((InetSocketAddress) address); + } else if (address instanceof String) { + return address.toString(); + } else { + return ""; + } + } + + static String nullSafeToString(InetAddress inetAddress) { + return inetAddress != null ? inetAddress.getHostAddress() : null; + } + + static String nullSafeToString(InetSocketAddress inetSocketAddress) { + if (inetSocketAddress != null) { + if (inetSocketAddress.isUnresolved()) { + return String.format( + "%s:%s", + nullSafeToString(inetSocketAddress.getHostName()), inetSocketAddress.getPort()); + } else { + return String.format( + "%s:%s", nullSafeToString(inetSocketAddress.getAddress()), inetSocketAddress.getPort()); + } + } + return null; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinder.java new file mode 100644 index 00000000000..7f5b9c20a0e --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinder.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; + +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import java.util.HashMap; +import java.util.Map; + +class ConfigAntiPatternsFinder { + Map findAntiPatterns(InternalDriverContext driverContext) { + Map antiPatterns = new HashMap<>(); + findSslAntiPattern(driverContext, antiPatterns); + return antiPatterns; + } + + private void findSslAntiPattern( + InternalDriverContext driverContext, Map antiPatterns) { + boolean isSslDefined = + driverContext.getConfig().getDefaultProfile().isDefined(SSL_ENGINE_FACTORY_CLASS); + boolean certValidation = + driverContext.getConfig().getDefaultProfile().getBoolean(SSL_HOSTNAME_VALIDATION, false); + if (isSslDefined && !certValidation) { + antiPatterns.put( + "sslWithoutCertValidation", + "Client-to-node encryption is enabled but server certificate validation is disabled"); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java new file mode 100644 index 00000000000..7112b8dcdf7 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinder.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; + +class DataCentersFinder { + + Set getDataCenters(InternalDriverContext driverContext) { + return getDataCenters( + driverContext.getMetadataManager().getMetadata().getNodes().values(), + driverContext.getConfig().getDefaultProfile()); + } + + @VisibleForTesting + Set getDataCenters(Collection nodes, DriverExecutionProfile executionProfile) { + + int remoteConnectionsLength = executionProfile.getInt(CONNECTION_POOL_REMOTE_SIZE); + + Set dataCenters = new HashSet<>(); + for (Node n : nodes) { + NodeDistance distance = n.getDistance(); + + if (distance.equals(NodeDistance.LOCAL) + || (distance.equals(NodeDistance.REMOTE) && remoteConnectionsLength > 0)) { + dataCenters.add(n.getDatacenter()); + } + } + return dataCenters; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java new file mode 100644 index 00000000000..a7c92d80d96 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinder.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.dse.driver.api.core.config.DseDriverOption.GRAPH_TRAVERSAL_SOURCE; + +import com.datastax.dse.driver.internal.core.insights.PackageUtil.ClassSettingDetails; +import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; +import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; +import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +class ExecutionProfilesInfoFinder { + Map getExecutionProfilesInfo( + InternalDriverContext driverContext) { + + SpecificExecutionProfile defaultProfile = + mapToSpecificProfile(driverContext.getConfig().getDefaultProfile()); + + return driverContext.getConfig().getProfiles().entrySet().stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + e -> { + if (isNotDefaultProfile(e)) { + SpecificExecutionProfile specificExecutionProfile = + mapToSpecificProfile(e.getValue()); + return retainOnlyDifferentFieldsFromSpecificProfile( + defaultProfile, specificExecutionProfile); + } else { + return defaultProfile; + } + })); + } + + private boolean isNotDefaultProfile(Map.Entry e) { + return !e.getKey().equals("default"); + } + + private SpecificExecutionProfile retainOnlyDifferentFieldsFromSpecificProfile( + SpecificExecutionProfile defaultProfile, SpecificExecutionProfile specificExecutionProfile) { + Integer readTimeout = + getIfDifferentOrReturnNull( + defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getReadTimeout); + LoadBalancingInfo loadBalancingInfo = + getIfDifferentOrReturnNull( + defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getLoadBalancing); + + SpeculativeExecutionInfo speculativeExecutionInfo = + getIfDifferentOrReturnNull( + defaultProfile, + specificExecutionProfile, + SpecificExecutionProfile::getSpeculativeExecution); + + String consistency = + getIfDifferentOrReturnNull( + defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getConsistency); + + String serialConsistency = + getIfDifferentOrReturnNull( + defaultProfile, + specificExecutionProfile, + SpecificExecutionProfile::getSerialConsistency); + + Map graphOptions = + getIfDifferentOrReturnNull( + defaultProfile, specificExecutionProfile, SpecificExecutionProfile::getGraphOptions); + + return new SpecificExecutionProfile( + readTimeout, + loadBalancingInfo, + speculativeExecutionInfo, + consistency, + serialConsistency, + graphOptions); + } + + private T getIfDifferentOrReturnNull( + SpecificExecutionProfile defaultProfile, + SpecificExecutionProfile profile, + Function valueExtractor) { + T defaultProfileValue = valueExtractor.apply(defaultProfile); + T specificProfileValue = valueExtractor.apply(profile); + if (defaultProfileValue.equals(specificProfileValue)) { + return null; + } else { + return specificProfileValue; + } + } + + private SpecificExecutionProfile mapToSpecificProfile( + DriverExecutionProfile driverExecutionProfile) { + return new SpecificExecutionProfile( + (int) driverExecutionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT).toMillis(), + 
getLoadBalancingInfo(driverExecutionProfile), + getSpeculativeExecutionInfo(driverExecutionProfile), + driverExecutionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY), + driverExecutionProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY), + getGraphOptions(driverExecutionProfile)); + } + + private SpeculativeExecutionInfo getSpeculativeExecutionInfo( + DriverExecutionProfile driverExecutionProfile) { + Map options = new LinkedHashMap<>(); + + putIfExists( + options, + "maxSpeculativeExecutions", + DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, + driverExecutionProfile); + putIfExists( + options, "delay", DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, driverExecutionProfile); + + ClassSettingDetails speculativeExecutionDetails = + PackageUtil.getSpeculativeExecutionDetails( + driverExecutionProfile.getString( + DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS)); + return new SpeculativeExecutionInfo( + speculativeExecutionDetails.getClassName(), + options, + speculativeExecutionDetails.getFullPackage()); + } + + private void putIfExists( + Map options, + String key, + DefaultDriverOption option, + DriverExecutionProfile executionProfile) { + if (executionProfile.isDefined(option)) { + options.put(key, executionProfile.getInt(option)); + } + } + + private LoadBalancingInfo getLoadBalancingInfo(DriverExecutionProfile driverExecutionProfile) { + Map options = new LinkedHashMap<>(); + if (driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { + options.put( + "localDataCenter", + driverExecutionProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)); + } + @SuppressWarnings("deprecation") + boolean hasNodeFiltering = + driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS) + || driverExecutionProfile.isDefined( + DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS); + options.put("filterFunction", hasNodeFiltering); + ClassSettingDetails loadBalancingDetails = + PackageUtil.getLoadBalancingDetails( + driverExecutionProfile.getString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS)); + return new LoadBalancingInfo( + loadBalancingDetails.getClassName(), options, loadBalancingDetails.getFullPackage()); + } + + private Map getGraphOptions(DriverExecutionProfile driverExecutionProfile) { + Map graphOptionsMap = new HashMap<>(); + String graphTraversalSource = driverExecutionProfile.getString(GRAPH_TRAVERSAL_SOURCE, null); + if (graphTraversalSource != null) { + graphOptionsMap.put("source", graphTraversalSource); + } + return graphOptionsMap; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java new file mode 100644 index 00000000000..f19687adf45 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsClient.java @@ -0,0 +1,491 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.AUTH_PROVIDER_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.HEARTBEAT_INTERVAL; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_COMPRESSION; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.internal.core.insights.PackageUtil.ClassSettingDetails; +import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; +import com.datastax.dse.driver.internal.core.insights.exceptions.InsightEventFormatException; +import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType; +import com.datastax.dse.driver.internal.core.insights.schema.Insight; +import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata; +import com.datastax.dse.driver.internal.core.insights.schema.InsightType; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsStatusData; +import com.datastax.dse.driver.internal.core.insights.schema.PoolSizeByHostDistance; +import com.datastax.dse.driver.internal.core.insights.schema.SSL; +import com.datastax.dse.driver.internal.core.insights.schema.SessionStateForNode; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; +import com.datastax.oss.driver.internal.core.control.ControlConnection; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.protocol.internal.request.Query; +import com.datastax.oss.protocol.internal.request.query.QueryOptions; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.net.InetAddress; +import 
java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class InsightsClient { + private static final Logger LOGGER = LoggerFactory.getLogger(InsightsClient.class); + private static final String STARTUP_MESSAGE_NAME = "driver.startup"; + private static final String STATUS_MESSAGE_NAME = "driver.status"; + private static final String REPORT_INSIGHT_RPC = "CALL InsightsRpc.reportInsight(?)"; + private static final Map TAGS = ImmutableMap.of("language", "java"); + private static final String STARTUP_VERSION_1_ID = "v1"; + private static final String STATUS_VERSION_1_ID = "v1"; + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); + private static final int MAX_NUMBER_OF_STATUS_ERROR_LOGS = 5; + static final String DEFAULT_JAVA_APPLICATION = "Default Java Application"; + + private final ControlConnection controlConnection; + private final String id = Uuids.random().toString(); + private final InsightsConfiguration insightsConfiguration; + private final AtomicInteger numberOfStatusEventErrors = new AtomicInteger(); + + private final InternalDriverContext driverContext; + private final Supplier timestampSupplier; + private final PlatformInfoFinder platformInfoFinder; + private final ReconnectionPolicyInfoFinder reconnectionPolicyInfoInfoFinder; + private final ExecutionProfilesInfoFinder executionProfilesInfoFinder; + private final ConfigAntiPatternsFinder configAntiPatternsFinder; + private final DataCentersFinder dataCentersFinder; + private final StackTraceElement[] initCallStackTrace; + + private volatile ScheduledFuture scheduleInsightsTask; + + public static InsightsClient createInsightsClient( + InsightsConfiguration insightsConfiguration, + InternalDriverContext driverContext, + StackTraceElement[] initCallStackTrace) { + DataCentersFinder dataCentersFinder = new DataCentersFinder(); + return new InsightsClient( + driverContext, + () -> new Date().getTime(), + insightsConfiguration, + new PlatformInfoFinder(), + new ReconnectionPolicyInfoFinder(), + new ExecutionProfilesInfoFinder(), + new ConfigAntiPatternsFinder(), + dataCentersFinder, + initCallStackTrace); + } + + InsightsClient( + InternalDriverContext driverContext, + Supplier timestampSupplier, + InsightsConfiguration insightsConfiguration, + PlatformInfoFinder platformInfoFinder, + ReconnectionPolicyInfoFinder reconnectionPolicyInfoInfoFinder, + ExecutionProfilesInfoFinder executionProfilesInfoFinder, + ConfigAntiPatternsFinder configAntiPatternsFinder, + DataCentersFinder dataCentersFinder, + StackTraceElement[] initCallStackTrace) { + this.driverContext = driverContext; + this.controlConnection = driverContext.getControlConnection(); + this.timestampSupplier = timestampSupplier; + this.insightsConfiguration = insightsConfiguration; + this.platformInfoFinder = platformInfoFinder; + this.reconnectionPolicyInfoInfoFinder = reconnectionPolicyInfoInfoFinder; + 
this.executionProfilesInfoFinder = executionProfilesInfoFinder; + this.configAntiPatternsFinder = configAntiPatternsFinder; + this.dataCentersFinder = dataCentersFinder; + this.initCallStackTrace = initCallStackTrace; + } + + public CompletionStage sendStartupMessage() { + try { + if (!shouldSendEvent()) { + return CompletableFuture.completedFuture(null); + } else { + String startupMessage = createStartupMessage(); + return sendJsonMessage(startupMessage) + .whenComplete( + (aVoid, throwable) -> { + if (throwable != null) { + LOGGER.debug( + "Error while sending startup message to Insights. Message was: " + + trimToFirst500characters(startupMessage), + throwable); + } + }); + } + } catch (Exception e) { + LOGGER.debug("Unexpected error while sending startup message to Insights.", e); + return CompletableFutures.failedFuture(e); + } + } + + private static String trimToFirst500characters(String startupMessage) { + return startupMessage.substring(0, Math.min(startupMessage.length(), 500)); + } + + public void scheduleStatusMessageSend() { + if (!shouldSendEvent()) { + return; + } + scheduleInsightsTask = + scheduleInsightsTask( + insightsConfiguration.getStatusEventDelayMillis(), + insightsConfiguration.getExecutor(), + this::sendStatusMessage); + } + + public void shutdown() { + if (scheduleInsightsTask != null) { + scheduleInsightsTask.cancel(false); + } + } + + @VisibleForTesting + public CompletionStage sendStatusMessage() { + try { + String statusMessage = createStatusMessage(); + CompletionStage result = sendJsonMessage(statusMessage); + return result.whenComplete( + (aVoid, throwable) -> { + if (throwable != null) { + if (numberOfStatusEventErrors.getAndIncrement() < MAX_NUMBER_OF_STATUS_ERROR_LOGS) { + LOGGER.debug( + "Error while sending status message to Insights. 
Message was: " + + trimToFirst500characters(statusMessage), + throwable); + } + } + }); + } catch (Exception e) { + LOGGER.debug("Unexpected error while sending status message to Insights.", e); + return CompletableFutures.failedFuture(e); + } + } + + private CompletionStage sendJsonMessage(String jsonMessage) { + + QueryOptions queryOptions = createQueryOptionsWithJson(jsonMessage); + String logPrefix = driverContext.getSessionName(); + Duration timeout = + driverContext + .getConfig() + .getDefaultProfile() + .getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT); + LOGGER.debug("sending JSON message: {}", jsonMessage); + + Query query = new Query(REPORT_INSIGHT_RPC, queryOptions); + return AdminRequestHandler.call(controlConnection.channel(), query, timeout, logPrefix).start(); + } + + private QueryOptions createQueryOptionsWithJson(String json) { + TypeCodec codec = + driverContext.getCodecRegistry().codecFor(DataTypes.TEXT, String.class); + ByteBuffer startupMessageSerialized = codec.encode(json, DseProtocolVersion.DSE_V2); + return new QueryOptions( + QueryOptions.DEFAULT.consistency, + Collections.singletonList(startupMessageSerialized), + QueryOptions.DEFAULT.namedValues, + QueryOptions.DEFAULT.skipMetadata, + QueryOptions.DEFAULT.pageSize, + QueryOptions.DEFAULT.pagingState, + QueryOptions.DEFAULT.serialConsistency, + QueryOptions.DEFAULT.defaultTimestamp, + QueryOptions.DEFAULT.keyspace, + QueryOptions.DEFAULT.nowInSeconds); + } + + private boolean shouldSendEvent() { + try { + return insightsConfiguration.isMonitorReportingEnabled() + && InsightsSupportVerifier.supportsInsights( + driverContext.getMetadataManager().getMetadata().getNodes().values()); + } catch (Exception e) { + LOGGER.debug("Unexpected error while checking Insights support.", e); + return false; + } + } + + @VisibleForTesting + String createStartupMessage() { + InsightMetadata insightMetadata = createMetadata(STARTUP_MESSAGE_NAME, STARTUP_VERSION_1_ID); + InsightsStartupData data = createStartupData(); + + try { + return OBJECT_MAPPER.writeValueAsString(new Insight<>(insightMetadata, data)); + } catch (JsonProcessingException e) { + throw new InsightEventFormatException("Problem when creating: " + STARTUP_MESSAGE_NAME, e); + } + } + + @VisibleForTesting + String createStatusMessage() { + InsightMetadata insightMetadata = createMetadata(STATUS_MESSAGE_NAME, STATUS_VERSION_1_ID); + InsightsStatusData data = createStatusData(); + + try { + return OBJECT_MAPPER.writeValueAsString(new Insight<>(insightMetadata, data)); + } catch (JsonProcessingException e) { + throw new InsightEventFormatException("Problem when creating: " + STATUS_MESSAGE_NAME, e); + } + } + + private InsightsStatusData createStatusData() { + Map startupOptions = driverContext.getStartupOptions(); + return InsightsStatusData.builder() + .withClientId(getClientId(startupOptions)) + .withSessionId(id) + .withControlConnection(getControlConnectionSocketAddress()) + .withConnectedNodes(getConnectedNodes()) + .build(); + } + + private Map getConnectedNodes() { + Map pools = driverContext.getPoolManager().getPools(); + return pools.entrySet().stream() + .collect( + Collectors.toMap( + entry -> AddressFormatter.nullSafeToString(entry.getKey().getEndPoint().resolve()), + this::constructSessionStateForNode)); + } + + private SessionStateForNode constructSessionStateForNode(Map.Entry entry) { + return new SessionStateForNode( + entry.getKey().getOpenConnections(), entry.getValue().getInFlight()); + } + + private InsightsStartupData createStartupData() { 
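+    // For orientation: the serialized payload mirrors the builder calls below, roughly
+    //   {"clientId": ..., "sessionId": ..., "applicationName": ..., "driverVersion": ...,
+    //    "contactPoints": {...}, "executionProfiles": {...}, "platformInfo": {...}, ...}
+    // (field names here are illustrative; the authoritative shape is defined by the
+    // InsightsStartupData schema class).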
+ Map startupOptions = driverContext.getStartupOptions(); + return InsightsStartupData.builder() + .withClientId(getClientId(startupOptions)) + .withSessionId(id) + .withApplicationName(getApplicationName(startupOptions)) + .withApplicationVersion(getApplicationVersion(startupOptions)) + .withDriverName(getDriverName(startupOptions)) + .withDriverVersion(getDriverVersion(startupOptions)) + .withContactPoints( + getResolvedContactPoints( + driverContext.getMetadataManager().getContactPoints().stream() + .map(n -> n.getEndPoint().resolve()) + .filter(InetSocketAddress.class::isInstance) + .map(InetSocketAddress.class::cast) + .collect(Collectors.toSet()))) + .withInitialControlConnection(getControlConnectionSocketAddress()) + .withProtocolVersion(driverContext.getProtocolVersion().getCode()) + .withLocalAddress(getLocalAddress()) + .withExecutionProfiles(executionProfilesInfoFinder.getExecutionProfilesInfo(driverContext)) + .withPoolSizeByHostDistance(getPoolSizeByHostDistance()) + .withHeartbeatInterval( + driverContext + .getConfig() + .getDefaultProfile() + .getDuration(HEARTBEAT_INTERVAL) + .toMillis()) + .withCompression( + driverContext.getConfig().getDefaultProfile().getString(PROTOCOL_COMPRESSION, "none")) + .withReconnectionPolicy( + reconnectionPolicyInfoInfoFinder.getReconnectionPolicyInfo( + driverContext.getReconnectionPolicy(), + driverContext.getConfig().getDefaultProfile())) + .withSsl(getSsl()) + .withAuthProvider(getAuthProvider()) + .withOtherOptions(getOtherOptions()) + .withPlatformInfo(platformInfoFinder.getInsightsPlatformInfo()) + .withConfigAntiPatterns(configAntiPatternsFinder.findAntiPatterns(driverContext)) + .withPeriodicStatusInterval(getPeriodicStatusInterval()) + .withHostName(getLocalHostName()) + .withApplicationNameWasGenerated(isApplicationNameGenerated(startupOptions)) + .withDataCenters(dataCentersFinder.getDataCenters(driverContext)) + .build(); + } + + private AuthProviderType getAuthProvider() { + String authProviderClassName = + driverContext + .getConfig() + .getDefaultProfile() + .getString(AUTH_PROVIDER_CLASS, "NoAuthProvider"); + ClassSettingDetails authProviderDetails = + PackageUtil.getAuthProviderDetails(authProviderClassName); + return new AuthProviderType( + authProviderDetails.getClassName(), authProviderDetails.getFullPackage()); + } + + private long getPeriodicStatusInterval() { + return TimeUnit.MILLISECONDS.toSeconds(insightsConfiguration.getStatusEventDelayMillis()); + } + + @VisibleForTesting + static Map> getResolvedContactPoints(Set contactPoints) { + if (contactPoints == null) { + return Collections.emptyMap(); + } + return contactPoints.stream() + .collect( + Collectors.groupingBy( + InetSocketAddress::getHostName, + Collectors.mapping(AddressFormatter::nullSafeToString, Collectors.toList()))); + } + + private String getDriverVersion(Map startupOptions) { + return startupOptions.get(StartupOptionsBuilder.DRIVER_VERSION_KEY); + } + + private String getDriverName(Map startupOptions) { + return startupOptions.get(StartupOptionsBuilder.DRIVER_NAME_KEY); + } + + private String getClientId(Map startupOptions) { + return startupOptions.get(StartupOptionsBuilder.CLIENT_ID_KEY); + } + + private boolean isApplicationNameGenerated(Map startupOptions) { + return startupOptions.get(StartupOptionsBuilder.APPLICATION_NAME_KEY) == null; + } + + private String getApplicationVersion(Map startupOptions) { + String applicationVersion = startupOptions.get(StartupOptionsBuilder.APPLICATION_VERSION_KEY); + if (applicationVersion == null) { + return ""; 
+ } + return applicationVersion; + } + + private String getApplicationName(Map startupOptions) { + String applicationName = startupOptions.get(StartupOptionsBuilder.APPLICATION_NAME_KEY); + if (applicationName == null || applicationName.isEmpty()) { + return getClusterCreateCaller(initCallStackTrace); + } + return applicationName; + } + + @VisibleForTesting + static String getClusterCreateCaller(StackTraceElement[] stackTrace) { + for (int i = 0; i < stackTrace.length - 1; i++) { + if (isClusterStackTrace(stackTrace[i])) { + int nextElement = i + 1; + if (!isClusterStackTrace(stackTrace[nextElement])) { + return stackTrace[nextElement].getClassName(); + } + } + } + return DEFAULT_JAVA_APPLICATION; + } + + private static boolean isClusterStackTrace(StackTraceElement stackTraceElement) { + return stackTraceElement.getClassName().equals(DefaultDriverContext.class.getName()) + || stackTraceElement.getClassName().equals(SessionBuilder.class.getName()); + } + + private String getLocalHostName() { + try { + return InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + LOGGER.warn("Can not resolve the name of a host, returning null", e); + return null; + } + } + + private Map getOtherOptions() { + return Collections.emptyMap(); // todo + } + + private SSL getSsl() { + boolean isSslDefined = + driverContext.getConfig().getDefaultProfile().isDefined(SSL_ENGINE_FACTORY_CLASS); + boolean certValidation = + driverContext.getConfig().getDefaultProfile().getBoolean(SSL_HOSTNAME_VALIDATION, false); + return new SSL(isSslDefined, certValidation); + } + + private PoolSizeByHostDistance getPoolSizeByHostDistance() { + + return new PoolSizeByHostDistance( + driverContext.getConfig().getDefaultProfile().getInt(CONNECTION_POOL_LOCAL_SIZE), + driverContext.getConfig().getDefaultProfile().getInt(CONNECTION_POOL_REMOTE_SIZE), + 0); + } + + private String getControlConnectionSocketAddress() { + SocketAddress controlConnectionAddress = controlConnection.channel().getEndPoint().resolve(); + return AddressFormatter.nullSafeToString(controlConnectionAddress); + } + + private String getLocalAddress() { + SocketAddress controlConnectionLocalAddress = controlConnection.channel().localAddress(); + if (controlConnectionLocalAddress instanceof InetSocketAddress) { + return AddressFormatter.nullSafeToString( + ((InetSocketAddress) controlConnectionLocalAddress).getAddress()); + } + return null; + } + + private InsightMetadata createMetadata(String messageName, String messageVersion) { + return new InsightMetadata( + messageName, timestampSupplier.get(), TAGS, InsightType.EVENT, messageVersion); + } + + @VisibleForTesting + static ScheduledFuture scheduleInsightsTask( + long statusEventDelayMillis, + ScheduledExecutorService scheduledTasksExecutor, + Runnable runnable) { + long initialDelay = + (long) Math.floor(statusEventDelayMillis - zeroToTenPercentRandom(statusEventDelayMillis)); + return scheduledTasksExecutor.scheduleWithFixedDelay( + runnable, initialDelay, statusEventDelayMillis, TimeUnit.MILLISECONDS); + } + + private static double zeroToTenPercentRandom(long statusEventDelayMillis) { + return 0.1 * statusEventDelayMillis * Math.random(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java new file mode 100644 index 00000000000..ec016ef52d8 --- /dev/null +++ 
b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifier.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights; + +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; +import java.util.Collection; + +class InsightsSupportVerifier { + private static final Version minDse6Version = Version.parse("6.0.5"); + private static final Version minDse51Version = Version.parse("5.1.13"); + private static final Version dse600Version = Version.parse("6.0.0"); + + static boolean supportsInsights(Collection nodes) { + assert minDse6Version != null; + assert dse600Version != null; + assert minDse51Version != null; + if (nodes.isEmpty()) return false; + + for (Node node : nodes) { + Object version = node.getExtras().get(DseNodeProperties.DSE_VERSION); + if (version == null) { + return false; + } + Version dseVersion = (Version) version; + if (!(dseVersion.compareTo(minDse6Version) >= 0 + || (dseVersion.compareTo(dse600Version) < 0 + && dseVersion.compareTo(minDse51Version) >= 0))) { + return false; + } + } + return true; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java new file mode 100644 index 00000000000..3c61dec4f20 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java
new file mode 100644
index 00000000000..3c61dec4f20
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PackageUtil.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting;
+import com.datastax.oss.driver.shaded.guava.common.base.Joiner;
+import java.util.Arrays;
+import java.util.regex.Pattern;
+
+class PackageUtil {
+  static final String DEFAULT_SPECULATIVE_EXECUTION_PACKAGE =
+      "com.datastax.oss.driver.internal.core.specex";
+  static final String DEFAULT_LOAD_BALANCING_PACKAGE =
+      "com.datastax.oss.driver.internal.core.loadbalancing";
+  static final String DEFAULT_AUTH_PROVIDER_PACKAGE = "com.datastax.oss.driver.internal.core.auth";
+  private static final Pattern PACKAGE_SPLIT_REGEX = Pattern.compile("\\.");
+  private static final Joiner DOT_JOINER = Joiner.on(".");
+
+  static String getNamespace(Class<?> tClass) {
+    String namespace = "";
+    Package packageInfo = tClass.getPackage();
+    if (packageInfo != null) {
+      namespace = packageInfo.getName();
+    }
+    return namespace;
+  }
+
+  static ClassSettingDetails getSpeculativeExecutionDetails(String classSetting) {
+    return getClassSettingDetails(classSetting, DEFAULT_SPECULATIVE_EXECUTION_PACKAGE);
+  }
+
+  static ClassSettingDetails getLoadBalancingDetails(String classSetting) {
+    return getClassSettingDetails(classSetting, DEFAULT_LOAD_BALANCING_PACKAGE);
+  }
+
+  static ClassSettingDetails getAuthProviderDetails(String classSetting) {
+    return getClassSettingDetails(classSetting, DEFAULT_AUTH_PROVIDER_PACKAGE);
+  }
+
+  private static ClassSettingDetails getClassSettingDetails(
+      String classSetting, String packageName) {
+    String className = getClassName(classSetting);
+    String fullPackage = getFullPackageOrDefault(classSetting, packageName);
+    return new ClassSettingDetails(className, fullPackage);
+  }
+
+  @VisibleForTesting
+  static String getClassName(String classSetting) {
+    String[] split = PACKAGE_SPLIT_REGEX.split(classSetting);
+    if (split.length == 0) {
+      return "";
+    }
+    return split[split.length - 1];
+  }
+
+  @VisibleForTesting
+  static String getFullPackageOrDefault(String classSetting, String defaultValue) {
+    String[] split = PACKAGE_SPLIT_REGEX.split(classSetting);
+    if (split.length <= 1) return defaultValue;
+    return DOT_JOINER.join(Arrays.copyOf(split, split.length - 1));
+  }
+
+  static class ClassSettingDetails {
+    private final String className;
+    private final String fullPackage;
+
+    ClassSettingDetails(String className, String fullPackage) {
+      this.className = className;
+      this.fullPackage = fullPackage;
+    }
+
+    String getClassName() {
+      return className;
+    }
+
+    String getFullPackage() {
+      return fullPackage;
+    }
+  }
+}
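Note: PackageUtil splits a class-name setting into a simple name and a package, falling back to a default package for bare names. A rough usage sketch, assuming it runs from the same package since the class is package-private ("com.example.lb.MyPolicy" is a hypothetical setting value):

```java
package com.datastax.dse.driver.internal.core.insights;

public class PackageUtilSketch {
  public static void main(String[] args) {
    PackageUtil.ClassSettingDetails fq =
        PackageUtil.getLoadBalancingDetails("com.example.lb.MyPolicy");
    System.out.println(fq.getClassName());   // MyPolicy
    System.out.println(fq.getFullPackage()); // com.example.lb

    // A bare class name falls back to DEFAULT_LOAD_BALANCING_PACKAGE:
    PackageUtil.ClassSettingDetails bare = PackageUtil.getLoadBalancingDetails("MyPolicy");
    System.out.println(bare.getFullPackage());
  }
}
```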
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java
new file mode 100644
index 00000000000..30d41d40836
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinder.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import static com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.OS;
+import static com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions;
+
+import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo;
+import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.CPUS;
+import com.datastax.oss.driver.internal.core.os.Native;
+import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.function.Function;
+import java.util.regex.Pattern;
+
+class PlatformInfoFinder {
+  private static final String MAVEN_IGNORE_LINE = "The following files have been resolved:";
+  private static final Pattern DEPENDENCY_SPLIT_REGEX = Pattern.compile(":");
+  static final String UNVERIFIED_RUNTIME_VERSION = "UNVERIFIED";
+  private final Function<DependencyFromFile, URL> propertiesUrlProvider;
+
+  @SuppressWarnings("UnnecessaryLambda")
+  private static final Function<DependencyFromFile, URL> M2_PROPERTIES_PROVIDER =
+      d -> {
+        ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+        if (contextClassLoader == null) {
+          contextClassLoader = PlatformInfoFinder.class.getClassLoader();
+        }
+        return contextClassLoader.getResource(
+            "META-INF/maven/" + d.groupId + "/" + d.artifactId + "/pom.properties");
+      };
+
+  PlatformInfoFinder() {
+    this(M2_PROPERTIES_PROVIDER);
+  }
+
+  @VisibleForTesting
+  PlatformInfoFinder(Function<DependencyFromFile, URL> pomPropertiesUrlProvider) {
+    this.propertiesUrlProvider = pomPropertiesUrlProvider;
+  }
+
+  InsightsPlatformInfo getInsightsPlatformInfo() {
+    OS os = getOsInfo();
+    CPUS cpus = getCpuInfo();
+    Map<String, Map<String, RuntimeAndCompileTimeVersions>> runtimeInfo = getRuntimeInfo();
+
+    return new InsightsPlatformInfo(os, cpus, runtimeInfo);
+  }
+
+  private Map<String, Map<String, RuntimeAndCompileTimeVersions>> getRuntimeInfo() {
+    Map<String, RuntimeAndCompileTimeVersions> coreDeps =
+        fetchDependenciesFromFile(
+            this.getClass().getResourceAsStream("/com/datastax/dse/driver/internal/deps.txt"));
+
+    Map<String, RuntimeAndCompileTimeVersions> queryBuilderDeps =
+        fetchDependenciesFromFile(
+            this.getClass()
+                .getResourceAsStream("/com/datastax/dse/driver/internal/querybuilder/deps.txt"));
+
+    Map<String, RuntimeAndCompileTimeVersions> mapperProcessorDeps =
+        fetchDependenciesFromFile(
+            this.getClass()
+                .getResourceAsStream(
+                    "/com/datastax/dse/driver/internal/mapper/processor/deps.txt"));
+
+    Map<String, RuntimeAndCompileTimeVersions> mapperRuntimeDeps =
+        fetchDependenciesFromFile(
+            this.getClass()
+                .getResourceAsStream("/com/datastax/dse/driver/internal/mapper/deps.txt"));
+
+    Map<String, Map<String, RuntimeAndCompileTimeVersions>> runtimeDependencies =
+        new LinkedHashMap<>();
+    putIfNonEmpty(coreDeps, runtimeDependencies, "core");
+    putIfNonEmpty(queryBuilderDeps, runtimeDependencies, "query-builder");
+    putIfNonEmpty(mapperProcessorDeps, runtimeDependencies, "mapper-processor");
+    putIfNonEmpty(mapperRuntimeDeps, runtimeDependencies, "mapper-runtime");
"mapper-runtime"); + addJavaVersion(runtimeDependencies); + return runtimeDependencies; + } + + private void putIfNonEmpty( + Map moduleDependencies, + Map> runtimeDependencies, + String moduleName) { + if (!moduleDependencies.isEmpty()) { + runtimeDependencies.put(moduleName, moduleDependencies); + } + } + + @VisibleForTesting + void addJavaVersion(Map> runtimeDependencies) { + Package javaPackage = Runtime.class.getPackage(); + Map javaDependencies = new LinkedHashMap<>(); + javaDependencies.put( + "version", toSameRuntimeAndCompileVersion(javaPackage.getImplementationVersion())); + javaDependencies.put( + "vendor", toSameRuntimeAndCompileVersion(javaPackage.getImplementationVendor())); + javaDependencies.put( + "title", toSameRuntimeAndCompileVersion(javaPackage.getImplementationTitle())); + putIfNonEmpty(javaDependencies, runtimeDependencies, "java"); + } + + private RuntimeAndCompileTimeVersions toSameRuntimeAndCompileVersion(String version) { + return new RuntimeAndCompileTimeVersions(version, version, false); + } + + /** + * Method is fetching dependencies from file. Lines in file should be in format: + * com.organization:artifactId:jar:1.2.0 or com.organization:artifactId:jar:native:1.2.0 + * + *

For such file the output will be: Map + * "com.organization:artifactId",{"runtimeVersion":"1.2.0", "compileVersion:"1.2.0", "optional": + * false} Duplicates will be omitted. If there are two dependencies for the exactly the same + * organizationId:artifactId it is not deterministic which version will be taken. In the case of + * an error while opening file this method will fail silently returning an empty Map + */ + @VisibleForTesting + Map fetchDependenciesFromFile(InputStream inputStream) { + Map dependencies = new LinkedHashMap<>(); + if (inputStream == null) { + return dependencies; + } + try { + List dependenciesFromFile = extractMavenDependenciesFromFile(inputStream); + for (DependencyFromFile d : dependenciesFromFile) { + dependencies.put(formatDependencyName(d), getRuntimeAndCompileVersion(d)); + } + } catch (IOException e) { + return dependencies; + } + return dependencies; + } + + private RuntimeAndCompileTimeVersions getRuntimeAndCompileVersion(DependencyFromFile d) { + URL url = propertiesUrlProvider.apply(d); + if (url == null) { + return new RuntimeAndCompileTimeVersions( + UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); + } + Properties properties = new Properties(); + try { + properties.load(url.openStream()); + } catch (IOException e) { + return new RuntimeAndCompileTimeVersions( + UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); + } + Object version = properties.get("version"); + if (version == null) { + return new RuntimeAndCompileTimeVersions( + UNVERIFIED_RUNTIME_VERSION, d.getVersion(), d.isOptional()); + } else { + return new RuntimeAndCompileTimeVersions(version.toString(), d.getVersion(), d.isOptional()); + } + } + + private String formatDependencyName(DependencyFromFile d) { + return String.format("%s:%s", d.getGroupId(), d.getArtifactId()); + } + + private List extractMavenDependenciesFromFile(InputStream inputStream) + throws IOException { + List dependenciesFromFile = new ArrayList<>(); + BufferedReader reader = + new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); + for (String line; (line = reader.readLine()) != null; ) { + if (lineWithDependencyInfo(line)) { + dependenciesFromFile.add(extractDependencyFromLine(line.trim())); + } + } + return dependenciesFromFile; + } + + private DependencyFromFile extractDependencyFromLine(String line) { + String[] split = DEPENDENCY_SPLIT_REGEX.split(line); + if (split.length == 6) { // case for i.e.: com.github.jnr:jffi:jar:native:1.2.16:compile + return new DependencyFromFile(split[0], split[1], split[4], checkIsOptional(split[5])); + } else { // case for normal: org.ow2.asm:asm:jar:5.0.3:compile + return new DependencyFromFile(split[0], split[1], split[3], checkIsOptional(split[4])); + } + } + + private boolean checkIsOptional(String scope) { + return scope.contains("(optional)"); + } + + private boolean lineWithDependencyInfo(String line) { + return (!line.equals(MAVEN_IGNORE_LINE) && !line.isEmpty()); + } + + private CPUS getCpuInfo() { + int numberOfProcessors = Runtime.getRuntime().availableProcessors(); + String model = Native.getCpu(); + return new CPUS(numberOfProcessors, model); + } + + private OS getOsInfo() { + String osName = System.getProperty("os.name"); + String osVersion = System.getProperty("os.version"); + String osArch = System.getProperty("os.arch"); + return new OS(osName, osVersion, osArch); + } + + static class DependencyFromFile { + private final String groupId; + private final String artifactId; + private final String version; + private final 
+
+    DependencyFromFile(String groupId, String artifactId, String version, boolean optional) {
+      this.groupId = groupId;
+      this.artifactId = artifactId;
+      this.version = version;
+      this.optional = optional;
+    }
+
+    String getGroupId() {
+      return groupId;
+    }
+
+    String getArtifactId() {
+      return artifactId;
+    }
+
+    String getVersion() {
+      return version;
+    }
+
+    boolean isOptional() {
+      return optional;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof DependencyFromFile)) {
+        return false;
+      }
+      DependencyFromFile that = (DependencyFromFile) o;
+      return optional == that.optional
+          && Objects.equals(groupId, that.groupId)
+          && Objects.equals(artifactId, that.artifactId)
+          && Objects.equals(version, that.version);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(groupId, artifactId, version, optional);
+    }
+
+    @Override
+    public String toString() {
+      return "DependencyFromFile{"
+          + "groupId='"
+          + groupId
+          + '\''
+          + ", artifactId='"
+          + artifactId
+          + '\''
+          + ", version='"
+          + version
+          + '\''
+          + ", optional="
+          + optional
+          + '}';
+    }
+  }
+}
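Note: the two line shapes handled by extractDependencyFromLine are easiest to see with concrete inputs. A small sketch restating the parsing rule (not driver API, just the split positions used above):

```java
import java.util.regex.Pattern;

public class DependencyLineSketch {
  private static final Pattern SPLIT = Pattern.compile(":");

  public static void main(String[] args) {
    // Normal coordinates: groupId:artifactId:packaging:version:scope
    String[] normal = SPLIT.split("org.ow2.asm:asm:jar:5.0.3:compile");
    System.out.println(normal[0] + " / " + normal[1] + " / " + normal[3]); // group / artifact / version

    // With a classifier: groupId:artifactId:packaging:classifier:version:scope
    String[] classified = SPLIT.split("com.github.jnr:jffi:jar:native:1.2.16:compile");
    System.out.println(classified[0] + " / " + classified[1] + " / " + classified[4]);

    // "(optional)" in the scope column marks an optional dependency
    System.out.println("runtime (optional)".contains("(optional)")); // true
  }
}
```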
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java
new file mode 100644
index 00000000000..af8aff74035
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinder.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy;
+import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy;
+import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy;
+import java.util.HashMap;
+import java.util.Map;
+
+class ReconnectionPolicyInfoFinder {
+  ReconnectionPolicyInfo getReconnectionPolicyInfo(
+      ReconnectionPolicy reconnectionPolicy, DriverExecutionProfile executionProfile) {
+    Class<?> reconnectionPolicyClass = reconnectionPolicy.getClass();
+    String type = reconnectionPolicyClass.getSimpleName();
+    String namespace = PackageUtil.getNamespace(reconnectionPolicyClass);
+    Map<String, Object> options = new HashMap<>();
+    if (reconnectionPolicy instanceof ConstantReconnectionPolicy) {
+      options.put(
+          "delayMs",
+          executionProfile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY).toMillis());
+    } else if (reconnectionPolicy instanceof ExponentialReconnectionPolicy) {
+      ExponentialReconnectionPolicy exponentialReconnectionPolicy =
+          (ExponentialReconnectionPolicy) reconnectionPolicy;
+      options.put("maxDelayMs", exponentialReconnectionPolicy.getMaxDelayMs());
+      options.put("baseDelayMs", exponentialReconnectionPolicy.getBaseDelayMs());
+      options.put("maxAttempts", exponentialReconnectionPolicy.getMaxAttempts());
+    }
+    return new ReconnectionPolicyInfo(type, options, namespace);
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java
new file mode 100644
index 00000000000..ac27bb76389
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/configuration/InsightsConfiguration.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.insights.configuration; + +import io.netty.util.concurrent.EventExecutor; + +public class InsightsConfiguration { + private final boolean monitorReportingEnabled; + private final long statusEventDelayMillis; + private final EventExecutor executor; + + public InsightsConfiguration( + boolean monitorReportingEnabled, long statusEventDelayMillis, EventExecutor executor) { + this.monitorReportingEnabled = monitorReportingEnabled; + this.statusEventDelayMillis = statusEventDelayMillis; + this.executor = executor; + } + + public boolean isMonitorReportingEnabled() { + return monitorReportingEnabled; + } + + public long getStatusEventDelayMillis() { + return statusEventDelayMillis; + } + + public EventExecutor getExecutor() { + return executor; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java new file mode 100644 index 00000000000..cfce68971ef --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/exceptions/InsightEventFormatException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights.exceptions; + +public class InsightEventFormatException extends RuntimeException { + + public InsightEventFormatException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java new file mode 100644 index 00000000000..18aec53e899 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/AuthProviderType.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
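Note: InsightsConfiguration is a plain holder, so wiring it up is a one-liner. A hedged sketch using Netty's global executor (the 5-minute interval is an arbitrary example value, not a driver default):

```java
import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration;
import io.netty.util.concurrent.GlobalEventExecutor;
import java.util.concurrent.TimeUnit;

public class InsightsConfigSketch {
  public static void main(String[] args) {
    // Enable monitor reporting with a 5-minute status-event interval.
    InsightsConfiguration config =
        new InsightsConfiguration(
            true, TimeUnit.MINUTES.toMillis(5), GlobalEventExecutor.INSTANCE);
    System.out.println(config.isMonitorReportingEnabled()); // true
    System.out.println(config.getStatusEventDelayMillis()); // 300000
  }
}
```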
+ */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +public class AuthProviderType { + @JsonProperty("type") + private final String type; + + @JsonProperty("namespace") + private final String namespace; + + @JsonCreator + public AuthProviderType( + @JsonProperty("type") String type, @JsonProperty("namespace") String namespace) { + this.type = type; + this.namespace = namespace; + } + + public String getType() { + return type; + } + + public String getNamespace() { + return namespace; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof AuthProviderType)) { + return false; + } + AuthProviderType that = (AuthProviderType) o; + return Objects.equals(type, that.type) && Objects.equals(namespace, that.namespace); + } + + @Override + public int hashCode() { + return Objects.hash(type, namespace); + } + + @Override + public String toString() { + return "AuthProviderType{" + "type='" + type + '\'' + ", namespace='" + namespace + '\'' + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java new file mode 100644 index 00000000000..ca4e6455345 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/Insight.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
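Note: all of the schema classes in this package are Jackson-annotated for the Insights JSON payload. A quick round-trip sketch for AuthProviderType (the provider values here are only an example):

```java
import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType;
import com.fasterxml.jackson.databind.ObjectMapper;

public class AuthProviderTypeJsonSketch {
  public static void main(String[] args) throws Exception {
    AuthProviderType auth =
        new AuthProviderType(
            "PlainTextAuthProvider", "com.datastax.oss.driver.internal.core.auth");
    // Expected shape: {"type":"PlainTextAuthProvider","namespace":"..."}
    System.out.println(new ObjectMapper().writeValueAsString(auth));
  }
}
```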
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_EMPTY)
+public class Insight<T> {
+  @JsonProperty("metadata")
+  private final InsightMetadata metadata;
+
+  @JsonProperty("data")
+  private final T insightData;
+
+  @JsonCreator
+  public Insight(@JsonProperty("metadata") InsightMetadata metadata, @JsonProperty("data") T data) {
+    this.metadata = metadata;
+    this.insightData = data;
+  }
+
+  public InsightMetadata getMetadata() {
+    return metadata;
+  }
+
+  public T getInsightData() {
+    return insightData;
+  }
+
+  @Override
+  public String toString() {
+    return "Insight{" + "metadata=" + metadata + ", insightData=" + insightData + '}';
+  }
+}
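Note: Insight is the generic envelope that pairs a payload with its metadata. A sketch of building and serializing one (the event name, tag, and payload are placeholders, not values the driver sends):

```java
import com.datastax.dse.driver.internal.core.insights.schema.Insight;
import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata;
import com.datastax.dse.driver.internal.core.insights.schema.InsightType;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collections;

public class InsightEnvelopeSketch {
  public static void main(String[] args) throws Exception {
    InsightMetadata metadata =
        new InsightMetadata(
            "driver.startup", // hypothetical event name
            System.currentTimeMillis(),
            Collections.singletonMap("language", "java"),
            InsightType.EVENT,
            "v1");
    Insight<String> insight = new Insight<>(metadata, "payload goes here");
    System.out.println(new ObjectMapper().writeValueAsString(insight));
  }
}
```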
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java
new file mode 100644
index 00000000000..cfa2644b0c7
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightMetadata.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+import com.datastax.oss.driver.shaded.guava.common.base.Preconditions;
+import com.datastax.oss.driver.shaded.guava.common.base.Strings;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Map;
+import java.util.Objects;
+
+public class InsightMetadata {
+  @JsonProperty("name")
+  private final String name;
+
+  @JsonProperty("timestamp")
+  private final long timestamp;
+
+  @JsonProperty("tags")
+  private final Map<String, String> tags;
+
+  @JsonProperty("insightType")
+  private final InsightType insightType;
+
+  @JsonProperty("insightMappingId")
+  @JsonInclude(JsonInclude.Include.NON_NULL)
+  private String insightMappingId;
+
+  @JsonCreator
+  public InsightMetadata(
+      @JsonProperty("name") String name,
+      @JsonProperty("timestamp") long timestamp,
+      @JsonProperty("tags") Map<String, String> tags,
+      @JsonProperty("insightType") InsightType insightType,
+      @JsonProperty("insightMappingId") String insightMappingId) {
+    Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "name is required");
+
+    this.name = name;
+    this.timestamp = timestamp;
+    this.tags = tags;
+    this.insightType = insightType;
+    this.insightMappingId = insightMappingId;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof InsightMetadata)) {
+      return false;
+    }
+    InsightMetadata that = (InsightMetadata) o;
+    return Objects.equals(name, that.name)
+        && Objects.equals(timestamp, that.timestamp)
+        && Objects.equals(tags, that.tags)
+        && insightType == that.insightType
+        && Objects.equals(insightMappingId, that.insightMappingId);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(name, timestamp, tags, insightType, insightMappingId);
+  }
+
+  @Override
+  public String toString() {
+    return "InsightMetadata{"
+        + "name='"
+        + name
+        + '\''
+        + ", timestamp="
+        + timestamp
+        + ", tags="
+        + tags
+        + ", insightType="
+        + insightType
+        + ", insightMappingId="
+        + insightMappingId
+        + '}';
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public Map<String, String> getTags() {
+    return tags;
+  }
+
+  public InsightType getInsightType() {
+    return insightType;
+  }
+
+  public String getInsightMappingId() {
+    return insightMappingId;
+  }
+}
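Note: the only validated field above is the name; a null or empty name fails fast via Preconditions. A quick sketch of that behavior:

```java
import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata;
import com.datastax.dse.driver.internal.core.insights.schema.InsightType;
import java.util.Collections;

public class InsightMetadataSketch {
  public static void main(String[] args) {
    try {
      new InsightMetadata("", 0L, Collections.emptyMap(), InsightType.EVENT, null);
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage()); // "name is required"
    }
  }
}
```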
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java
new file mode 100644
index 00000000000..ae91e27d227
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightType.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+public enum InsightType {
+  EVENT,
+  GAUGE,
+  COUNTER,
+  HISTOGRAM,
+  TIMER,
+  METER,
+  LOG;
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java
new file mode 100644
index 00000000000..231f082d785
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsPlatformInfo.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Map;
+import java.util.Objects;
+
+public class InsightsPlatformInfo {
+  @JsonProperty("os")
+  private final OS os;
+
+  @JsonProperty("cpus")
+  private CPUS cpus;
+
+  /**
+   * All dependencies in a map format, grouped by module: {"core": {"com.datastax.driver:core":
+   * {"runtimeVersion": "1.0.0", "compileVersion": "1.0.1"}, ...}, "extras": {...}}
+   */
+  @JsonProperty("runtime")
+  private Map<String, Map<String, RuntimeAndCompileTimeVersions>> runtime;
+
+  @JsonCreator
+  public InsightsPlatformInfo(
+      @JsonProperty("os") OS os,
+      @JsonProperty("cpus") CPUS cpus,
+      @JsonProperty("runtime") Map<String, Map<String, RuntimeAndCompileTimeVersions>> runtime) {
+    this.os = os;
+    this.cpus = cpus;
+    this.runtime = runtime;
+  }
+
+  public OS getOs() {
+    return os;
+  }
+
+  public CPUS getCpus() {
+    return cpus;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof InsightsPlatformInfo)) {
+      return false;
+    }
+    InsightsPlatformInfo that = (InsightsPlatformInfo) o;
+    return Objects.equals(os, that.os)
+        && Objects.equals(cpus, that.cpus)
+        && Objects.equals(runtime, that.runtime);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(os, cpus, runtime);
+  }
+
+  Map<String, Map<String, RuntimeAndCompileTimeVersions>> getRuntime() {
+    return runtime;
+  }
+
+  public static class OS {
+    @JsonProperty("name")
+    private final String name;
+
+    @JsonProperty("version")
+    private final String version;
+
+    @JsonProperty("arch")
+    private final String arch;
+
+    @JsonCreator
+    public OS(
+        @JsonProperty("name") String name,
+        @JsonProperty("version") String version,
+        @JsonProperty("arch") String arch) {
+      this.name = name;
+      this.version = version;
+      this.arch = arch;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public String getVersion() {
+      return version;
+    }
+
+    public String getArch() {
+      return arch;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof OS)) {
+ return false; + } + OS os = (OS) o; + return Objects.equals(name, os.name) + && Objects.equals(version, os.version) + && Objects.equals(arch, os.arch); + } + + @Override + public int hashCode() { + return Objects.hash(name, version, arch); + } + } + + public static class CPUS { + @JsonProperty("length") + private final int length; + + @JsonProperty("model") + private final String model; + + @JsonCreator + public CPUS(@JsonProperty("length") int length, @JsonProperty("model") String model) { + this.length = length; + this.model = model; + } + + public int getLength() { + return length; + } + + public String getModel() { + return model; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CPUS)) { + return false; + } + CPUS cpus = (CPUS) o; + return length == cpus.length && Objects.equals(model, cpus.model); + } + + @Override + public int hashCode() { + return Objects.hash(length, model); + } + } + + public static class RuntimeAndCompileTimeVersions { + @JsonProperty("runtimeVersion") + private final String runtimeVersion; + + @JsonProperty("compileVersion") + private final String compileVersion; + + @JsonProperty("optional") + private final boolean optional; + + @JsonCreator + public RuntimeAndCompileTimeVersions( + @JsonProperty("runtimeVersion") String runtimeVersion, + @JsonProperty("compileVersion") String compileVersion, + @JsonProperty("optional") boolean optional) { + this.runtimeVersion = runtimeVersion; + this.compileVersion = compileVersion; + this.optional = optional; + } + + public String getRuntimeVersion() { + return runtimeVersion; + } + + public String getCompileVersion() { + return compileVersion; + } + + public boolean isOptional() { + return optional; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof RuntimeAndCompileTimeVersions)) { + return false; + } + RuntimeAndCompileTimeVersions that = (RuntimeAndCompileTimeVersions) o; + return optional == that.optional + && Objects.equals(runtimeVersion, that.runtimeVersion) + && Objects.equals(compileVersion, that.compileVersion); + } + + @Override + public int hashCode() { + return Objects.hash(runtimeVersion, compileVersion, optional); + } + + @Override + public String toString() { + return "RuntimeAndCompileTimeVersions{" + + "runtimeVersion='" + + runtimeVersion + + '\'' + + ", compileVersion='" + + compileVersion + + '\'' + + ", optional=" + + optional + + '}'; + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java new file mode 100644 index 00000000000..bddd3ef94b3 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStartupData.java @@ -0,0 +1,425 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class InsightsStartupData {
+  @JsonProperty("clientId")
+  private final String clientId;
+
+  @JsonProperty("sessionId")
+  private final String sessionId;
+
+  @JsonProperty("applicationName")
+  private final String applicationName;
+
+  @JsonProperty("applicationVersion")
+  private final String applicationVersion;
+
+  @JsonProperty("contactPoints")
+  private final Map<String, List<String>> contactPoints;
+
+  @JsonProperty("initialControlConnection")
+  private final String initialControlConnection;
+
+  @JsonProperty("protocolVersion")
+  private final int protocolVersion;
+
+  @JsonProperty("localAddress")
+  private final String localAddress;
+
+  @JsonProperty("executionProfiles")
+  private final Map<String, SpecificExecutionProfile> executionProfiles;
+
+  @JsonProperty("poolSizeByHostDistance")
+  private final PoolSizeByHostDistance poolSizeByHostDistance;
+
+  @JsonProperty("heartbeatInterval")
+  private final long heartbeatInterval;
+
+  @JsonProperty("compression")
+  private final String compression;
+
+  @JsonProperty("reconnectionPolicy")
+  private final ReconnectionPolicyInfo reconnectionPolicy;
+
+  @JsonProperty("ssl")
+  private final SSL ssl;
+
+  @JsonProperty("authProvider")
+  private final AuthProviderType authProvider;
+
+  @JsonProperty("otherOptions")
+  private final Map<String, Object> otherOptions;
+
+  @JsonProperty("configAntiPatterns")
+  private final Map<String, String> configAntiPatterns;
+
+  @JsonProperty("periodicStatusInterval")
+  private final long periodicStatusInterval;
+
+  @JsonProperty("platformInfo")
+  private final InsightsPlatformInfo platformInfo;
+
+  @JsonProperty("hostName")
+  private final String hostName;
+
+  @JsonProperty("driverName")
+  private String driverName;
+
+  @JsonProperty("applicationNameWasGenerated")
+  private boolean applicationNameWasGenerated;
+
+  @JsonProperty("driverVersion")
+  private String driverVersion;
+
+  @JsonProperty("dataCenters")
+  private Set<String> dataCenters;
+
+  @JsonCreator
+  private InsightsStartupData(
+      @JsonProperty("clientId") String clientId,
+      @JsonProperty("sessionId") String sessionId,
+      @JsonProperty("applicationName") String applicationName,
+      @JsonProperty("applicationVersion") String applicationVersion,
+      @JsonProperty("contactPoints") Map<String, List<String>> contactPoints,
+      @JsonProperty("initialControlConnection") String initialControlConnection,
+      @JsonProperty("protocolVersion") int protocolVersion,
+      @JsonProperty("localAddress") String localAddress,
+      @JsonProperty("executionProfiles") Map<String, SpecificExecutionProfile> executionProfiles,
+      @JsonProperty("poolSizeByHostDistance") PoolSizeByHostDistance poolSizeByHostDistance,
+      @JsonProperty("heartbeatInterval") long heartbeatInterval,
+      @JsonProperty("compression") String compression,
+      @JsonProperty("reconnectionPolicy") ReconnectionPolicyInfo reconnectionPolicy,
+      @JsonProperty("ssl") SSL ssl,
+      @JsonProperty("authProvider") AuthProviderType authProvider,
+      @JsonProperty("otherOptions") Map<String, Object> otherOptions,
+      @JsonProperty("configAntiPatterns") Map<String, String> configAntiPatterns,
+      @JsonProperty("periodicStatusInterval") long periodicStatusInterval,
+      @JsonProperty("platformInfo") InsightsPlatformInfo platformInfo,
+      @JsonProperty("hostName") String hostName,
+      @JsonProperty("driverName") String driverName,
+      @JsonProperty("applicationNameWasGenerated") boolean applicationNameWasGenerated,
+      @JsonProperty("driverVersion") String driverVersion,
+      @JsonProperty("dataCenters") Set<String> dataCenters) {
+    this.clientId = clientId;
+    this.sessionId = sessionId;
+    this.applicationName = applicationName;
+    this.applicationVersion = applicationVersion;
+    this.contactPoints = contactPoints;
+    this.initialControlConnection = initialControlConnection;
+    this.protocolVersion = protocolVersion;
+    this.localAddress = localAddress;
+    this.executionProfiles = executionProfiles;
+    this.poolSizeByHostDistance = poolSizeByHostDistance;
+    this.heartbeatInterval = heartbeatInterval;
+    this.compression = compression;
+    this.reconnectionPolicy = reconnectionPolicy;
+    this.ssl = ssl;
+    this.authProvider = authProvider;
+    this.otherOptions = otherOptions;
+    this.configAntiPatterns = configAntiPatterns;
+    this.periodicStatusInterval = periodicStatusInterval;
+    this.platformInfo = platformInfo;
+    this.hostName = hostName;
+    this.driverName = driverName;
+    this.applicationNameWasGenerated = applicationNameWasGenerated;
+    this.driverVersion = driverVersion;
+    this.dataCenters = dataCenters;
+  }
+
+  public String getClientId() {
+    return clientId;
+  }
+
+  public String getSessionId() {
+    return sessionId;
+  }
+
+  public String getApplicationName() {
+    return applicationName;
+  }
+
+  public String getApplicationVersion() {
+    return applicationVersion;
+  }
+
+  public Map<String, List<String>> getContactPoints() {
+    return contactPoints;
+  }
+
+  public String getInitialControlConnection() {
+    return initialControlConnection;
+  }
+
+  public int getProtocolVersion() {
+    return protocolVersion;
+  }
+
+  public String getLocalAddress() {
+    return localAddress;
+  }
+
+  public Map<String, SpecificExecutionProfile> getExecutionProfiles() {
+    return executionProfiles;
+  }
+
+  public PoolSizeByHostDistance getPoolSizeByHostDistance() {
+    return poolSizeByHostDistance;
+  }
+
+  public long getHeartbeatInterval() {
+    return heartbeatInterval;
+  }
+
+  public String getCompression() {
+    return compression;
+  }
+
+  public ReconnectionPolicyInfo getReconnectionPolicy() {
+    return reconnectionPolicy;
+  }
+
+  public SSL getSsl() {
+    return ssl;
+  }
+
+  public AuthProviderType getAuthProvider() {
+    return authProvider;
+  }
+
+  public Map<String, Object> getOtherOptions() {
+    return otherOptions;
+  }
+
+  public Map<String, String> getConfigAntiPatterns() {
+    return configAntiPatterns;
+  }
+
+  public long getPeriodicStatusInterval() {
+    return periodicStatusInterval;
+  }
+
+  public InsightsPlatformInfo getPlatformInfo() {
+    return platformInfo;
+  }
+
+  public String getHostName() {
+    return hostName;
+  }
+
+  public String getDriverName() {
+    return driverName;
+  }
+
+  public boolean isApplicationNameWasGenerated() {
+    return applicationNameWasGenerated;
+  }
+
+  public String getDriverVersion() {
+    return driverVersion;
+  }
+
+  public Set<String> getDataCenters() {
+    return dataCenters;
+  }
+
+  public static InsightsStartupData.Builder builder() {
+    return new InsightsStartupData.Builder();
+  }
+
+  public static class Builder {
+    private String clientId;
+    private String sessionId;
+    private String applicationName;
+    private String applicationVersion;
+    private Map<String, List<String>> contactPoints;
+    private String initialControlConnection;
+    private int protocolVersion;
+    private String localAddress;
+    private Map<String, SpecificExecutionProfile> executionProfiles;
+    private PoolSizeByHostDistance poolSizeByHostDistance;
+    private long heartbeatInterval;
+    private String compression;
+    private ReconnectionPolicyInfo reconnectionPolicy;
+    private SSL ssl;
+    private AuthProviderType authProvider;
+    private Map<String, Object> otherOptions;
+    private Map<String, String> configAntiPatterns;
+    private long periodicStatusInterval;
+    private InsightsPlatformInfo platformInfo;
+    private String hostName;
+    private String driverName;
+    private String driverVersion;
+    private boolean applicationNameWasGenerated;
+    private Set<String> dataCenters;
+
+    public InsightsStartupData build() {
+      return new InsightsStartupData(
+          clientId,
+          sessionId,
+          applicationName,
+          applicationVersion,
+          contactPoints,
+          initialControlConnection,
+          protocolVersion,
+          localAddress,
+          executionProfiles,
+          poolSizeByHostDistance,
+          heartbeatInterval,
+          compression,
+          reconnectionPolicy,
+          ssl,
+          authProvider,
+          otherOptions,
+          configAntiPatterns,
+          periodicStatusInterval,
+          platformInfo,
+          hostName,
+          driverName,
+          applicationNameWasGenerated,
+          driverVersion,
+          dataCenters);
+    }
+
+    public Builder withClientId(String clientId) {
+      this.clientId = clientId;
+      return this;
+    }
+
+    public Builder withSessionId(String id) {
+      this.sessionId = id;
+      return this;
+    }
+
+    public Builder withApplicationName(String applicationName) {
+      this.applicationName = applicationName;
+      return this;
+    }
+
+    public Builder withApplicationVersion(String applicationVersion) {
+      this.applicationVersion = applicationVersion;
+      return this;
+    }
+
+    public Builder withContactPoints(Map<String, List<String>> contactPoints) {
+      this.contactPoints = contactPoints;
+      return this;
+    }
+
+    public Builder withInitialControlConnection(String inetSocketAddress) {
+      this.initialControlConnection = inetSocketAddress;
+      return this;
+    }
+
+    public Builder withProtocolVersion(int protocolVersion) {
+      this.protocolVersion = protocolVersion;
+      return this;
+    }
+
+    public Builder withLocalAddress(String localAddress) {
+      this.localAddress = localAddress;
+      return this;
+    }
+
+    public Builder withExecutionProfiles(Map<String, SpecificExecutionProfile> executionProfiles) {
+      this.executionProfiles = executionProfiles;
+      return this;
+    }
+
+    public Builder withPoolSizeByHostDistance(PoolSizeByHostDistance poolSizeByHostDistance) {
+      this.poolSizeByHostDistance = poolSizeByHostDistance;
+      return this;
+    }
+
+    public Builder withHeartbeatInterval(long heartbeatInterval) {
+      this.heartbeatInterval = heartbeatInterval;
+      return this;
+    }
+
+    public Builder withCompression(String compression) {
+      this.compression = compression;
+      return this;
+    }
+
+    public Builder withReconnectionPolicy(ReconnectionPolicyInfo reconnectionPolicy) {
+      this.reconnectionPolicy = reconnectionPolicy;
+      return this;
+    }
+
+    public Builder withSsl(SSL ssl) {
+      this.ssl = ssl;
+      return this;
+    }
+
+    public Builder withAuthProvider(AuthProviderType authProvider) {
+      this.authProvider = authProvider;
+      return this;
+    }
+
+    public Builder withOtherOptions(Map<String, Object> otherOptions) {
+      this.otherOptions = otherOptions;
+      return this;
+    }
+
+    public Builder withConfigAntiPatterns(Map<String, String> configAntiPatterns) {
+      this.configAntiPatterns = configAntiPatterns;
+      return this;
+    }
+
+    public Builder withPeriodicStatusInterval(long periodicStatusInterval) {
+      this.periodicStatusInterval = periodicStatusInterval;
+      return this;
+    }
+
+    public Builder withPlatformInfo(InsightsPlatformInfo insightsPlatformInfo) {
+      this.platformInfo = insightsPlatformInfo;
+      return this;
+    }
+
+    public Builder withHostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
+
+    public Builder withDriverName(String driverName) {
+      this.driverName = driverName;
+      return this;
+    }
+
+    public Builder withDriverVersion(String driverVersion) {
+      this.driverVersion = driverVersion;
+      return this;
+    }
+
+    public Builder withApplicationNameWasGenerated(boolean applicationNameWasGenerated) {
+      this.applicationNameWasGenerated = applicationNameWasGenerated;
+      return this;
+    }
+
+    public Builder withDataCenters(Set<String> dataCenters) {
+      this.dataCenters = dataCenters;
+      return this;
+    }
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java
new file mode 100644
index 00000000000..6f5a135f7c4
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/InsightsStatusData.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Map;
+import java.util.Objects;
+
+public class InsightsStatusData {
+  @JsonProperty("clientId")
+  private final String clientId;
+
+  @JsonProperty("sessionId")
+  private final String sessionId;
+
+  @JsonProperty("controlConnection")
+  private final String controlConnection;
+
+  @JsonProperty("connectedNodes")
+  private final Map<String, SessionStateForNode> connectedNodes;
+
+  @JsonCreator
+  private InsightsStatusData(
+      @JsonProperty("clientId") String clientId,
+      @JsonProperty("sessionId") String sessionId,
+      @JsonProperty("controlConnection") String controlConnection,
+      @JsonProperty("connectedNodes") Map<String, SessionStateForNode> connectedNodes) {
+    this.clientId = clientId;
+    this.sessionId = sessionId;
+    this.controlConnection = controlConnection;
+    this.connectedNodes = connectedNodes;
+  }
+
+  public String getClientId() {
+    return clientId;
+  }
+
+  public String getSessionId() {
+    return sessionId;
+  }
+
+  public String getControlConnection() {
+    return controlConnection;
+  }
+
+  public Map<String, SessionStateForNode> getConnectedNodes() {
+    return connectedNodes;
+  }
+
+  @Override
+  public String toString() {
+    return "InsightsStatusData{"
+        + "clientId='"
+        + clientId
+        + '\''
+        + ", sessionId='"
+        + sessionId
+        + '\''
+        + ", controlConnection="
+        + controlConnection
+        + ", connectedNodes="
+        + connectedNodes
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof InsightsStatusData)) {
+      return false;
+    }
+    InsightsStatusData that = (InsightsStatusData) o;
+    return Objects.equals(clientId, that.clientId)
+        && Objects.equals(sessionId, that.sessionId)
+        && Objects.equals(controlConnection, that.controlConnection)
+        && Objects.equals(connectedNodes, that.connectedNodes);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(clientId, sessionId, controlConnection, connectedNodes);
+  }
+
+  public static InsightsStatusData.Builder builder() {
+    return new InsightsStatusData.Builder();
+  }
+
+  public static class Builder {
+    private String clientId;
+    private String sessionId;
+    private String controlConnection;
+    private Map<String, SessionStateForNode> connectedNodes;
+
+    public Builder withClientId(String clientId) {
+      this.clientId = clientId;
+      return this;
+    }
+
+    public Builder withSessionId(String id) {
+      this.sessionId = id;
+      return this;
+    }
+
+    public Builder withControlConnection(String controlConnection) {
+      this.controlConnection = controlConnection;
+      return this;
+    }
+
+    public Builder withConnectedNodes(Map<String, SessionStateForNode> connectedNodes) {
+      this.connectedNodes = connectedNodes;
+      return this;
+    }
+
+    public InsightsStatusData build() {
+      return new InsightsStatusData(clientId, sessionId, controlConnection, connectedNodes);
+    }
+  }
+}
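Note: both payload classes above expose fluent builders. A partial sketch of assembling a startup payload (all values are placeholders; a real payload is populated from the driver context):

```java
import com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData;
import com.datastax.dse.driver.internal.core.insights.schema.SSL;
import java.util.Collections;

public class StartupDataSketch {
  public static void main(String[] args) {
    InsightsStartupData data =
        InsightsStartupData.builder()
            .withClientId("example-client-id") // hypothetical ids and values
            .withSessionId("example-session-id")
            .withApplicationName("my-app")
            .withProtocolVersion(4)
            .withSsl(new SSL(true, true))
            .withDataCenters(Collections.singleton("dc1"))
            .build();
    System.out.println(data.getApplicationName()); // my-app
  }
}
```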
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java
new file mode 100644
index 00000000000..594583e3f28
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/LoadBalancingInfo.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights.schema;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Map;
+import java.util.Objects;
+
+public class LoadBalancingInfo {
+  @JsonProperty("type")
+  private final String type;
+
+  @JsonProperty("options")
+  private final Map<String, Object> options;
+
+  @JsonProperty("namespace")
+  private final String namespace;
+
+  @JsonCreator
+  public LoadBalancingInfo(
+      @JsonProperty("type") String type,
+      @JsonProperty("options") Map<String, Object> options,
+      @JsonProperty("namespace") String namespace) {
+    this.type = type;
+    this.options = options;
+    this.namespace = namespace;
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public Map<String, Object> getOptions() {
+    return options;
+  }
+
+  public String getNamespace() {
+    return namespace;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof LoadBalancingInfo)) {
+      return false;
+    }
+    LoadBalancingInfo that = (LoadBalancingInfo) o;
+    return Objects.equals(type, that.type)
+        && Objects.equals(options, that.options)
+        && Objects.equals(namespace, that.namespace);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(type, options, namespace);
+  }
+
+  @Override
+  public String toString() {
+    return "LoadBalancingInfo{"
+        + "type='"
+        + type
+        + '\''
+        + ", options="
+        + options
+        + ", namespace='"
+        + namespace
+        + '\''
+        + '}';
+  }
+}
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java
new file mode 100644
index 00000000000..07f76a18d40
--- /dev/null
+++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/PoolSizeByHostDistance.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +public class PoolSizeByHostDistance { + @JsonProperty("local") + private final int local; + + @JsonProperty("remote") + private final int remote; + + @JsonProperty("ignored") + private final int ignored; + + @JsonCreator + public PoolSizeByHostDistance( + @JsonProperty("local") int local, + @JsonProperty("remote") int remote, + @JsonProperty("ignored") int ignored) { + + this.local = local; + this.remote = remote; + this.ignored = ignored; + } + + public int getLocal() { + return local; + } + + public int getRemote() { + return remote; + } + + public int getIgnored() { + return ignored; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof PoolSizeByHostDistance)) { + return false; + } + PoolSizeByHostDistance that = (PoolSizeByHostDistance) o; + return local == that.local && remote == that.remote && ignored == that.ignored; + } + + @Override + public int hashCode() { + return Objects.hash(local, remote, ignored); + } + + @Override + public String toString() { + return "PoolSizeByHostDistance{" + + "local=" + + local + + ", remote=" + + remote + + ", ignored=" + + ignored + + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java new file mode 100644 index 00000000000..463c23a4325 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/ReconnectionPolicyInfo.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Map; +import java.util.Objects; + +public class ReconnectionPolicyInfo { + @JsonProperty("type") + private final String type; + + @JsonProperty("options") + private final Map<String, Object> options; + + @JsonProperty("namespace") + private final String namespace; + + @JsonCreator + public ReconnectionPolicyInfo( + @JsonProperty("type") String type, + @JsonProperty("options") Map<String, Object> options, + @JsonProperty("namespace") String namespace) { + + this.type = type; + this.options = options; + this.namespace = namespace; + } + + public String getType() { + return type; + } + + public Map<String, Object> getOptions() { + return options; + } + + public String getNamespace() { + return namespace; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReconnectionPolicyInfo)) { + return false; + } + ReconnectionPolicyInfo that = (ReconnectionPolicyInfo) o; + return Objects.equals(type, that.type) + && Objects.equals(options, that.options) + && Objects.equals(namespace, that.namespace); + } + + @Override + public int hashCode() { + return Objects.hash(type, options, namespace); + } + + @Override + public String toString() { + return "ReconnectionPolicyInfo{" + + "type='" + + type + + '\'' + + ", options=" + + options + + ", namespace='" + + namespace + + '\'' + + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java new file mode 100644 index 00000000000..debcd85c025 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SSL.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +public class SSL { + @JsonProperty("enabled") + private final boolean enabled; + + @JsonProperty("certValidation") + private final boolean certValidation; + + @JsonCreator + public SSL( + @JsonProperty("enabled") boolean enabled, + @JsonProperty("certValidation") boolean certValidation) { + this.enabled = enabled; + this.certValidation = certValidation; + } + + public boolean isEnabled() { + return enabled; + } + + public boolean isCertValidation() { + return certValidation; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SSL)) { + return false; + } + SSL that = (SSL) o; + return enabled == that.enabled && certValidation == that.certValidation; + } + + @Override + public int hashCode() { + return Objects.hash(enabled, certValidation); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java new file mode 100644 index 00000000000..8b50e5b2313 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SessionStateForNode.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +public class SessionStateForNode { + @JsonProperty("connections") + private final Integer connections; + + @JsonProperty("inFlightQueries") + private final Integer inFlightQueries; + + @JsonCreator + public SessionStateForNode( + @JsonProperty("connections") Integer connections, + @JsonProperty("inFlightQueries") Integer inFlightQueries) { + this.connections = connections; + this.inFlightQueries = inFlightQueries; + } + + public Integer getConnections() { + return connections; + } + + public Integer getInFlightQueries() { + return inFlightQueries; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SessionStateForNode)) { + return false; + } + SessionStateForNode that = (SessionStateForNode) o; + return Objects.equals(connections, that.connections) + && Objects.equals(inFlightQueries, that.inFlightQueries); + } + + @Override + public int hashCode() { + return Objects.hash(connections, inFlightQueries); + } + + @Override + public String toString() { + return "SessionStateForNode{" + + "connections=" + + connections + + ", inFlightQueries=" + + inFlightQueries + + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java new file mode 100644 index 00000000000..58652fdf885 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpecificExecutionProfile.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Map; +import java.util.Objects; + +@JsonInclude(JsonInclude.Include.NON_NULL) +public class SpecificExecutionProfile { + @JsonProperty("readTimeout") + private final Integer readTimeout; + + @JsonProperty("loadBalancing") + private final LoadBalancingInfo loadBalancing; + + @JsonProperty("speculativeExecution") + private SpeculativeExecutionInfo speculativeExecution; + + @JsonProperty("consistency") + private final String consistency; + + @JsonProperty("serialConsistency") + private final String serialConsistency; + + @JsonProperty("graphOptions") + private Map<String, Object> graphOptions; + + @JsonCreator + public SpecificExecutionProfile( + @JsonProperty("readTimeout") Integer readTimeoutMillis, + @JsonProperty("loadBalancing") LoadBalancingInfo loadBalancing, + @JsonProperty("speculativeExecution") SpeculativeExecutionInfo speculativeExecutionInfo, + @JsonProperty("consistency") String consistency, + @JsonProperty("serialConsistency") String serialConsistency, + @JsonProperty("graphOptions") Map<String, Object> graphOptions) { + readTimeout = readTimeoutMillis; + this.loadBalancing = loadBalancing; + this.speculativeExecution = speculativeExecutionInfo; + this.consistency = consistency; + this.serialConsistency = serialConsistency; + this.graphOptions = graphOptions; + } + + public Integer getReadTimeout() { + return readTimeout; + } + + public LoadBalancingInfo getLoadBalancing() { + return loadBalancing; + } + + public SpeculativeExecutionInfo getSpeculativeExecution() { + return speculativeExecution; + } + + public String getConsistency() { + return consistency; + } + + public String getSerialConsistency() { + return serialConsistency; + } + + public Map<String, Object> getGraphOptions() { + return graphOptions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SpecificExecutionProfile)) { + return false; + } + SpecificExecutionProfile that = (SpecificExecutionProfile) o; + return Objects.equals(readTimeout, that.readTimeout) + && Objects.equals(loadBalancing, that.loadBalancing) + && Objects.equals(speculativeExecution, that.speculativeExecution) + && Objects.equals(consistency, that.consistency) + && Objects.equals(serialConsistency, that.serialConsistency) + && Objects.equals(graphOptions, that.graphOptions); + } + + @Override + public int hashCode() { + return Objects.hash( + readTimeout, + loadBalancing, + speculativeExecution, + consistency, + serialConsistency, + graphOptions); + } + + @Override + public String toString() { + return "SpecificExecutionProfile{" + + "readTimeout=" + + readTimeout + + ", loadBalancing=" + + loadBalancing + + ", speculativeExecution=" + + speculativeExecution + + ", consistency='" + + consistency + + '\'' + + ", serialConsistency='" + + serialConsistency + + '\'' + + ", graphOptions=" + + graphOptions + + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java new file mode 100644 index 00000000000..779a4ed9e51 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/insights/schema/SpeculativeExecutionInfo.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights.schema; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Map; +import java.util.Objects; + +public class SpeculativeExecutionInfo { + @JsonProperty("type") + private final String type; + + @JsonProperty("options") + private final Map<String, Object> options; + + @JsonProperty("namespace") + private String namespace; + + @JsonCreator + public SpeculativeExecutionInfo( + @JsonProperty("type") String type, + @JsonProperty("options") Map<String, Object> options, + @JsonProperty("namespace") String namespace) { + this.type = type; + this.options = options; + this.namespace = namespace; + } + + public String getType() { + return type; + } + + public Map<String, Object> getOptions() { + return options; + } + + public String getNamespace() { + return namespace; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SpeculativeExecutionInfo)) { + return false; + } + SpeculativeExecutionInfo that = (SpeculativeExecutionInfo) o; + return Objects.equals(type, that.type) + && Objects.equals(options, that.options) + && Objects.equals(namespace, that.namespace); + } + + @Override + public int hashCode() { + return Objects.hash(type, options, namespace); + } + + @Override + public String toString() { + return "SpeculativeExecutionInfo{" + + "type='" + + type + + '\'' + + ", options=" + + options + + ", namespace='" + + namespace + + '\'' + + '}'; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java new file mode 100644 index 00000000000..501fa263258 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseDcInferringLoadBalancingPolicy.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.loadbalancing; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * @deprecated This class only exists for backward compatibility. It is equivalent to {@link + * DcInferringLoadBalancingPolicy}, which should now be used instead. + */ +@Deprecated +public class DseDcInferringLoadBalancingPolicy extends DcInferringLoadBalancingPolicy { + public DseDcInferringLoadBalancingPolicy( + @NonNull DriverContext context, @NonNull String profileName) { + super(context, profileName); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java new file mode 100644 index 00000000000..059a37c4774 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/loadbalancing/DseLoadBalancingPolicy.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.loadbalancing; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * @deprecated This class only exists for backward compatibility. It is equivalent to {@link + * DefaultLoadBalancingPolicy}, which should now be used instead. + */ +@Deprecated +public class DseLoadBalancingPolicy extends DefaultLoadBalancingPolicy { + public DseLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { + super(context, profileName); + } +}
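Both shims simply extend their OSS replacements, so legacy configurations that still name the DSE classes keep working unchanged; new code should reference the replacement policies directly. A hedged sketch of the programmatic route, using the driver's public config API (the datacenter name is made up for illustration):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

public class LoadBalancingConfigExample {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            // "DefaultLoadBalancingPolicy" replaces the deprecated "DseLoadBalancingPolicy"
            .withString(
                DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, "DefaultLoadBalancingPolicy")
            // hypothetical datacenter name
            .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc1")
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // use the session
    }
  }
}
```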
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java new file mode 100644 index 00000000000..52a0b846076 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseAggregateMetadata.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultAggregateMetadata; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Objects; +import java.util.Optional; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseAggregateMetadata extends DefaultAggregateMetadata + implements DseAggregateMetadata { + + @Nullable private final Boolean deterministic; + + public DefaultDseAggregateMetadata( + @NonNull CqlIdentifier keyspace, + @NonNull FunctionSignature signature, + @Nullable FunctionSignature finalFuncSignature, + @Nullable Object initCond, + @NonNull DataType returnType, + @NonNull FunctionSignature stateFuncSignature, + @NonNull DataType stateType, + @NonNull TypeCodec<Object> stateTypeCodec, + @Nullable Boolean deterministic) { + super( + keyspace, + signature, + finalFuncSignature, + initCond, + returnType, + stateFuncSignature, + stateType, + stateTypeCodec); + this.deterministic = deterministic; + } + + @Override + @Deprecated + public boolean isDeterministic() { + return deterministic != null && deterministic; + } + + @Override + @NonNull + public Optional<Boolean> getDeterministic() { + return Optional.ofNullable(deterministic); + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseAggregateMetadata) { + DseAggregateMetadata that = (DseAggregateMetadata) other; + return Objects.equals(this.getKeyspace(), that.getKeyspace()) + && Objects.equals(this.getSignature(), that.getSignature()) + && Objects.equals( + this.getFinalFuncSignature().orElse(null), that.getFinalFuncSignature().orElse(null)) + && Objects.equals(this.getInitCond().orElse(null), that.getInitCond().orElse(null)) + && Objects.equals(this.getReturnType(), that.getReturnType()) + && Objects.equals(this.getStateFuncSignature(), that.getStateFuncSignature()) + && Objects.equals(this.getStateType(), that.getStateType()) + && Objects.equals(this.deterministic, that.getDeterministic().orElse(null)) + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash( + getKeyspace(), + getSignature(), + getFinalFuncSignature(), + getInitCond(), + getReturnType(), + getStateFuncSignature(), + getStateType(), + deterministic); + } + + @Override + public String toString() { + return "Aggregate Name: " + + getSignature().getName().asCql(false) + + ", Keyspace: " + + getKeyspace().asCql(false) + + ", Return Type: " + + getReturnType().asCql(false, false) + + ", Deterministic: " + + deterministic; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java
b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java new file mode 100644 index 00000000000..2168f20fdc7 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseColumnMetadata.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseColumnMetadata extends DefaultColumnMetadata implements DseColumnMetadata { + + public DefaultDseColumnMetadata( + @NonNull CqlIdentifier keyspace, + @NonNull CqlIdentifier parent, + @NonNull CqlIdentifier name, + @NonNull DataType dataType, + boolean isStatic) { + super(keyspace, parent, name, dataType, isStatic); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java new file mode 100644 index 00000000000..e4de62f294c --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseEdgeMetadata.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; +import java.util.List; +import java.util.Objects; + +public class DefaultDseEdgeMetadata implements DseEdgeMetadata, Serializable { + + private static final long serialVersionUID = 1; + + @NonNull private final CqlIdentifier labelName; + + @NonNull private final CqlIdentifier fromTable; + @NonNull private final CqlIdentifier fromLabel; + @NonNull private final List<CqlIdentifier> fromPartitionKeyColumns; + @NonNull private final List<CqlIdentifier> fromClusteringColumns; + + @NonNull private final CqlIdentifier toTable; + @NonNull private final CqlIdentifier toLabel; + @NonNull private final List<CqlIdentifier> toPartitionKeyColumns; + @NonNull private final List<CqlIdentifier> toClusteringColumns; + + public DefaultDseEdgeMetadata( + @NonNull CqlIdentifier labelName, + @NonNull CqlIdentifier fromTable, + @NonNull CqlIdentifier fromLabel, + @NonNull List<CqlIdentifier> fromPartitionKeyColumns, + @NonNull List<CqlIdentifier> fromClusteringColumns, + @NonNull CqlIdentifier toTable, + @NonNull CqlIdentifier toLabel, + @NonNull List<CqlIdentifier> toPartitionKeyColumns, + @NonNull List<CqlIdentifier> toClusteringColumns) { + this.labelName = Preconditions.checkNotNull(labelName); + this.fromTable = Preconditions.checkNotNull(fromTable); + this.fromLabel = Preconditions.checkNotNull(fromLabel); + this.fromPartitionKeyColumns = Preconditions.checkNotNull(fromPartitionKeyColumns); + this.fromClusteringColumns = Preconditions.checkNotNull(fromClusteringColumns); + this.toTable = Preconditions.checkNotNull(toTable); + this.toLabel = Preconditions.checkNotNull(toLabel); + this.toPartitionKeyColumns = Preconditions.checkNotNull(toPartitionKeyColumns); + this.toClusteringColumns = Preconditions.checkNotNull(toClusteringColumns); + } + + @NonNull + @Override + public CqlIdentifier getLabelName() { + return labelName; + } + + @NonNull + @Override + public CqlIdentifier getFromTable() { + return fromTable; + } + + @NonNull + @Override + public CqlIdentifier getFromLabel() { + return fromLabel; + } + + @NonNull + @Override + public List<CqlIdentifier> getFromPartitionKeyColumns() { + return fromPartitionKeyColumns; + } + + @NonNull + @Override + public List<CqlIdentifier> getFromClusteringColumns() { + return fromClusteringColumns; + } + + @NonNull + @Override + public CqlIdentifier getToTable() { + return toTable; + } + + @NonNull + @Override + public CqlIdentifier getToLabel() { + return toLabel; + } + + @NonNull + @Override + public List<CqlIdentifier> getToPartitionKeyColumns() { + return toPartitionKeyColumns; + } + + @NonNull + @Override + public List<CqlIdentifier> getToClusteringColumns() { + return toClusteringColumns; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseEdgeMetadata) { + DseEdgeMetadata that = (DseEdgeMetadata) other; + return Objects.equals(this.labelName, that.getLabelName()) + && Objects.equals(this.fromTable, that.getFromTable()) + && Objects.equals(this.fromLabel, that.getFromLabel()) + && Objects.equals(this.fromPartitionKeyColumns, that.getFromPartitionKeyColumns()) + && Objects.equals(this.fromClusteringColumns, that.getFromClusteringColumns()) + && Objects.equals(this.toTable, that.getToTable()) + && Objects.equals(this.toLabel, that.getToLabel()) + && Objects.equals(this.toPartitionKeyColumns,
that.getToPartitionKeyColumns()) + && Objects.equals(this.toClusteringColumns, that.getToClusteringColumns()); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash( + labelName, + fromTable, + fromLabel, + fromPartitionKeyColumns, + fromClusteringColumns, + toTable, + toLabel, + toPartitionKeyColumns, + toClusteringColumns); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java new file mode 100644 index 00000000000..0a94491f1f7 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseFunctionMetadata.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultFunctionMetadata; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseFunctionMetadata extends DefaultFunctionMetadata + implements DseFunctionMetadata { + + @Nullable private final Boolean deterministic; + @Nullable private final Monotonicity monotonicity; + @NonNull private final List<CqlIdentifier> monotonicArgumentNames; + + public DefaultDseFunctionMetadata( + @NonNull CqlIdentifier keyspace, + @NonNull FunctionSignature signature, + @NonNull List<String> parameterNames, + @NonNull String body, + boolean calledOnNullInput, + @NonNull String language, + @NonNull DataType returnType, + @Nullable Boolean deterministic, + @Nullable Boolean monotonic, + @NonNull List<CqlIdentifier> monotonicArgumentNames) { + super(keyspace, signature, parameterNames, body, calledOnNullInput, language, returnType); + // set DSE extension attributes + this.deterministic = deterministic; + this.monotonicity = + monotonic == null + ? null + : monotonic + ? Monotonicity.FULLY_MONOTONIC + : monotonicArgumentNames.isEmpty() + ?
Monotonicity.NOT_MONOTONIC + : Monotonicity.PARTIALLY_MONOTONIC; + this.monotonicArgumentNames = ImmutableList.copyOf(monotonicArgumentNames); + } + + @Override + @Deprecated + public boolean isDeterministic() { + return deterministic != null && deterministic; + } + + @Override + public Optional<Boolean> getDeterministic() { + return Optional.ofNullable(deterministic); + } + + @Override + @Deprecated + public boolean isMonotonic() { + return monotonicity == Monotonicity.FULLY_MONOTONIC; + } + + @Override + public Optional<Monotonicity> getMonotonicity() { + return Optional.ofNullable(monotonicity); + } + + @NonNull + @Override + public List<CqlIdentifier> getMonotonicArgumentNames() { + return this.monotonicArgumentNames; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseFunctionMetadata) { + DseFunctionMetadata that = (DseFunctionMetadata) other; + return Objects.equals(this.getKeyspace(), that.getKeyspace()) + && Objects.equals(this.getSignature(), that.getSignature()) + && Objects.equals(this.getParameterNames(), that.getParameterNames()) + && Objects.equals(this.getBody(), that.getBody()) + && this.isCalledOnNullInput() == that.isCalledOnNullInput() + && Objects.equals(this.getLanguage(), that.getLanguage()) + && Objects.equals(this.getReturnType(), that.getReturnType()) + && Objects.equals(this.deterministic, that.getDeterministic().orElse(null)) + && this.monotonicity == that.getMonotonicity().orElse(null) + && Objects.equals(this.monotonicArgumentNames, that.getMonotonicArgumentNames()); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash( + getKeyspace(), + getSignature(), + getParameterNames(), + getBody(), + isCalledOnNullInput(), + getLanguage(), + getReturnType(), + deterministic, + monotonicity, + monotonicArgumentNames); + } + + @Override + public String toString() { + return "Function Name: " + + this.getSignature().getName().asCql(false) + + ", Keyspace: " + + this.getKeyspace().asCql(false) + + ", Language: " + + this.getLanguage() + + ", Return Type: " + + getReturnType().asCql(false, false) + + ", Deterministic: " + + this.deterministic + + ", Monotonicity: " + + this.monotonicity + + ", Monotonic On: " + + (this.monotonicArgumentNames.isEmpty() ? "" : this.monotonicArgumentNames.get(0)); + } +}
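The monotonicity fields above surface through the public `DseFunctionMetadata` interface, where `Optional.empty()` means the server did not report the corresponding system-table column. A hedged lookup sketch; the keyspace and function names are hypothetical, and calling `getFunction` with no parameter types matches a niladic function:

```java
import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata;
import com.datastax.oss.driver.api.core.CqlSession;

public class FunctionMetadataExample {
  static void inspect(CqlSession session) {
    session
        .getMetadata()
        .getKeyspace("ks") // hypothetical keyspace
        .flatMap(keyspace -> keyspace.getFunction("f")) // hypothetical niladic function
        .filter(DseFunctionMetadata.class::isInstance)
        .map(DseFunctionMetadata.class::cast)
        .ifPresent(
            function -> {
              // empty Optionals mean the columns were absent from the schema tables
              function.getMonotonicity().ifPresent(m -> System.out.println("Monotonicity: " + m));
              function.getDeterministic().ifPresent(d -> System.out.println("Deterministic: " + d));
            });
  }
}
```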
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java new file mode 100644 index 00000000000..c66d7934151 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseIndexMetadata.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseIndexMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultIndexMetadata; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseIndexMetadata extends DefaultIndexMetadata implements DseIndexMetadata { + + public DefaultDseIndexMetadata( + @NonNull CqlIdentifier keyspace, + @NonNull CqlIdentifier table, + @NonNull CqlIdentifier name, + @NonNull IndexKind kind, + @NonNull String target, + @NonNull Map<String, String> options) { + super(keyspace, table, name, kind, target, options); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java new file mode 100644 index 00000000000..8e54c9082e1 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseKeyspaceMetadata.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseGraphKeyspaceMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseKeyspaceMetadata implements DseGraphKeyspaceMetadata, Serializable { + + private static final long serialVersionUID = 1; + + @NonNull private final CqlIdentifier name; + private final boolean durableWrites; + private final boolean virtual; + @Nullable private final String graphEngine; + @NonNull private final Map<String, String> replication; + @NonNull private final Map<CqlIdentifier, UserDefinedType> types; + @NonNull private final Map<CqlIdentifier, TableMetadata> tables; + @NonNull private final Map<CqlIdentifier, ViewMetadata> views; + @NonNull private final Map<FunctionSignature, FunctionMetadata> functions; + @NonNull private final Map<FunctionSignature, AggregateMetadata> aggregates; + + public DefaultDseKeyspaceMetadata( + @NonNull CqlIdentifier name, + boolean durableWrites, + boolean virtual, + @Nullable String graphEngine, + @NonNull Map<String, String> replication, + @NonNull Map<CqlIdentifier, UserDefinedType> types, + @NonNull Map<CqlIdentifier, TableMetadata> tables, + @NonNull Map<CqlIdentifier, ViewMetadata> views, + @NonNull Map<FunctionSignature, FunctionMetadata> functions, + @NonNull Map<FunctionSignature, AggregateMetadata> aggregates) { + this.name = name; + this.durableWrites = durableWrites; + this.virtual = virtual; + this.graphEngine = graphEngine; + this.replication = replication; + this.types = types; + this.tables = tables; + this.views = views; + this.functions = functions; + this.aggregates = aggregates; + } + + @NonNull + @Override + public CqlIdentifier getName() { + return name; + } + + @Override + public boolean isDurableWrites() { + return durableWrites; + } + + @Override + public boolean isVirtual() { + return virtual; + } + + @NonNull + @Override + public Optional<String> getGraphEngine() { + return Optional.ofNullable(graphEngine); + } + + @NonNull + @Override + public Map<String, String> getReplication() { + return replication; + } + + @NonNull + @Override + public Map<CqlIdentifier, UserDefinedType> getUserDefinedTypes() { + return types; + } + + @NonNull + @Override + public Map<CqlIdentifier, TableMetadata> getTables() { + return tables; + } + + @NonNull + @Override + public Map<CqlIdentifier, ViewMetadata> getViews() { + return views; + } + + @NonNull + @Override + public Map<FunctionSignature, FunctionMetadata> getFunctions() { + return functions; + } + + @NonNull + @Override + public Map<FunctionSignature, AggregateMetadata> getAggregates() { + return aggregates; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseGraphKeyspaceMetadata) { + DseGraphKeyspaceMetadata that = (DseGraphKeyspaceMetadata) other; + return Objects.equals(this.name, that.getName()) + && this.durableWrites == that.isDurableWrites() + && this.virtual == that.isVirtual() + && Objects.equals(this.graphEngine, that.getGraphEngine().orElse(null)) + && Objects.equals(this.replication, that.getReplication()) + && Objects.equals(this.types, that.getUserDefinedTypes()) + && Objects.equals(this.tables, that.getTables()) + && Objects.equals(this.views, that.getViews()) + && Objects.equals(this.functions, that.getFunctions()) + &&
Objects.equals(this.aggregates, that.getAggregates()); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash( + name, + durableWrites, + virtual, + graphEngine, + replication, + types, + tables, + views, + functions, + aggregates); + } + + @Override + public boolean shallowEquals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseGraphKeyspaceMetadata) { + DseGraphKeyspaceMetadata that = (DseGraphKeyspaceMetadata) other; + return Objects.equals(this.name, that.getName()) + && this.durableWrites == that.isDurableWrites() + && Objects.equals(this.graphEngine, that.getGraphEngine().orElse(null)) + && Objects.equals(this.replication, that.getReplication()); + } else { + return false; + } + } +}
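The new `graphEngine` field is what distinguishes this class from the OSS keyspace metadata: it is only populated for graph-enabled keyspaces. A small inspection sketch; the keyspace name is hypothetical, and the engine values (e.g. "Core", "Classic") depend on the DSE version:

```java
import com.datastax.dse.driver.api.core.metadata.schema.DseGraphKeyspaceMetadata;
import com.datastax.oss.driver.api.core.CqlSession;

public class GraphKeyspaceExample {
  static void inspect(CqlSession session) {
    session
        .getMetadata()
        .getKeyspace("ks") // hypothetical keyspace name
        .filter(DseGraphKeyspaceMetadata.class::isInstance)
        .map(DseGraphKeyspaceMetadata.class::cast)
        .ifPresent(
            keyspace ->
                // empty on non-graph keyspaces
                System.out.println(
                    "Graph engine: " + keyspace.getGraphEngine().orElse("<none>")));
  }
}
```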
diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java new file mode 100644 index 00000000000..f8fb8cc10d1 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseTableMetadata.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseGraphTableMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseTableMetadata implements DseGraphTableMetadata, Serializable { + + private static final long serialVersionUID = 1; + + @NonNull private final CqlIdentifier keyspace; + @NonNull private final CqlIdentifier name; + // null for virtual tables + @Nullable private final UUID id; + private final boolean compactStorage; + private final boolean virtual; + @NonNull private final List<ColumnMetadata> partitionKey; + @NonNull private final Map<ColumnMetadata, ClusteringOrder> clusteringColumns; + @NonNull private final Map<CqlIdentifier, ColumnMetadata> columns; + @NonNull private final Map<CqlIdentifier, Object> options; + @NonNull private final Map<CqlIdentifier, IndexMetadata> indexes; + @Nullable private final DseVertexMetadata vertex; + @Nullable private final DseEdgeMetadata edge; + + public DefaultDseTableMetadata( + @NonNull CqlIdentifier keyspace, + @NonNull CqlIdentifier name, + @Nullable UUID id, + boolean compactStorage, + boolean virtual, + @NonNull List<ColumnMetadata> partitionKey, + @NonNull Map<ColumnMetadata, ClusteringOrder> clusteringColumns, + @NonNull Map<CqlIdentifier, ColumnMetadata> columns, + @NonNull Map<CqlIdentifier, Object> options, + @NonNull Map<CqlIdentifier, IndexMetadata> indexes, + @Nullable DseVertexMetadata vertex, + @Nullable DseEdgeMetadata edge) { + this.keyspace = keyspace; + this.name = name; + this.id = id; + this.compactStorage = compactStorage; + this.virtual = virtual; + this.partitionKey = partitionKey; + this.clusteringColumns = clusteringColumns; + this.columns = columns; + this.options = options; + this.indexes = indexes; + this.vertex = vertex; + this.edge = edge; + } + + @NonNull + @Override + public CqlIdentifier getKeyspace() { + return keyspace; + } + + @NonNull + @Override + public CqlIdentifier getName() { + return name; + } + + @NonNull + @Override + public Optional<UUID> getId() { + return Optional.ofNullable(id); + } + + @Override + public boolean isCompactStorage() { + return compactStorage; + } + + @Override + public boolean isVirtual() { + return virtual; + } + + @NonNull + @Override + public List<ColumnMetadata> getPartitionKey() { + return partitionKey; + } + + @NonNull + @Override + public Map<ColumnMetadata, ClusteringOrder> getClusteringColumns() { + return clusteringColumns; + } + + @NonNull + @Override + public Map<CqlIdentifier, ColumnMetadata> getColumns() { + return columns; + } + + @NonNull + @Override + public Map<CqlIdentifier, Object> getOptions() { + return options; + } + + @NonNull + @Override + public Map<CqlIdentifier, IndexMetadata> getIndexes() { + return indexes; + } + + @NonNull + @Override + public Optional<DseVertexMetadata> getVertex() { + return Optional.ofNullable(vertex); + } + + @NonNull + @Override + public Optional<DseEdgeMetadata> getEdge() { + return Optional.ofNullable(edge); + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseGraphTableMetadata) { + DseGraphTableMetadata that = (DseGraphTableMetadata) other; + return Objects.equals(this.keyspace, that.getKeyspace()) +
&& Objects.equals(this.name, that.getName()) + && Objects.equals(this.id, that.getId().orElse(null)) + && this.compactStorage == that.isCompactStorage() + && this.virtual == that.isVirtual() + && Objects.equals(this.partitionKey, that.getPartitionKey()) + && Objects.equals(this.clusteringColumns, that.getClusteringColumns()) + && Objects.equals(this.columns, that.getColumns()) + && Objects.equals(this.indexes, that.getIndexes()) + && Objects.equals(this.vertex, that.getVertex().orElse(null)) + && Objects.equals(this.edge, that.getEdge().orElse(null)); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash( + keyspace, + name, + id, + compactStorage, + virtual, + partitionKey, + clusteringColumns, + columns, + indexes, + vertex, + edge); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java new file mode 100644 index 00000000000..05ba2823704 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseVertexMetadata.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; +import java.util.Objects; + +public class DefaultDseVertexMetadata implements DseVertexMetadata, Serializable { + + private static final long serialVersionUID = 1; + + @NonNull private final CqlIdentifier labelName; + + public DefaultDseVertexMetadata(@NonNull CqlIdentifier labelName) { + this.labelName = Preconditions.checkNotNull(labelName); + } + + @NonNull + @Override + public CqlIdentifier getLabelName() { + return labelName; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DefaultDseVertexMetadata) { + DefaultDseVertexMetadata that = (DefaultDseVertexMetadata) other; + return Objects.equals(this.labelName, that.getLabelName()); + } else { + return false; + } + } + + @Override + public int hashCode() { + return labelName.hashCode(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java new file mode 100644 index 00000000000..f04b7640041 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/DefaultDseViewMetadata.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultDseViewMetadata implements DseViewMetadata, Serializable { + + private static final long serialVersionUID = 1; + + @NonNull private final CqlIdentifier keyspace; + @NonNull private final CqlIdentifier name; + @NonNull private final CqlIdentifier baseTable; + private final boolean includesAllColumns; + @Nullable private final String whereClause; + @NonNull private final UUID id; + @NonNull private final ImmutableList<ColumnMetadata> partitionKey; + @NonNull private final ImmutableMap<ColumnMetadata, ClusteringOrder> clusteringColumns; + @NonNull private final ImmutableMap<CqlIdentifier, ColumnMetadata> columns; + @NonNull private final Map<CqlIdentifier, Object> options; + + public DefaultDseViewMetadata( + @NonNull CqlIdentifier keyspace, + @NonNull CqlIdentifier name, + @NonNull CqlIdentifier baseTable, + boolean includesAllColumns, + @Nullable String whereClause, + @NonNull UUID id, + @NonNull ImmutableList<ColumnMetadata> partitionKey, + @NonNull ImmutableMap<ColumnMetadata, ClusteringOrder> clusteringColumns, + @NonNull ImmutableMap<CqlIdentifier, ColumnMetadata> columns, + @NonNull Map<CqlIdentifier, Object> options) { + this.keyspace = keyspace; + this.name = name; + this.baseTable = baseTable; + this.includesAllColumns = includesAllColumns; + this.whereClause = whereClause; + this.id = id; + this.partitionKey = partitionKey; + this.clusteringColumns = clusteringColumns; + this.columns = columns; + this.options = options; + } + + @NonNull + @Override + public CqlIdentifier getKeyspace() { + return keyspace; + } + + @NonNull + @Override + public CqlIdentifier getName() { + return name; + } + + @NonNull + @Override + public Optional<UUID> getId() { + return Optional.of(id); + } + + @NonNull + @Override + public CqlIdentifier getBaseTable() { + return baseTable; + } + + @Override + public boolean includesAllColumns() { + return includesAllColumns; + } + + @NonNull + @Override + public Optional<String> getWhereClause() { + return Optional.ofNullable(whereClause); + } + + @NonNull + @Override + public List<ColumnMetadata> getPartitionKey() { + return partitionKey; + } + + @NonNull + @Override + public Map<ColumnMetadata, ClusteringOrder> getClusteringColumns() { + return clusteringColumns; + } + + @NonNull + @Override + public Map<CqlIdentifier, ColumnMetadata> getColumns() { + return columns; + } + + @NonNull + @Override + public Map<CqlIdentifier, Object> getOptions() { + return options; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof DseViewMetadata) { + DseViewMetadata that = (DseViewMetadata) other; + return Objects.equals(this.keyspace, that.getKeyspace()) + && Objects.equals(this.name, that.getName()) + && Objects.equals(this.baseTable, that.getBaseTable()) + && this.includesAllColumns == that.includesAllColumns() + && Objects.equals(this.whereClause, that.getWhereClause().orElse(null)) + && Objects.equals(Optional.of(this.id), that.getId()) + && Objects.equals(this.partitionKey, that.getPartitionKey()) + &&
Objects.equals(this.clusteringColumns, that.getClusteringColumns()) + && Objects.equals(this.columns, that.getColumns()) + && Objects.equals(this.options, that.getOptions()); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash( + keyspace, + name, + baseTable, + includesAllColumns, + whereClause, + id, + partitionKey, + clusteringColumns, + columns, + options); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java new file mode 100644 index 00000000000..64f6cac19f0 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/ScriptHelper.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; +import java.util.List; + +public class ScriptHelper { + + public static void appendEdgeSide( + ScriptBuilder builder, + CqlIdentifier table, + CqlIdentifier label, + List<CqlIdentifier> partitionKeyColumns, + List<CqlIdentifier> clusteringColumns, + String keyword) { + builder.append(" ").append(keyword).append(label).append("("); + + if (partitionKeyColumns.size() == 1) { // PRIMARY KEY (k + builder.append(partitionKeyColumns.get(0)); + } else { // PRIMARY KEY ((k1, k2) + builder.append("("); + boolean first = true; + for (CqlIdentifier pkColumn : partitionKeyColumns) { + if (first) { + first = false; + } else { + builder.append(", "); + } + builder.append(pkColumn); + } + builder.append(")"); + } + // PRIMARY KEY (, cc1, cc2, cc3) + for (CqlIdentifier clusteringColumn : clusteringColumns) { + builder.append(", ").append(clusteringColumn); + } + builder.append(")"); + } +}
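`ScriptHelper` renders the `FROM`/`TO` side of an edge label in `DESCRIBE` output, wrapping composite partition keys in an extra pair of parentheses before appending the clustering columns. A driver-free rendition of the same bracketing rules, for illustration only; all names are made up:

```java
import java.util.Arrays;
import java.util.List;

public class EdgeSideFormatExample {
  // Mirrors the single-column vs. composite-key branching above, without ScriptBuilder.
  static String edgeSide(String label, List<String> partitionKey, List<String> clustering) {
    StringBuilder sb = new StringBuilder(label).append("(");
    if (partitionKey.size() == 1) {
      sb.append(partitionKey.get(0)); // single-column key: person(name
    } else {
      sb.append("(").append(String.join(", ", partitionKey)).append(")"); // composite: person((city, name)
    }
    for (String cc : clustering) {
      sb.append(", ").append(cc); // clustering columns follow the partition key
    }
    return sb.append(")").toString();
  }

  public static void main(String[] args) {
    // prints person((city, name), age)
    System.out.println(edgeSide("person", Arrays.asList("city", "name"), Arrays.asList("age")));
  }
}
```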
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema.parsing; + +import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseAggregateMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.AggregateParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; +import java.util.Map; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class DseAggregateParser { + + private final AggregateParser aggregateParser; + private final InternalDriverContext context; + + public DseAggregateParser(DataTypeParser dataTypeParser, InternalDriverContext context) { + this.aggregateParser = new AggregateParser(dataTypeParser, context); + this.context = context; + } + + public DseAggregateMetadata parseAggregate( + AdminRow row, + CqlIdentifier keyspaceId, + Map userDefinedTypes) { + AggregateMetadata aggregate = aggregateParser.parseAggregate(row, keyspaceId, userDefinedTypes); + // parse the DSE extended columns + final Boolean deterministic = + row.contains("deterministic") ? row.getBoolean("deterministic") : null; + + return new DefaultDseAggregateMetadata( + aggregate.getKeyspace(), + aggregate.getSignature(), + aggregate.getFinalFuncSignature().orElse(null), + aggregate.getInitCond().orElse(null), + aggregate.getReturnType(), + aggregate.getStateFuncSignature(), + aggregate.getStateType(), + context.getCodecRegistry().codecFor(aggregate.getStateType()), + deterministic); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java new file mode 100644 index 00000000000..0d88bce8740 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseFunctionParser.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema.parsing; + +import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseFunctionMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.FunctionParser; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class DseFunctionParser { + + private final FunctionParser functionParser; + + public DseFunctionParser(DataTypeParser dataTypeParser, InternalDriverContext context) { + this.functionParser = new FunctionParser(dataTypeParser, context); + } + + public DseFunctionMetadata parseFunction( + AdminRow row, + CqlIdentifier keyspaceId, + Map userDefinedTypes) { + FunctionMetadata function = functionParser.parseFunction(row, keyspaceId, userDefinedTypes); + // parse the DSE extended columns + final Boolean deterministic = + row.contains("deterministic") ? row.getBoolean("deterministic") : null; + final Boolean monotonic = row.contains("monotonic") ? row.getBoolean("monotonic") : null; + // stream the list of strings into a list of CqlIdentifiers + final List monotonicOn = + row.contains("monotonic_on") + ? row.getListOfString("monotonic_on").stream() + .map(CqlIdentifier::fromInternal) + .collect(Collectors.toList()) + : Collections.emptyList(); + + return new DefaultDseFunctionMetadata( + function.getKeyspace(), + function.getSignature(), + function.getParameterNames(), + function.getBody(), + function.isCalledOnNullInput(), + function.getLanguage(), + function.getReturnType(), + deterministic, + monotonic, + monotonicOn); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java new file mode 100644 index 00000000000..ca7fb74a746 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseSchemaParser.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema.parsing; + +import com.datastax.dse.driver.api.core.metadata.schema.DseAggregateMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseKeyspaceMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseTableMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.CassandraSchemaParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SimpleJsonParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.UserDefinedTypeParser; +import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; +import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; +import com.datastax.oss.driver.internal.core.util.NanoTime; +import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; +import java.util.Collections; +import java.util.Map; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Default parser implementation for DSE. + * + *
<p>
For modularity, the code for each element row is split into separate classes (schema stuff is + * not on the hot path, so creating a few extra objects doesn't matter). + */ +@ThreadSafe +public class DseSchemaParser implements SchemaParser { + + private static final Logger LOG = LoggerFactory.getLogger(CassandraSchemaParser.class); + + private final SchemaRows rows; + private final UserDefinedTypeParser userDefinedTypeParser; + private final DseTableParser tableParser; + private final DseViewParser viewParser; + private final DseFunctionParser functionParser; + private final DseAggregateParser aggregateParser; + private final String logPrefix; + private final long startTimeNs = System.nanoTime(); + + public DseSchemaParser(SchemaRows rows, InternalDriverContext context) { + this.rows = rows; + this.logPrefix = context.getSessionName(); + + this.userDefinedTypeParser = new UserDefinedTypeParser(rows.dataTypeParser(), context); + this.tableParser = new DseTableParser(rows, context); + this.viewParser = new DseViewParser(rows, context); + this.functionParser = new DseFunctionParser(rows.dataTypeParser(), context); + this.aggregateParser = new DseAggregateParser(rows.dataTypeParser(), context); + } + + @Override + public SchemaRefresh parse() { + ImmutableMap.Builder keyspacesBuilder = ImmutableMap.builder(); + for (AdminRow row : rows.keyspaces()) { + DseKeyspaceMetadata keyspace = parseKeyspace(row); + keyspacesBuilder.put(keyspace.getName(), keyspace); + } + for (AdminRow row : rows.virtualKeyspaces()) { + DseKeyspaceMetadata keyspace = parseVirtualKeyspace(row); + keyspacesBuilder.put(keyspace.getName(), keyspace); + } + SchemaRefresh refresh = new SchemaRefresh(keyspacesBuilder.build()); + LOG.debug("[{}] Schema parsing took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); + return refresh; + } + + private DseKeyspaceMetadata parseKeyspace(AdminRow keyspaceRow) { + + // Cassandra <= 2.2 + // CREATE TABLE system.schema_keyspaces ( + // keyspace_name text PRIMARY KEY, + // durable_writes boolean, + // strategy_class text, + // strategy_options text + // ) + // + // Cassandra >= 3.0: + // CREATE TABLE system_schema.keyspaces ( + // keyspace_name text PRIMARY KEY, + // durable_writes boolean, + // replication frozen> + // ) + // + // DSE >= 6.8: same as Cassandra 3 + graph_engine text + CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); + boolean durableWrites = + MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); + String graphEngine = keyspaceRow.getString("graph_engine"); + + Map replicationOptions; + if (keyspaceRow.contains("strategy_class")) { + String strategyClass = keyspaceRow.getString("strategy_class"); + Map strategyOptions = + SimpleJsonParser.parseStringMap(keyspaceRow.getString("strategy_options")); + replicationOptions = + ImmutableMap.builder() + .putAll(strategyOptions) + .put("class", strategyClass) + .build(); + } else { + replicationOptions = keyspaceRow.getMapOfStringToString("replication"); + } + + Map types = parseTypes(keyspaceId); + + return new DefaultDseKeyspaceMetadata( + keyspaceId, + durableWrites, + false, + graphEngine, + replicationOptions, + types, + parseTables(keyspaceId, types), + parseViews(keyspaceId, types), + parseFunctions(keyspaceId, types), + parseAggregates(keyspaceId, types)); + } + + private Map parseTypes(CqlIdentifier keyspaceId) { + return userDefinedTypeParser.parse(rows.types().get(keyspaceId), keyspaceId); + } + + private Map parseTables( + CqlIdentifier keyspaceId, 
Map types) { + ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); + Multimap vertices = rows.vertices().get(keyspaceId); + Multimap edges = rows.edges().get(keyspaceId); + for (AdminRow tableRow : rows.tables().get(keyspaceId)) { + DseTableMetadata table = tableParser.parseTable(tableRow, keyspaceId, types, vertices, edges); + if (table != null) { + tablesBuilder.put(table.getName(), table); + } + } + return tablesBuilder.build(); + } + + private Map parseViews( + CqlIdentifier keyspaceId, Map types) { + ImmutableMap.Builder viewsBuilder = ImmutableMap.builder(); + for (AdminRow viewRow : rows.views().get(keyspaceId)) { + DseViewMetadata view = viewParser.parseView(viewRow, keyspaceId, types); + if (view != null) { + viewsBuilder.put(view.getName(), view); + } + } + return viewsBuilder.build(); + } + + private Map parseFunctions( + CqlIdentifier keyspaceId, Map types) { + ImmutableMap.Builder functionsBuilder = + ImmutableMap.builder(); + for (AdminRow functionRow : rows.functions().get(keyspaceId)) { + DseFunctionMetadata function = functionParser.parseFunction(functionRow, keyspaceId, types); + if (function != null) { + functionsBuilder.put(function.getSignature(), function); + } + } + return functionsBuilder.build(); + } + + private Map parseAggregates( + CqlIdentifier keyspaceId, Map types) { + ImmutableMap.Builder aggregatesBuilder = + ImmutableMap.builder(); + for (AdminRow aggregateRow : rows.aggregates().get(keyspaceId)) { + DseAggregateMetadata aggregate = + aggregateParser.parseAggregate(aggregateRow, keyspaceId, types); + if (aggregate != null) { + aggregatesBuilder.put(aggregate.getSignature(), aggregate); + } + } + return aggregatesBuilder.build(); + } + + private DseKeyspaceMetadata parseVirtualKeyspace(AdminRow keyspaceRow) { + + CqlIdentifier keyspaceId = CqlIdentifier.fromInternal(keyspaceRow.getString("keyspace_name")); + boolean durableWrites = + MoreObjects.firstNonNull(keyspaceRow.getBoolean("durable_writes"), false); + + return new DefaultDseKeyspaceMetadata( + keyspaceId, + durableWrites, + true, + null, + Collections.emptyMap(), + Collections.emptyMap(), + parseVirtualTables(keyspaceId), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap()); + } + + private Map parseVirtualTables(CqlIdentifier keyspaceId) { + ImmutableMap.Builder tablesBuilder = ImmutableMap.builder(); + for (AdminRow tableRow : rows.virtualTables().get(keyspaceId)) { + DseTableMetadata table = tableParser.parseVirtualTable(tableRow, keyspaceId); + if (table != null) { + tablesBuilder.put(table.getName(), table); + } + } + return tablesBuilder.build(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java new file mode 100644 index 00000000000..7fd4a5f0167 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseTableParser.java @@ -0,0 +1,425 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.metadata.schema.parsing; + +import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseIndexMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseTableMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseColumnMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseEdgeMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseIndexMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseVertexMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; +import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.ListType; +import com.datastax.oss.driver.api.core.type.MapType; +import com.datastax.oss.driver.api.core.type.SetType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.CqlIdentifiers; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameCompositeParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RawColumn; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.TableParser; +import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; +import com.datastax.oss.driver.internal.core.util.Loggers; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; +import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class DseTableParser extends RelationParser { + + private static final Logger LOG = LoggerFactory.getLogger(TableParser.class); + + public DseTableParser(SchemaRows rows, InternalDriverContext context) { + super(rows, context); + } + + public DseTableMetadata parseTable( + AdminRow tableRow, + CqlIdentifier keyspaceId, + Map 
userTypes, + Multimap vertices, + Multimap edges) { + // Cassandra <= 2.2: + // CREATE TABLE system.schema_columnfamilies ( + // keyspace_name text, + // columnfamily_name text, + // bloom_filter_fp_chance double, + // caching text, + // cf_id uuid, + // column_aliases text, (2.1 only) + // comment text, + // compaction_strategy_class text, + // compaction_strategy_options text, + // comparator text, + // compression_parameters text, + // default_time_to_live int, + // default_validator text, + // dropped_columns map, + // gc_grace_seconds int, + // index_interval int, + // is_dense boolean, (2.1 only) + // key_aliases text, (2.1 only) + // key_validator text, + // local_read_repair_chance double, + // max_compaction_threshold int, + // max_index_interval int, + // memtable_flush_period_in_ms int, + // min_compaction_threshold int, + // min_index_interval int, + // read_repair_chance double, + // speculative_retry text, + // subcomparator text, + // type text, + // value_alias text, (2.1 only) + // PRIMARY KEY (keyspace_name, columnfamily_name) + // ) WITH CLUSTERING ORDER BY (columnfamily_name ASC) + // + // Cassandra 3.0: + // CREATE TABLE system_schema.tables ( + // keyspace_name text, + // table_name text, + // bloom_filter_fp_chance double, + // caching frozen>, + // cdc boolean, + // comment text, + // compaction frozen>, + // compression frozen>, + // crc_check_chance double, + // dclocal_read_repair_chance double, + // default_time_to_live int, + // extensions frozen>, + // flags frozen>, + // gc_grace_seconds int, + // id uuid, + // max_index_interval int, + // memtable_flush_period_in_ms int, + // min_index_interval int, + // read_repair_chance double, + // speculative_retry text, + // PRIMARY KEY (keyspace_name, table_name) + // ) WITH CLUSTERING ORDER BY (table_name ASC) + CqlIdentifier tableId = + CqlIdentifier.fromInternal( + tableRow.getString( + tableRow.contains("table_name") ? "table_name" : "columnfamily_name")); + + UUID uuid = tableRow.contains("id") ? 
tableRow.getUuid("id") : tableRow.getUuid("cf_id"); + + List rawColumns = + RawColumn.toRawColumns( + rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); + if (rawColumns.isEmpty()) { + LOG.warn( + "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", + logPrefix, + keyspaceId, + tableId); + return null; + } + + boolean isCompactStorage; + if (tableRow.contains("flags")) { + Set flags = tableRow.getSetOfString("flags"); + boolean isDense = flags.contains("dense"); + boolean isSuper = flags.contains("super"); + boolean isCompound = flags.contains("compound"); + isCompactStorage = isSuper || isDense || !isCompound; + boolean isStaticCompact = !isSuper && !isDense && !isCompound; + if (isStaticCompact) { + RawColumn.pruneStaticCompactTableColumns(rawColumns); + } else if (isDense) { + RawColumn.pruneDenseTableColumnsV3(rawColumns); + } + } else { + boolean isDense = tableRow.getBoolean("is_dense"); + if (isDense) { + RawColumn.pruneDenseTableColumnsV2(rawColumns); + } + DataTypeClassNameCompositeParser.ParseResult comparator = + new DataTypeClassNameCompositeParser() + .parseWithComposite(tableRow.getString("comparator"), keyspaceId, userTypes, context); + isCompactStorage = isDense || !comparator.isComposite; + } + + Collections.sort(rawColumns); + ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); + ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); + ImmutableMap.Builder clusteringColumnsBuilder = + ImmutableMap.builder(); + ImmutableMap.Builder indexesBuilder = ImmutableMap.builder(); + + for (RawColumn raw : rawColumns) { + DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); + DseColumnMetadata column = + new DefaultDseColumnMetadata( + keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); + switch (raw.kind) { + case RawColumn.KIND_PARTITION_KEY: + partitionKeyBuilder.add(column); + break; + case RawColumn.KIND_CLUSTERING_COLUMN: + clusteringColumnsBuilder.put( + column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); + break; + default: + // nothing to do + } + allColumnsBuilder.put(column.getName(), column); + + DseIndexMetadata index = buildLegacyIndex(raw, column); + if (index != null) { + indexesBuilder.put(index.getName(), index); + } + } + + Map options; + try { + options = parseOptions(tableRow); + } catch (Exception e) { + // Options change the most often, so be especially lenient if anything goes wrong. 
+ Loggers.warnWithException( + LOG, + "[{}] Error while parsing options for {}.{}, getOptions() will be empty", + logPrefix, + keyspaceId, + tableId, + e); + options = Collections.emptyMap(); + } + + Collection indexRows = + rows.indexes().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId); + for (AdminRow indexRow : indexRows) { + DseIndexMetadata index = buildModernIndex(keyspaceId, tableId, indexRow); + indexesBuilder.put(index.getName(), index); + } + + return new DefaultDseTableMetadata( + keyspaceId, + tableId, + uuid, + isCompactStorage, + false, + partitionKeyBuilder.build(), + clusteringColumnsBuilder.build(), + allColumnsBuilder.build(), + options, + indexesBuilder.build(), + buildVertex(tableId, vertices), + buildEdge(tableId, edges, vertices)); + } + + DseTableMetadata parseVirtualTable(AdminRow tableRow, CqlIdentifier keyspaceId) { + + CqlIdentifier tableId = CqlIdentifier.fromInternal(tableRow.getString("table_name")); + + List rawColumns = + RawColumn.toRawColumns( + rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); + if (rawColumns.isEmpty()) { + LOG.warn( + "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", + logPrefix, + keyspaceId, + tableId); + return null; + } + + Collections.sort(rawColumns); + ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); + ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); + ImmutableMap.Builder clusteringColumnsBuilder = + ImmutableMap.builder(); + + for (RawColumn raw : rawColumns) { + DataType dataType = + rows.dataTypeParser().parse(keyspaceId, raw.dataType, Collections.emptyMap(), context); + DseColumnMetadata column = + new DefaultDseColumnMetadata( + keyspaceId, tableId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); + switch (raw.kind) { + case RawColumn.KIND_PARTITION_KEY: + partitionKeyBuilder.add(column); + break; + case RawColumn.KIND_CLUSTERING_COLUMN: + clusteringColumnsBuilder.put( + column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); + break; + default: + } + + allColumnsBuilder.put(column.getName(), column); + } + + return new DefaultDseTableMetadata( + keyspaceId, + tableId, + null, + false, + true, + partitionKeyBuilder.build(), + clusteringColumnsBuilder.build(), + allColumnsBuilder.build(), + Collections.emptyMap(), + Collections.emptyMap(), + null, + null); + } + + // In C*<=2.2, index information is stored alongside the column. 
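+ // For reference, system.schema_columns had index_name, index_type and index_options fields on each column row; they surface here through RawColumn.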
+ private DseIndexMetadata buildLegacyIndex(RawColumn raw, ColumnMetadata column) { + if (raw.indexName == null) { + return null; + } + return new DefaultDseIndexMetadata( + column.getKeyspace(), + column.getParent(), + CqlIdentifier.fromInternal(raw.indexName), + IndexKind.valueOf(raw.indexType), + buildLegacyIndexTarget(column, raw.indexOptions), + raw.indexOptions); + } + + private static String buildLegacyIndexTarget(ColumnMetadata column, Map options) { + String columnName = column.getName().asCql(true); + DataType columnType = column.getType(); + if (options.containsKey("index_keys")) { + return String.format("keys(%s)", columnName); + } + if (options.containsKey("index_keys_and_values")) { + return String.format("entries(%s)", columnName); + } + if ((columnType instanceof ListType && ((ListType) columnType).isFrozen()) + || (columnType instanceof SetType && ((SetType) columnType).isFrozen()) + || (columnType instanceof MapType && ((MapType) columnType).isFrozen())) { + return String.format("full(%s)", columnName); + } + // Note: the keyword 'values' is not accepted as a valid index target function until 3.0 + return columnName; + } + + // In C*>=3.0, index information is stored in a dedicated table: + // CREATE TABLE system_schema.indexes ( + // keyspace_name text, + // table_name text, + // index_name text, + // kind text, + // options frozen>, + // PRIMARY KEY (keyspace_name, table_name, index_name) + // ) WITH CLUSTERING ORDER BY (table_name ASC, index_name ASC) + private DseIndexMetadata buildModernIndex( + CqlIdentifier keyspaceId, CqlIdentifier tableId, AdminRow row) { + CqlIdentifier name = CqlIdentifier.fromInternal(row.getString("index_name")); + IndexKind kind = IndexKind.valueOf(row.getString("kind")); + Map options = row.getMapOfStringToString("options"); + String target = options.get("target"); + return new DefaultDseIndexMetadata(keyspaceId, tableId, name, kind, target, options); + } + + private DseVertexMetadata buildVertex( + CqlIdentifier tableId, Multimap keyspaceVertices) { + + if (keyspaceVertices == null) { + return null; + } + Collection tableVertices = keyspaceVertices.get(tableId); + if (tableVertices == null || tableVertices.isEmpty()) { + return null; + } + + AdminRow row = tableVertices.iterator().next(); + return new DefaultDseVertexMetadata(getLabel(row)); + } + + private DseEdgeMetadata buildEdge( + CqlIdentifier tableId, + Multimap keyspaceEdges, + Multimap keyspaceVertices) { + + if (keyspaceEdges == null) { + return null; + } + + Collection tableEdges = keyspaceEdges.get(tableId); + if (tableEdges == null || tableEdges.isEmpty()) { + return null; + } + + AdminRow row = tableEdges.iterator().next(); + + CqlIdentifier fromTable = CqlIdentifier.fromInternal(row.getString("from_table")); + + CqlIdentifier toTable = CqlIdentifier.fromInternal(row.getString("to_table")); + + return new DefaultDseEdgeMetadata( + getLabel(row), + fromTable, + findVertexLabel(fromTable, keyspaceVertices, "incoming"), + CqlIdentifiers.wrapInternal(row.getListOfString("from_partition_key_columns")), + CqlIdentifiers.wrapInternal(row.getListOfString("from_clustering_columns")), + toTable, + findVertexLabel(toTable, keyspaceVertices, "outgoing"), + CqlIdentifiers.wrapInternal(row.getListOfString("to_partition_key_columns")), + CqlIdentifiers.wrapInternal(row.getListOfString("to_clustering_columns"))); + } + + private CqlIdentifier getLabel(AdminRow row) { + String rawLabel = row.getString("label_name"); + return (rawLabel == null || rawLabel.isEmpty()) ? 
null : CqlIdentifier.fromInternal(rawLabel); + } + + // system_schema.edges only contains vertex table names. We also expose the labels in our metadata + // objects, so we need to look them up in system_schema.vertices. + private CqlIdentifier findVertexLabel( + CqlIdentifier table, + Multimap keyspaceVertices, + String directionForErrorMessage) { + Collection tableVertices = + (keyspaceVertices == null) ? null : keyspaceVertices.get(table); + if (tableVertices == null || tableVertices.isEmpty()) { + throw new IllegalArgumentException( + String.format( + "Missing vertex definition for %s table %s", + directionForErrorMessage, table.asCql(true))); + } + + AdminRow row = tableVertices.iterator().next(); + return getLabel(row); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java new file mode 100644 index 00000000000..07a1e2b5c39 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/metadata/schema/parsing/DseViewParser.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.metadata.schema.parsing; + +import com.datastax.dse.driver.api.core.metadata.schema.DseColumnMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseViewMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseColumnMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseViewMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RawColumn; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; +import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; +import com.datastax.oss.driver.internal.core.util.Loggers; +import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class DseViewParser extends RelationParser { + + private static final Logger LOG = LoggerFactory.getLogger(DseViewParser.class); + + public DseViewParser(SchemaRows rows, InternalDriverContext context) { + super(rows, context); + } + + public DseViewMetadata parseView( + AdminRow viewRow, CqlIdentifier keyspaceId, Map userTypes) { + // Cassandra 3.0 (no views in earlier versions): + // CREATE TABLE system_schema.views ( + // keyspace_name text, + // view_name text, + // base_table_id uuid, + // base_table_name text, + // bloom_filter_fp_chance double, + // caching frozen>, + // cdc boolean, + // comment text, + // compaction frozen>, + // compression frozen>, + // crc_check_chance double, + // dclocal_read_repair_chance double, + // default_time_to_live int, + // extensions frozen>, + // gc_grace_seconds int, + // id uuid, + // include_all_columns boolean, + // max_index_interval int, + // memtable_flush_period_in_ms int, + // min_index_interval int, + // read_repair_chance double, + // speculative_retry text, + // where_clause text, + // PRIMARY KEY (keyspace_name, view_name) + // ) WITH CLUSTERING ORDER BY (view_name ASC) + CqlIdentifier viewId = CqlIdentifier.fromInternal(viewRow.getString("view_name")); + + UUID uuid = viewRow.getUuid("id"); + CqlIdentifier baseTableId = CqlIdentifier.fromInternal(viewRow.getString("base_table_name")); + boolean includesAllColumns = + MoreObjects.firstNonNull(viewRow.getBoolean("include_all_columns"), false); + String whereClause = viewRow.getString("where_clause"); + + List rawColumns = + RawColumn.toRawColumns( + rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId)); + if (rawColumns.isEmpty()) { + LOG.warn( + "[{}] Processing VIEW refresh for {}.{} but found no matching rows, skipping", + logPrefix, + keyspaceId, + viewId); + return null; + } + + 
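+ // Sort the raw columns (RawColumn is Comparable) so that they are visited in a deterministic order, key columns first.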
Collections.sort(rawColumns); + ImmutableMap.Builder allColumnsBuilder = ImmutableMap.builder(); + ImmutableList.Builder partitionKeyBuilder = ImmutableList.builder(); + ImmutableMap.Builder clusteringColumnsBuilder = + ImmutableMap.builder(); + + for (RawColumn raw : rawColumns) { + DataType dataType = rows.dataTypeParser().parse(keyspaceId, raw.dataType, userTypes, context); + DseColumnMetadata column = + new DefaultDseColumnMetadata( + keyspaceId, viewId, raw.name, dataType, raw.kind.equals(RawColumn.KIND_STATIC)); + switch (raw.kind) { + case RawColumn.KIND_PARTITION_KEY: + partitionKeyBuilder.add(column); + break; + case RawColumn.KIND_CLUSTERING_COLUMN: + clusteringColumnsBuilder.put( + column, raw.reversed ? ClusteringOrder.DESC : ClusteringOrder.ASC); + break; + default: + // nothing to do + } + allColumnsBuilder.put(column.getName(), column); + } + + Map options; + try { + options = parseOptions(viewRow); + } catch (Exception e) { + // Options change the most often, so be especially lenient if anything goes wrong. + Loggers.warnWithException( + LOG, + "[{}] Error while parsing options for {}.{}, getOptions() will be empty", + logPrefix, + keyspaceId, + viewId, + e); + options = Collections.emptyMap(); + } + + return new DefaultDseViewMetadata( + keyspaceId, + viewId, + baseTableId, + includesAllColumns, + whereClause, + uuid, + partitionKeyBuilder.build(), + clusteringColumnsBuilder.build(), + allColumnsBuilder.build(), + options); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java new file mode 100644 index 00000000000..13238519e06 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodec.java @@ -0,0 +1,292 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.protocol; + +import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; +import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import com.datastax.oss.protocol.internal.PrimitiveCodec; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.zip.CRC32; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; + +/** + * Minimal implementation of {@link PrimitiveCodec} for Tinkerpop {@link Buffer} instances. + * + *
<p>
This approach represents a temporary design compromise. PrimitiveCodec is primarily used for + * handling data directly from Netty, a task satisfied by {@link ByteBufPrimitiveCodec}. But + * PrimitiveCodec is also used to implement graph serialization for some of the "dynamic" types + * (notably UDTs and tuples). Since we're converting graph serialization to use the new Tinkerpop + * Buffer API, we need just enough of a PrimitiveCodec implementation to satisfy the needs of graph + * serialization... and nothing more. + * + *
<p>
A more explicit approach would be to change graph serialization to use a different interface, + * some kind of subset of PrimitiveCodec.... and then make PrimitiveCodec extend this interface. + * This is left as future work for now since it involves changes to the native-protocol lib(s). + */ +public class TinkerpopBufferPrimitiveCodec implements PrimitiveCodec { + + private final DseNettyBufferFactory factory; + + public TinkerpopBufferPrimitiveCodec(DseNettyBufferFactory factory) { + this.factory = factory; + } + + @Override + public Buffer allocate(int size) { + // Note: we use io() here to match up to what ByteBufPrimitiveCodec does, but be warned that + // ByteBufs created in this way don't support the array() method used elsewhere in this codec + // (readString() specifically). As such usage of this method to create Buffer instances is + // discouraged; we have a factory for that. + return this.factory.io(size, size); + } + + @Override + public void release(Buffer toRelease) { + toRelease.release(); + } + + @Override + public int sizeOf(Buffer toMeasure) { + return toMeasure.readableBytes(); + } + + // TODO + @Override + public Buffer concat(Buffer left, Buffer right) { + boolean leftReadable = left.readableBytes() > 0; + boolean rightReadable = right.readableBytes() > 0; + if (!(leftReadable || rightReadable)) { + return factory.heap(); + } + if (!leftReadable) { + return right; + } + if (!rightReadable) { + return left; + } + Buffer rv = factory.composite(left, right); + // c.readerIndex() is 0, which is the first readable byte in left + rv.writerIndex( + left.writerIndex() - left.readerIndex() + right.writerIndex() - right.readerIndex()); + return rv; + } + + @Override + public void markReaderIndex(Buffer source) { + throw new UnsupportedOperationException(); + } + + @Override + public void resetReaderIndex(Buffer source) { + throw new UnsupportedOperationException(); + } + + @Override + public byte readByte(Buffer source) { + return source.readByte(); + } + + @Override + public int readInt(Buffer source) { + return source.readInt(); + } + + @Override + public int readInt(Buffer source, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public InetAddress readInetAddr(Buffer source) { + int length = readByte(source) & 0xFF; + byte[] bytes = new byte[length]; + source.readBytes(bytes); + return newInetAddress(bytes); + } + + @Override + public long readLong(Buffer source) { + return source.readLong(); + } + + @Override + public int readUnsignedShort(Buffer source) { + return source.readShort() & 0xFFFF; + } + + @Override + public ByteBuffer readBytes(Buffer source) { + int length = readInt(source); + if (length < 0) return null; + return source.nioBuffer(source.readerIndex(), length); + } + + @Override + public byte[] readShortBytes(Buffer source) { + try { + int length = readUnsignedShort(source); + byte[] bytes = new byte[length]; + source.readBytes(bytes); + return bytes; + } catch (IndexOutOfBoundsException e) { + throw new IllegalArgumentException( + "Not enough bytes to read a byte array preceded by its 2 bytes length"); + } + } + + // Copy of PrimitiveCodec impl + @Override + public String readString(Buffer source) { + int length = readUnsignedShort(source); + return readString(source, length); + } + + @Override + public String readLongString(Buffer source) { + int length = readInt(source); + return readString(source, length); + } + + @Override + public Buffer readRetainedSlice(Buffer source, int sliceLength) { + throw new 
UnsupportedOperationException(); + } + + @Override + public void updateCrc(Buffer source, CRC32 crc) { + throw new UnsupportedOperationException(); + } + + @Override + public void writeByte(byte b, Buffer dest) { + dest.writeByte(b); + } + + @Override + public void writeInt(int i, Buffer dest) { + dest.writeInt(i); + } + + @Override + public void writeInetAddr(InetAddress address, Buffer dest) { + byte[] bytes = address.getAddress(); + writeByte((byte) bytes.length, dest); + dest.writeBytes(bytes); + } + + @Override + public void writeLong(long l, Buffer dest) { + dest.writeLong(l); + } + + @Override + public void writeUnsignedShort(int i, Buffer dest) { + dest.writeShort(i); + } + + // Copy of PrimitiveCodec impl + @Override + public void writeString(String s, Buffer dest) { + + byte[] bytes = s.getBytes(Charsets.UTF_8); + writeUnsignedShort(bytes.length, dest); + dest.writeBytes(bytes); + } + + @Override + public void writeLongString(String s, Buffer dest) { + byte[] bytes = s.getBytes(Charsets.UTF_8); + writeInt(bytes.length, dest); + dest.writeBytes(bytes); + } + + @Override + public void writeBytes(ByteBuffer bytes, Buffer dest) { + if (bytes == null) { + writeInt(-1, dest); + } else { + writeInt(bytes.remaining(), dest); + dest.writeBytes(bytes.duplicate()); + } + } + + @Override + public void writeBytes(byte[] bytes, Buffer dest) { + if (bytes == null) { + writeInt(-1, dest); + } else { + writeInt(bytes.length, dest); + dest.writeBytes(bytes); + } + } + + @Override + public void writeShortBytes(byte[] bytes, Buffer dest) { + writeUnsignedShort(bytes.length, dest); + dest.writeBytes(bytes); + } + + // Based on PrimitiveCodec impl, although that method leverages some + // Netty built-ins which we have to do manually here + private static String readString(Buffer buff, int length) { + try { + + // Basically what io.netty.buffer.ByteBufUtil.decodeString() does minus some extra + // ByteBuf-specific ops + int offset; + byte[] bytes; + ByteBuffer byteBuff = buff.nioBuffer(); + if (byteBuff.hasArray()) { + + bytes = byteBuff.array(); + offset = byteBuff.arrayOffset(); + } else { + + bytes = new byte[length]; + byteBuff.get(bytes, 0, length); + offset = 0; + } + + String str = new String(bytes, offset, length, Charsets.UTF_8); + + // Ops against the NIO buffers don't impact the read/write indexes for he Buffer + // itself so we have to do that manually + buff.readerIndex(buff.readerIndex() + length); + return str; + } catch (IndexOutOfBoundsException e) { + throw new IllegalArgumentException( + "Not enough bytes to read an UTF-8 serialized string of size " + length, e); + } + } + + // TODO: Code below copied directly from ByteBufPrimitiveCodec, probably want to consolidate this + // somewhere + private static InetAddress newInetAddress(byte[] bytes) { + try { + return InetAddress.getByAddress(bytes); + } catch (UnknownHostException e) { + // Per the Javadoc, the only way this can happen is if the length is illegal + throw new IllegalArgumentException( + String.format("Invalid address length: %d (%s)", bytes.length, Arrays.toString(bytes))); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java b/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java new file mode 100644 index 00000000000..15e278260c5 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/search/DateRangeUtil.java @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.search; + +import com.datastax.dse.driver.api.core.data.time.DateRangePrecision; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.text.ParseException; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalAdjusters; +import java.util.Calendar; +import java.util.Locale; +import java.util.Map; +import java.util.TimeZone; + +public class DateRangeUtil { + + /** Sets all the fields smaller than the given unit to their lowest possible value. */ + @NonNull + public static ZonedDateTime roundDown(@NonNull ZonedDateTime date, @NonNull ChronoUnit unit) { + switch (unit) { + case YEARS: + return date.with(TemporalAdjusters.firstDayOfYear()).truncatedTo(ChronoUnit.DAYS); + case MONTHS: + return date.with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS); + case DAYS: + case HOURS: + case MINUTES: + case SECONDS: + case MILLIS: + return date.truncatedTo(unit); + default: + throw new IllegalArgumentException("Unsupported unit for rounding: " + unit); + } + } + + /** Sets all the fields smaller than the given unit to their highest possible value. */ + @NonNull + public static ZonedDateTime roundUp(@NonNull ZonedDateTime date, @NonNull ChronoUnit unit) { + return roundDown(date, unit) + .plus(1, unit) + // Even though ZDT has nanosecond-precision, DSE only rounds to millisecond precision so be + // consistent with that + .minus(1, ChronoUnit.MILLIS); + } + + /** + * Parses the given string as a date in a range bound. + * + *
<p>
This method deliberately uses legacy time APIs, in order to be as close as possible to the + * server-side parsing logic. We want the client to behave exactly like the server, i.e. parsing a + * date locally and inlining it in a CQL query should always yield the same result as binding the + * date as a value. + */ + public static Calendar parseCalendar(String source) throws ParseException { + // The contents of this method are based on Lucene's DateRangePrefixTree#parseCalendar, released + // under the Apache License, Version 2.0. + // Following is the original notice from that file: + + // Licensed to the Apache Software Foundation (ASF) under one or more + // contributor license agreements. See the NOTICE file distributed with + // this work for additional information regarding copyright ownership. + // The ASF licenses this file to You under the Apache License, Version 2.0 + // (the "License"); you may not use this file except in compliance with + // the License. You may obtain a copy of the License at + // + // http://www.apache.org/licenses/LICENSE-2.0 + // + // Unless required by applicable law or agreed to in writing, software + // distributed under the License is distributed on an "AS IS" BASIS, + // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + // See the License for the specific language governing permissions and + // limitations under the License. + + if (source == null || source.isEmpty()) { + throw new IllegalArgumentException("Can't parse a null or blank string"); + } + + Calendar calendar = newCalendar(); + if (source.equals("*")) { + return calendar; + } + int offset = 0; // a pointer + try { + // year & era: + int lastOffset = + (source.charAt(source.length() - 1) == 'Z') ? source.length() - 1 : source.length(); + int hyphenIdx = source.indexOf('-', 1); // look past possible leading hyphen + if (hyphenIdx < 0) { + hyphenIdx = lastOffset; + } + int year = Integer.parseInt(source.substring(offset, hyphenIdx)); + calendar.set(Calendar.ERA, year <= 0 ? 0 : 1); + calendar.set(Calendar.YEAR, year <= 0 ? -1 * year + 1 : year); + offset = hyphenIdx + 1; + if (lastOffset < offset) { + return calendar; + } + + // NOTE: We aren't validating separator chars, and we unintentionally accept leading +/-. + // The str.substring()'s hopefully get optimized to be stack-allocated. 
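+ // From here on every field is fixed-width: two digits (three for the milliseconds) followed by a one-character separator, hence the repeated offset += 3.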
+ + // month: + calendar.set( + Calendar.MONTH, + Integer.parseInt(source.substring(offset, offset + 2)) - 1); // starts at 0 + offset += 3; + if (lastOffset < offset) { + return calendar; + } + // day: + calendar.set(Calendar.DAY_OF_MONTH, Integer.parseInt(source.substring(offset, offset + 2))); + offset += 3; + if (lastOffset < offset) { + return calendar; + } + // hour: + calendar.set(Calendar.HOUR_OF_DAY, Integer.parseInt(source.substring(offset, offset + 2))); + offset += 3; + if (lastOffset < offset) { + return calendar; + } + // minute: + calendar.set(Calendar.MINUTE, Integer.parseInt(source.substring(offset, offset + 2))); + offset += 3; + if (lastOffset < offset) { + return calendar; + } + // second: + calendar.set(Calendar.SECOND, Integer.parseInt(source.substring(offset, offset + 2))); + offset += 3; + if (lastOffset < offset) { + return calendar; + } + // ms: + calendar.set(Calendar.MILLISECOND, Integer.parseInt(source.substring(offset, offset + 3))); + offset += 3; // last one, move to next char + if (lastOffset == offset) { + return calendar; + } + } catch (Exception e) { + ParseException pe = new ParseException("Improperly formatted date: " + source, offset); + pe.initCause(e); + throw pe; + } + throw new ParseException("Improperly formatted date: " + source, offset); + } + + private static Calendar newCalendar() { + Calendar calendar = Calendar.getInstance(UTC, Locale.ROOT); + calendar.clear(); + return calendar; + } + + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + /** + * Returns the precision of a calendar obtained through {@link #parseCalendar(String)}, or {@code + * null} if no field was set. + */ + @Nullable + public static DateRangePrecision getPrecision(Calendar calendar) { + DateRangePrecision lastPrecision = null; + for (Map.Entry entry : FIELD_BY_PRECISION.entrySet()) { + DateRangePrecision precision = entry.getKey(); + int field = entry.getValue(); + if (calendar.isSet(field)) { + lastPrecision = precision; + } else { + break; + } + } + return lastPrecision; + } + + // Note: this could be a field on DateRangePrecision, but it's only used within this class so it's + // better not to expose it. 
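+ // Entry order matters: getPrecision() iterates from coarsest (YEAR) to finest (MILLISECOND) and returns the last field that was set.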
+ private static final ImmutableMap FIELD_BY_PRECISION = + ImmutableMap.builder() + .put(DateRangePrecision.YEAR, Calendar.YEAR) + .put(DateRangePrecision.MONTH, Calendar.MONTH) + .put(DateRangePrecision.DAY, Calendar.DAY_OF_MONTH) + .put(DateRangePrecision.HOUR, Calendar.HOUR_OF_DAY) + .put(DateRangePrecision.MINUTE, Calendar.MINUTE) + .put(DateRangePrecision.SECOND, Calendar.SECOND) + .put(DateRangePrecision.MILLISECOND, Calendar.MILLISECOND) + .build(); + + public static ZonedDateTime toZonedDateTime(Calendar calendar) { + int year = calendar.get(Calendar.YEAR); + if (calendar.get(Calendar.ERA) == 0) { + // BC era; 1 BC == 0 AD, 0 BD == -1 AD, etc + year -= 1; + if (year > 0) { + year = -year; + } + } + LocalDateTime localDateTime = + LocalDateTime.of( + year, + calendar.get(Calendar.MONTH) + 1, + calendar.get(Calendar.DAY_OF_MONTH), + calendar.get(Calendar.HOUR_OF_DAY), + calendar.get(Calendar.MINUTE), + calendar.get(Calendar.SECOND)); + localDateTime = + localDateTime.with(ChronoField.MILLI_OF_SECOND, calendar.get(Calendar.MILLISECOND)); + return ZonedDateTime.of(localDateTime, ZoneOffset.UTC); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java b/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java new file mode 100644 index 00000000000..183f385aa4a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/session/DefaultDseSession.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.session; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.session.SessionWrapper; +import net.jcip.annotations.ThreadSafe; + +/** + * @deprecated DSE functionality is now exposed directly on {@link CqlSession}. This class is + * preserved for backward compatibility, but {@link DefaultSession} should be used instead. 
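+ * <p>The wrapper adds no behavior of its own; it simply delegates every call to the wrapped {@link Session}.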
+ */ +@ThreadSafe +@Deprecated +public class DefaultDseSession extends SessionWrapper + implements com.datastax.dse.driver.api.core.DseSession { + + public DefaultDseSession(Session delegate) { + super(delegate); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java new file mode 100644 index 00000000000..55da2a9475f --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrar.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.type.codec; + +import static com.datastax.oss.driver.internal.core.util.Dependency.ESRI; + +import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DseTypeCodecsRegistrar { + + private static final Logger LOG = LoggerFactory.getLogger(DseTypeCodecsRegistrar.class); + + public static void registerDseCodecs(MutableCodecRegistry registry) { + registry.register(DseTypeCodecs.DATE_RANGE); + if (DefaultDependencyChecker.isPresent(ESRI)) { + registry.register(DseTypeCodecs.LINE_STRING, DseTypeCodecs.POINT, DseTypeCodecs.POLYGON); + } else { + LOG.debug("ESRI was not found on the classpath: geo codecs will not be available"); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java new file mode 100644 index 00000000000..afd8d6cf9f6 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/DseTypeCodecsRegistrarSubstitutions.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.type.codec; + +import static com.datastax.oss.driver.internal.core.util.Dependency.ESRI; + +import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import java.util.function.BooleanSupplier; + +@SuppressWarnings("unused") +public class DseTypeCodecsRegistrarSubstitutions { + + @TargetClass(value = DseTypeCodecsRegistrar.class, onlyWith = EsriMissing.class) + public static final class DseTypeCodecsRegistrarEsriMissing { + + @Substitute + public static void registerDseCodecs(MutableCodecRegistry registry) { + registry.register(DseTypeCodecs.DATE_RANGE); + } + } + + public static class EsriMissing implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return !GraalDependencyChecker.isPresent(ESRI); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java new file mode 100644 index 00000000000..f6309bc1860 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodec.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.type.codec.geometry; + +import static com.datastax.oss.driver.internal.core.util.Strings.isQuoted; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.internal.core.util.Strings; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import net.jcip.annotations.ThreadSafe; + +/** Base class for geospatial type codecs. */ +@ThreadSafe +public abstract class GeometryCodec implements TypeCodec { + + @Nullable + @Override + public T decode(@Nullable ByteBuffer bb, @NonNull ProtocolVersion protocolVersion) { + return bb == null || bb.remaining() == 0 ? null : fromWellKnownBinary(bb.slice()); + } + + @Nullable + @Override + public ByteBuffer encode(@Nullable T geometry, @NonNull ProtocolVersion protocolVersion) { + return geometry == null ? 
null : toWellKnownBinary(geometry); + } + + @Nullable + @Override + public T parse(@Nullable String s) { + if (s == null) { + return null; + } + s = s.trim(); + if (s.isEmpty() || s.equalsIgnoreCase("NULL")) { + return null; + } + if (!isQuoted(s)) { + throw new IllegalArgumentException("Geometry values must be enclosed by single quotes"); + } + return fromWellKnownText(Strings.unquote(s)); + } + + @NonNull + @Override + public String format(@Nullable T geometry) throws IllegalArgumentException { + return geometry == null ? "NULL" : Strings.quote(toWellKnownText(geometry)); + } + + /** + * Creates an instance of this codec's geospatial type from its Well-known Text (WKT) representation. + * + * @param source the Well-known Text representation to parse. Cannot be null. + * @return A new instance of this codec's geospatial type. + * @throws IllegalArgumentException if the string does not contain a valid Well-known Text + * representation. + */ + @NonNull + protected abstract T fromWellKnownText(@NonNull String source); + + /** + * Creates an instance of a geospatial type from its Well-known Binary + * (WKB) representation. + * + * @param bb the Well-known Binary representation to parse. Cannot be null. + * @return A new instance of this codec's geospatial type. + * @throws IllegalArgumentException if the given {@link ByteBuffer} does not contain a valid + * Well-known Binary representation. + */ + @NonNull + protected abstract T fromWellKnownBinary(@NonNull ByteBuffer bb); + + /** + * Returns a Well-known Text (WKT) + * representation of the given geospatial object. + * + * @param geometry the geospatial object to convert. Cannot be null. + * @return A Well-known Text representation of the given object. + */ + @NonNull + protected abstract String toWellKnownText(@NonNull T geometry); + + /** + * Returns a Well-known + * Binary (WKB) representation of the given geospatial object. + * + * @param geometry the geospatial object to convert. Cannot be null. + * @return A Well-known Binary representation of the given object. + */ + @NonNull + protected abstract ByteBuffer toWellKnownBinary(@NonNull T geometry); +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java new file mode 100644 index 00000000000..bbec99a4103 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodec.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
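To make `GeometryCodec`'s `parse`/`format` contract concrete: `parse` expects a single-quoted WKT literal (or `NULL`), and `format` produces one. A sketch using the `POINT` codec registered earlier; the WKT strings are illustrative:

```java
import com.datastax.dse.driver.api.core.data.geometry.Point;
import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;

public class WktRoundTripExample {
  public static void main(String[] args) {
    TypeCodec<Point> codec = DseTypeCodecs.POINT;
    // parse() requires the single quotes; without them it throws IllegalArgumentException.
    Point point = codec.parse("'POINT (30 10)'");
    // format() adds the quotes back, producing a literal usable in a CQL query string.
    String literal = codec.format(point); // "'POINT (30 10)'"
    System.out.println(literal);
    // Null handling: "NULL" (any case) parses to null, and format(null) returns "NULL".
    System.out.println(codec.parse("NULL")); // null
  }
}
```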
+ */
+package com.datastax.dse.driver.internal.core.type.codec.geometry;
+
+import com.datastax.dse.driver.api.core.data.geometry.LineString;
+import com.datastax.dse.driver.api.core.type.DseDataTypes;
+import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry;
+import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.esri.core.geometry.ogc.OGCLineString;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.nio.ByteBuffer;
+import net.jcip.annotations.ThreadSafe;
+
+/**
+ * A custom type codec to use {@link LineString} instances in the driver.
+ *
+ *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, + * it will automatically register this codec. + */ +@ThreadSafe +public class LineStringCodec extends GeometryCodec { + + private static final GenericType JAVA_TYPE = GenericType.of(LineString.class); + + @NonNull + @Override + public GenericType getJavaType() { + return JAVA_TYPE; + } + + @NonNull + @Override + protected LineString fromWellKnownText(@NonNull String source) { + return new DefaultLineString(DefaultGeometry.fromOgcWellKnownText(source, OGCLineString.class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + return javaClass == LineString.class; + } + + @Override + public boolean accepts(@NonNull Object value) { + return value instanceof LineString; + } + + @NonNull + @Override + protected LineString fromWellKnownBinary(@NonNull ByteBuffer bb) { + return new DefaultLineString(DefaultGeometry.fromOgcWellKnownBinary(bb, OGCLineString.class)); + } + + @NonNull + @Override + protected String toWellKnownText(@NonNull LineString geometry) { + return geometry.asWellKnownText(); + } + + @NonNull + @Override + protected ByteBuffer toWellKnownBinary(@NonNull LineString geometry) { + return geometry.asWellKnownBinary(); + } + + @NonNull + @Override + public DataType getCqlType() { + return DseDataTypes.LINE_STRING; + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java new file mode 100644 index 00000000000..5ebae64cbab --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodec.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.type.codec.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.type.DseDataTypes; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.esri.core.geometry.ogc.OGCPoint; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import net.jcip.annotations.ThreadSafe; + +/** + * A custom type codec to use {@link Point} instances in the driver. + * + *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, + * it will automatically register this codec. + */ +@ThreadSafe +public class PointCodec extends GeometryCodec { + + private static final GenericType JAVA_TYPE = GenericType.of(Point.class); + + @NonNull + @Override + public GenericType getJavaType() { + return JAVA_TYPE; + } + + @NonNull + @Override + public DataType getCqlType() { + return DseDataTypes.POINT; + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + return javaClass == Point.class; + } + + @Override + public boolean accepts(@NonNull Object value) { + return value instanceof Point; + } + + @NonNull + @Override + protected String toWellKnownText(@NonNull Point geometry) { + return geometry.asWellKnownText(); + } + + @NonNull + @Override + protected ByteBuffer toWellKnownBinary(@NonNull Point geometry) { + return geometry.asWellKnownBinary(); + } + + @NonNull + @Override + protected Point fromWellKnownText(@NonNull String source) { + return new DefaultPoint(DefaultGeometry.fromOgcWellKnownText(source, OGCPoint.class)); + } + + @NonNull + @Override + protected Point fromWellKnownBinary(@NonNull ByteBuffer source) { + return new DefaultPoint(DefaultGeometry.fromOgcWellKnownBinary(source, OGCPoint.class)); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java new file mode 100644 index 00000000000..00a070a4b4a --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodec.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.type.codec.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.type.DseDataTypes; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultGeometry; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.esri.core.geometry.ogc.OGCPolygon; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import net.jcip.annotations.ThreadSafe; + +/** + * A custom type codec to use {@link Polygon} instances in the driver. + * + *

If you use {@link com.datastax.dse.driver.api.core.DseSessionBuilder} to build your cluster, + * it will automatically register this codec. + */ +@ThreadSafe +public class PolygonCodec extends GeometryCodec { + + private static final GenericType JAVA_TYPE = GenericType.of(Polygon.class); + + @NonNull + @Override + public GenericType getJavaType() { + return JAVA_TYPE; + } + + @NonNull + @Override + public DataType getCqlType() { + return DseDataTypes.POLYGON; + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + return javaClass == Polygon.class; + } + + @Override + public boolean accepts(@NonNull Object value) { + return value instanceof Polygon; + } + + @NonNull + @Override + protected Polygon fromWellKnownText(@NonNull String source) { + return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownText(source, OGCPolygon.class)); + } + + @NonNull + @Override + protected Polygon fromWellKnownBinary(@NonNull ByteBuffer bb) { + return new DefaultPolygon(DefaultGeometry.fromOgcWellKnownBinary(bb, OGCPolygon.class)); + } + + @NonNull + @Override + protected String toWellKnownText(@NonNull Polygon geometry) { + return geometry.asWellKnownText(); + } + + @NonNull + @Override + protected ByteBuffer toWellKnownBinary(@NonNull Polygon geometry) { + return geometry.asWellKnownBinary(); + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java new file mode 100644 index 00000000000..e8a23e88848 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodec.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
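Once these codecs are registered, geometry objects bind like any built-in type. A sketch, assuming a hypothetical `points_of_interest` table with a geospatial `coords` column (the table name and schema are made up for illustration):

```java
import com.datastax.dse.driver.api.core.data.geometry.Point;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;

public class BindPointExample {
  static void insertPoint(CqlSession session) {
    PreparedStatement pst =
        session.prepare("INSERT INTO points_of_interest (id, coords) VALUES (?, ?)");
    // PointCodec.accepts(Object) matches any Point instance, so bind() picks the codec up.
    session.execute(pst.bind(1, Point.fromCoordinates(30, 10)));
  }
}
```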
+ */ +package com.datastax.dse.driver.internal.core.type.codec.time; + +import com.datastax.dse.driver.api.core.data.time.DateRange; +import com.datastax.dse.driver.api.core.data.time.DateRangeBound; +import com.datastax.dse.driver.api.core.data.time.DateRangePrecision; +import com.datastax.dse.driver.api.core.type.DseDataTypes; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.util.Strings; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.text.ParseException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.Optional; + +public class DateRangeCodec implements TypeCodec { + + private static final GenericType JAVA_TYPE = GenericType.of(DateRange.class); + private static final DataType CQL_TYPE = DseDataTypes.DATE_RANGE; + + // e.g. [2001-01-01] + private static final byte DATE_RANGE_TYPE_SINGLE_DATE = 0x00; + // e.g. [2001-01-01 TO 2001-01-31] + private static final byte DATE_RANGE_TYPE_CLOSED_RANGE = 0x01; + // e.g. [2001-01-01 TO *] + private static final byte DATE_RANGE_TYPE_OPEN_RANGE_HIGH = 0x02; + // e.g. [* TO 2001-01-01] + private static final byte DATE_RANGE_TYPE_OPEN_RANGE_LOW = 0x03; + // [* TO *] + private static final byte DATE_RANGE_TYPE_BOTH_OPEN_RANGE = 0x04; + // * + private static final byte DATE_RANGE_TYPE_SINGLE_DATE_OPEN = 0x05; + + @NonNull + @Override + public GenericType getJavaType() { + return JAVA_TYPE; + } + + @NonNull + @Override + public DataType getCqlType() { + return CQL_TYPE; + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + return javaClass == DateRange.class; + } + + @Nullable + @Override + public ByteBuffer encode( + @Nullable DateRange dateRange, @NonNull ProtocolVersion protocolVersion) { + if (dateRange == null) { + return null; + } + byte rangeType = encodeType(dateRange); + int bufferSize = 1; + DateRangeBound lowerBound = dateRange.getLowerBound(); + Optional maybeUpperBound = dateRange.getUpperBound(); + bufferSize += lowerBound.isUnbounded() ? 0 : 9; + bufferSize += maybeUpperBound.map(upperBound -> upperBound.isUnbounded() ? 0 : 9).orElse(0); + ByteBuffer buffer = ByteBuffer.allocate(bufferSize); + buffer.put(rangeType); + if (!lowerBound.isUnbounded()) { + put(buffer, lowerBound); + } + maybeUpperBound.ifPresent( + upperBound -> { + if (!upperBound.isUnbounded()) { + put(buffer, upperBound); + } + }); + return (ByteBuffer) buffer.flip(); + } + + private static byte encodeType(DateRange dateRange) { + if (dateRange.isSingleBounded()) { + return dateRange.getLowerBound().isUnbounded() + ? DATE_RANGE_TYPE_SINGLE_DATE_OPEN + : DATE_RANGE_TYPE_SINGLE_DATE; + } else { + DateRangeBound upperBound = + dateRange + .getUpperBound() + .orElseThrow( + () -> + new IllegalStateException("Upper bound should be set if !isSingleBounded()")); + if (dateRange.getLowerBound().isUnbounded()) { + return upperBound.isUnbounded() + ? DATE_RANGE_TYPE_BOTH_OPEN_RANGE + : DATE_RANGE_TYPE_OPEN_RANGE_LOW; + } else { + return upperBound.isUnbounded() + ? 
DATE_RANGE_TYPE_OPEN_RANGE_HIGH + : DATE_RANGE_TYPE_CLOSED_RANGE; + } + } + } + + private static void put(ByteBuffer buffer, DateRangeBound bound) { + buffer.putLong(bound.getTimestamp().toInstant().toEpochMilli()); + buffer.put(bound.getPrecision().getEncoding()); + } + + @Nullable + @Override + public DateRange decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) { + return null; + } + byte type = bytes.get(); + switch (type) { + case DATE_RANGE_TYPE_SINGLE_DATE: + return new DateRange(decodeLowerBound(bytes)); + case DATE_RANGE_TYPE_CLOSED_RANGE: + return new DateRange(decodeLowerBound(bytes), decodeUpperBound(bytes)); + case DATE_RANGE_TYPE_OPEN_RANGE_HIGH: + return new DateRange(decodeLowerBound(bytes), DateRangeBound.UNBOUNDED); + case DATE_RANGE_TYPE_OPEN_RANGE_LOW: + return new DateRange(DateRangeBound.UNBOUNDED, decodeUpperBound(bytes)); + case DATE_RANGE_TYPE_BOTH_OPEN_RANGE: + return new DateRange(DateRangeBound.UNBOUNDED, DateRangeBound.UNBOUNDED); + case DATE_RANGE_TYPE_SINGLE_DATE_OPEN: + return new DateRange(DateRangeBound.UNBOUNDED); + default: + throw new IllegalArgumentException("Unknown date range type: " + type); + } + } + + private static DateRangeBound decodeLowerBound(ByteBuffer bytes) { + long epochMilli = bytes.getLong(); + ZonedDateTime timestamp = + ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC); + DateRangePrecision precision = DateRangePrecision.fromEncoding(bytes.get()); + return DateRangeBound.lowerBound(timestamp, precision); + } + + private static DateRangeBound decodeUpperBound(ByteBuffer bytes) { + long epochMilli = bytes.getLong(); + ZonedDateTime timestamp = + ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMilli), ZoneOffset.UTC); + DateRangePrecision precision = DateRangePrecision.fromEncoding(bytes.get()); + return DateRangeBound.upperBound(timestamp, precision); + } + + @NonNull + @Override + public String format(@Nullable DateRange dateRange) { + return (dateRange == null) ? "NULL" : Strings.quote(dateRange.toString()); + } + + @Nullable + @Override + public DateRange parse(@Nullable String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { + return null; + } + try { + return DateRange.parse(Strings.unquote(value)); + } catch (ParseException e) { + throw new IllegalArgumentException(String.format("Invalid date range literal: %s", value), e); + } + } +} diff --git a/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java b/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java new file mode 100644 index 00000000000..ea9ccd7d622 --- /dev/null +++ b/core/src/main/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueue.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
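A worked example of `DateRangeCodec`'s binary format above: one leading type byte, then 9 bytes per present bound (an 8-byte epoch-millisecond long followed by a 1-byte precision code). So a closed range encodes to 19 bytes, while `[* TO *]` needs only the type byte:

```java
import com.datastax.dse.driver.api.core.data.time.DateRange;
import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs;
import com.datastax.oss.driver.api.core.ProtocolVersion;
import java.nio.ByteBuffer;
import java.text.ParseException;

public class DateRangeWireFormatExample {
  public static void main(String[] args) throws ParseException {
    // A closed range: type byte 0x01, then lower and upper bounds.
    DateRange range = DateRange.parse("[2001-01-01 TO 2001-01-31]");
    ByteBuffer bytes = DseTypeCodecs.DATE_RANGE.encode(range, ProtocolVersion.DEFAULT);
    System.out.println(bytes.remaining()); // 19 = 1 type byte + 2 * (8 + 1) bound bytes
    // An unbounded range "[* TO *]" would encode to a single byte (0x04).
  }
}
```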
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.util.concurrent; + +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Deque; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.atomic.AtomicReference; + +/** + * A concurrent queue with a limited size. + * + *

Once the queue is full, the insertion of the next element is delayed until space becomes + * available again; in the meantime, additional insertions are not allowed (in other words, there + * can be at most one "pending" element waiting on a full queue). + */ +public class BoundedConcurrentQueue { + + private final Deque elements = new ConcurrentLinkedDeque<>(); + private final AtomicReference state; + + public BoundedConcurrentQueue(int maxSize) { + this.state = new AtomicReference<>(new State(maxSize)); + } + + /** + * @return a stage that completes when the element is inserted. If there was still space in the + * queue, it will be already complete; if the queue was full, it will complete at a later + * point in time (triggered by a call to {@link #poll()}). This method must not be invoked + * again until the stage has completed. + * @throws IllegalStateException if the method is invoked before the stage returned by the + * previous call has completed. + */ + @NonNull + public CompletionStage offer(@NonNull ElementT element) { + while (true) { + State oldState = state.get(); + State newState = oldState.increment(); + if (state.compareAndSet(oldState, newState)) { + if (newState.spaceAvailable != null) { + return newState.spaceAvailable.thenApply( + (aVoid) -> { + elements.offer(element); + return element; + }); + } else { + elements.offer(element); + return CompletableFuture.completedFuture(element); + } + } + } + } + + @Nullable + public ElementT poll() { + while (true) { + State oldState = state.get(); + if (oldState.size == 0) { + return null; + } + State newState = oldState.decrement(); + if (state.compareAndSet(oldState, newState)) { + if (oldState.spaceAvailable != null) { + oldState.spaceAvailable.complete(null); + } + return elements.poll(); + } + } + } + + @Nullable + public ElementT peek() { + return elements.peek(); + } + + /** + * Note that this does not complete a pending call to {@link #offer(Object)}. We only use this + * method for terminal states where we want to dereference the contained elements. + */ + public void clear() { + elements.clear(); + } + + private static class State { + + private final int maxSize; + + final int size; // Number of elements in the queue, + 1 if one is waiting to get in + final CompletableFuture spaceAvailable; // Not null iff size == maxSize + 1 + + State(int maxSize) { + this(0, null, maxSize); + } + + private State(int size, CompletableFuture spaceAvailable, int maxSize) { + this.maxSize = maxSize; + this.size = size; + this.spaceAvailable = spaceAvailable; + } + + State increment() { + if (size > maxSize) { + throw new IllegalStateException( + "Can't call offer() until the stage returned by the previous offer() call has completed"); + } + int newSize = size + 1; + CompletableFuture newFuture = + (newSize == maxSize + 1) ? new CompletableFuture<>() : null; + return new State(newSize, newFuture, maxSize); + } + + State decrement() { + return new State(size - 1, null, maxSize); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java index a897c4d9e27..b6f1bf93838 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/AllNodesFailedException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
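To illustrate `BoundedConcurrentQueue`'s contract above (at most one pending `offer`, released by `poll`), a minimal sketch with a capacity of 1; note that this is an internal class, so the example is explanatory rather than recommended usage:

```java
import com.datastax.dse.driver.internal.core.util.concurrent.BoundedConcurrentQueue;
import java.util.concurrent.CompletionStage;

public class BoundedQueueExample {
  public static void main(String[] args) {
    BoundedConcurrentQueue<String> queue = new BoundedConcurrentQueue<>(1);
    queue.offer("first"); // completes immediately: the queue had room
    // The queue is now full: this stage completes only once space frees up.
    CompletionStage<String> pending = queue.offer("second");
    pending.thenAccept(element -> System.out.println(element + " finally inserted"));
    // poll() removes "first", completes the pending stage, and lets "second" in.
    queue.poll();
  }
}
```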
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,87 +19,169 @@ import com.datastax.oss.driver.api.core.cql.ExecutionInfo; import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.base.Joiner; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; /** * Thrown when a query failed on all the coordinators it was tried on. This exception may wrap - * multiple errors, use {@link #getErrors()} to inspect the individual problem on each node. + * multiple errors, that are available either as {@linkplain #getSuppressed() suppressed + * exceptions}, or via {@link #getAllErrors()} where they are grouped by node. */ public class AllNodesFailedException extends DriverException { + /** @deprecated Use {@link #fromErrors(List)} instead. */ @NonNull + @Deprecated public static AllNodesFailedException fromErrors(@Nullable Map errors) { if (errors == null || errors.isEmpty()) { return new NoNodeAvailableException(); } else { - return new AllNodesFailedException(ImmutableMap.copyOf(errors)); + return new AllNodesFailedException(groupByNode(errors)); } } @NonNull - public static AllNodesFailedException fromErrors( - @Nullable List> errors) { - Map map; + public static AllNodesFailedException fromErrors(@Nullable List> errors) { if (errors == null || errors.isEmpty()) { - map = null; + return new NoNodeAvailableException(); } else { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Map.Entry entry : errors) { - builder.put(entry); - } - map = builder.build(); + return new AllNodesFailedException(groupByNode(errors)); } - return fromErrors(map); } - private final Map errors; + private final Map> errors; + /** @deprecated Use {@link #AllNodesFailedException(String, ExecutionInfo, Iterable)} instead. 
*/ + @Deprecated protected AllNodesFailedException( @NonNull String message, @Nullable ExecutionInfo executionInfo, @NonNull Map errors) { super(message, executionInfo, null, true); - this.errors = errors; + this.errors = toDeepImmutableMap(groupByNode(errors)); + addSuppressedErrors(); + } + + protected AllNodesFailedException( + @NonNull String message, + @Nullable ExecutionInfo executionInfo, + @NonNull Iterable>> errors) { + super(message, executionInfo, null, true); + this.errors = toDeepImmutableMap(errors); + addSuppressedErrors(); + } + + private void addSuppressedErrors() { + for (List errors : this.errors.values()) { + for (Throwable error : errors) { + addSuppressed(error); + } + } } - private AllNodesFailedException(Map errors) { + private AllNodesFailedException(Map> errors) { this( buildMessage( String.format("All %d node(s) tried for the query failed", errors.size()), errors), null, - errors); + errors.entrySet()); } - private static String buildMessage(String baseMessage, Map errors) { + private static String buildMessage(String baseMessage, Map> errors) { int limit = Math.min(errors.size(), 3); - String details = - Joiner.on(", ").withKeyValueSeparator(": ").join(Iterables.limit(errors.entrySet(), limit)); - + Iterator>> iterator = + Iterables.limit(errors.entrySet(), limit).iterator(); + StringBuilder details = new StringBuilder(); + while (iterator.hasNext()) { + Entry> entry = iterator.next(); + details.append(entry.getKey()).append(": ").append(entry.getValue()); + if (iterator.hasNext()) { + details.append(", "); + } + } return String.format( - baseMessage + " (showing first %d, use getErrors() for more: %s)", limit, details); + "%s (showing first %d nodes, use getAllErrors() for more): %s", + baseMessage, limit, details); } - /** The details of the individual error on each node. */ + /** + * An immutable map containing the first error on each tried node. + * + * @deprecated Use {@link #getAllErrors()} instead. + */ @NonNull + @Deprecated public Map getErrors() { + ImmutableMap.Builder builder = ImmutableMap.builder(); + for (Node node : errors.keySet()) { + List nodeErrors = errors.get(node); + if (!nodeErrors.isEmpty()) { + builder.put(node, nodeErrors.get(0)); + } + } + return builder.build(); + } + + /** An immutable map containing all errors on each tried node. 
*/ + @NonNull + public Map> getAllErrors() { return errors; } @NonNull @Override public DriverException copy() { - return new AllNodesFailedException(getMessage(), getExecutionInfo(), errors); + return new AllNodesFailedException(getMessage(), getExecutionInfo(), errors.entrySet()); } @NonNull public AllNodesFailedException reword(String newMessage) { return new AllNodesFailedException( - buildMessage(newMessage, errors), getExecutionInfo(), errors); + buildMessage(newMessage, errors), getExecutionInfo(), errors.entrySet()); + } + + private static Map> groupByNode(Map errors) { + return groupByNode(errors.entrySet()); + } + + private static Map> groupByNode(Iterable> errors) { + // no need for immutable collections here + Map> map = new LinkedHashMap<>(); + for (Entry entry : errors) { + Node node = entry.getKey(); + Throwable error = entry.getValue(); + map.compute( + node, + (k, v) -> { + if (v == null) { + v = new ArrayList<>(); + } + v.add(error); + return v; + }); + } + return map; + } + + private static Map> toDeepImmutableMap(Map> errors) { + return toDeepImmutableMap(errors.entrySet()); + } + + private static Map> toDeepImmutableMap( + Iterable>> errors) { + ImmutableMap.Builder> builder = ImmutableMap.builder(); + for (Entry> entry : errors) { + builder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue())); + } + return builder.build(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java b/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java index f84cdf26c86..7f8cafbc895 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/AsyncAutoCloseable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java index 9abe4136f66..fd7c5be6baa 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/AsyncPagingIterable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
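Back to `AllNodesFailedException`: with this change every per-node error is retained, grouped by node via `getAllErrors()`, and also attached as a suppressed exception. A sketch of client-side handling (the query is a placeholder):

```java
import com.datastax.oss.driver.api.core.AllNodesFailedException;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Node;
import java.util.List;
import java.util.Map;

public class AllErrorsExample {
  static void queryWithDiagnostics(CqlSession session) {
    try {
      session.execute("SELECT release_version FROM system.local");
    } catch (AllNodesFailedException e) {
      for (Map.Entry<Node, List<Throwable>> entry : e.getAllErrors().entrySet()) {
        System.err.printf("%s failed %d time(s)%n", entry.getKey(), entry.getValue().size());
      }
      // The same errors are also available as suppressed exceptions:
      for (Throwable error : e.getSuppressed()) {
        error.printStackTrace();
      }
      throw e;
    }
  }
}
```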
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java b/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java index 65e32308fca..a1b6d8006df 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/ConsistencyLevel.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,18 @@ */ public interface ConsistencyLevel { + ConsistencyLevel ANY = DefaultConsistencyLevel.ANY; + ConsistencyLevel ONE = DefaultConsistencyLevel.ONE; + ConsistencyLevel TWO = DefaultConsistencyLevel.TWO; + ConsistencyLevel THREE = DefaultConsistencyLevel.THREE; + ConsistencyLevel QUORUM = DefaultConsistencyLevel.QUORUM; + ConsistencyLevel ALL = DefaultConsistencyLevel.ALL; + ConsistencyLevel LOCAL_ONE = DefaultConsistencyLevel.LOCAL_ONE; + ConsistencyLevel LOCAL_QUORUM = DefaultConsistencyLevel.LOCAL_QUORUM; + ConsistencyLevel EACH_QUORUM = DefaultConsistencyLevel.EACH_QUORUM; + ConsistencyLevel SERIAL = DefaultConsistencyLevel.SERIAL; + ConsistencyLevel LOCAL_SERIAL = DefaultConsistencyLevel.LOCAL_SERIAL; + /** The numerical value that the level is encoded to in protocol frames. */ int getProtocolCode(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java index 89211d75382..82e4c2b30a6 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/CqlIdentifier.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
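A note on the constants added to `ConsistencyLevel` above: they are shortcuts to the `DefaultConsistencyLevel` enum members, so application code can reference levels without importing the enum. For example:

```java
import com.datastax.oss.driver.api.core.ConsistencyLevel;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

public class ConsistencyShortcutExample {
  public static void main(String[] args) {
    SimpleStatement statement =
        SimpleStatement.newInstance("SELECT * FROM users")
            .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM);
    System.out.println(statement.getConsistencyLevel()); // LOCAL_QUORUM
  }
}
```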
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,6 +23,7 @@ import java.io.IOException; import java.io.ObjectInputStream; import java.io.Serializable; +import java.util.Locale; import net.jcip.annotations.Immutable; /** @@ -75,7 +78,7 @@ public static CqlIdentifier fromCql(@NonNull String cql) { if (Strings.isDoubleQuoted(cql)) { internal = Strings.unDoubleQuote(cql); } else { - internal = cql.toLowerCase(); + internal = cql.toLowerCase(Locale.ROOT); Preconditions.checkArgument( !Strings.needsDoubleQuotes(internal), "Invalid CQL form [%s]: needs double quotes", cql); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java index 04a98054dc0..ff096719f3e 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSession.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,218 +17,54 @@ */ package com.datastax.oss.driver.api.core; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PrepareRequest; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousSession; +import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveSession; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveSession; +import com.datastax.dse.driver.api.core.graph.GraphSession; +import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphSession; +import com.datastax.oss.driver.api.core.cql.AsyncCqlSession; +import com.datastax.oss.driver.api.core.cql.SyncCqlSession; import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Objects; -import java.util.concurrent.CompletionStage; - -/** A specialized session with convenience methods to execute CQL statements. */ -public interface CqlSession extends Session { - - /** Returns a builder to create a new instance. 
*/ - @NonNull - static CqlSessionBuilder builder() { - return new CqlSessionBuilder(); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - */ - @NonNull - default ResultSet execute(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, Statement.SYNC), "The CQL processor should never return a null result"); - } - - /** - * Executes a CQL statement synchronously (the calling thread blocks until the result becomes - * available). - */ - @NonNull - default ResultSet execute(@NonNull String query) { - return execute(SimpleStatement.newInstance(query)); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - */ - @NonNull - default CompletionStage executeAsync(@NonNull Statement statement) { - return Objects.requireNonNull( - execute(statement, Statement.ASYNC), "The CQL processor should never return a null result"); - } - - /** - * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, - * generally before the result is available). - */ - @NonNull - default CompletionStage executeAsync(@NonNull String query) { - return executeAsync(SimpleStatement.newInstance(query)); - } - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

Note that the bound statements created from the resulting prepared statement will inherit - * some of the attributes of the provided simple statement. That is, given: - * - *

{@code
-   * SimpleStatement simpleStatement = SimpleStatement.newInstance("...");
-   * PreparedStatement preparedStatement = session.prepare(simpleStatement);
-   * BoundStatement boundStatement = preparedStatement.bind();
-   * }
- * - * Then: - * - *
    - *
  • the following methods return the same value as their counterpart on {@code - * simpleStatement}: - *
      - *
    • {@link Request#getExecutionProfileName() boundStatement.getExecutionProfileName()} - *
    • {@link Request#getExecutionProfile() boundStatement.getExecutionProfile()} - *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} - *
    • {@link Request#getRoutingKey() boundStatement.getRoutingKey()} - *
    • {@link Request#getRoutingToken() boundStatement.getRoutingToken()} - *
    • {@link Request#getCustomPayload() boundStatement.getCustomPayload()} - *
    • {@link Request#isIdempotent() boundStatement.isIdempotent()} - *
    • {@link Request#getTimeout() boundStatement.getTimeout()} - *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} - *
    • {@link Statement#getPageSize() boundStatement.getPageSize()} - *
    • {@link Statement#getConsistencyLevel() boundStatement.getConsistencyLevel()} - *
    • {@link Statement#getSerialConsistencyLevel() - * boundStatement.getSerialConsistencyLevel()} - *
    • {@link Statement#isTracing() boundStatement.isTracing()} - *
    - *
  • {@link Request#getRoutingKeyspace() boundStatement.getRoutingKeyspace()} is set from - * either {@link Request#getKeyspace() simpleStatement.getKeyspace()} (if it's not {@code - * null}), or {@code simpleStatement.getRoutingKeyspace()}; - *
  • on the other hand, the following attributes are not propagated: - *
      - *
    • {@link Statement#getQueryTimestamp() boundStatement.getQueryTimestamp()} will be - * set to {@link Long#MIN_VALUE}, meaning that the value will be assigned by the - * session's timestamp generator. - *
    • {@link Statement#getNode() boundStatement.getNode()} will always be {@code null}. - *
    - *
- * - * If you want to customize this behavior, you can write your own implementation of {@link - * PrepareRequest} and pass it to {@link #prepare(PrepareRequest)}. - * - *

The result of this method is cached: if you call it twice with the same {@link - * SimpleStatement}, you will get the same {@link PreparedStatement} instance. We still recommend - * keeping a reference to it (for example by caching it as a field in a DAO); if that's not - * possible (e.g. if query strings are generated dynamically), it's OK to call this method every - * time: there will just be a small performance overhead to check the internal cache. Note that - * caching is based on: - * - *

    - *
  • the query string exactly as you provided it: the driver does not perform any kind of - * trimming or sanitizing. - *
  • all other execution parameters: for example, preparing two statements with identical - * query strings but different {@linkplain SimpleStatement#getConsistencyLevel() consistency - * levels} will yield distinct prepared statements. - *
- */ - @NonNull - default PreparedStatement prepare(@NonNull SimpleStatement statement) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(statement), PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - */ - @NonNull - default PreparedStatement prepare(@NonNull String query) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(query), PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement synchronously (the calling thread blocks until the statement is - * prepared). - * - *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to - * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link - * #prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely have to deal - * with {@link PrepareRequest} directly. - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - */ - @NonNull - default PreparedStatement prepare(@NonNull PrepareRequest request) { - return Objects.requireNonNull( - execute(request, PrepareRequest.SYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

Note that the bound statements created from the resulting prepared statement will inherit - * some of the attributes of {@code query}; see {@link #prepare(SimpleStatement)} for more - * details. - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - */ - @NonNull - default CompletionStage prepareAsync(@NonNull SimpleStatement statement) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(statement), PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } - - /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). - * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). - */ - @NonNull - default CompletionStage prepareAsync(@NonNull String query) { - return Objects.requireNonNull( - execute(new DefaultPrepareRequest(query), PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); - } +/** + * The default session type built by the driver. + * + *

It provides user-friendly execution methods for: + * + *

    + *
  • CQL requests: synchronous, asynchronous or reactive mode; + *
  • requests specific to DataStax Enterprise: graph and continuous paging. + *
+ *
+ * Client applications can use this interface even if they don't need all the features. In
+ * particular, it can be used with a regular Apache Cassandra® cluster, as long as you don't call
+ * any of the DSE-specific execute methods. If you're in that situation, you might also want to
+ * exclude certain dependencies from your classpath (see the "Integration" page in the user manual).
+ *
+ *

Note that the name "CQL session" is no longer really accurate since this interface can now + * execute other request types; but it was preserved for backward compatibility with previous driver + * versions. + */ +public interface CqlSession + extends Session, + SyncCqlSession, + AsyncCqlSession, + ReactiveSession, + ContinuousSession, + GraphSession, + ContinuousReactiveSession, + ReactiveGraphSession { /** - * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was - * sent, generally before the statement is prepared). + * Returns a builder to create a new instance. * - *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to - * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link - * #prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely have to deal - * with {@link PrepareRequest} directly. + *

Note that this builder is mutable and not thread-safe. * - *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more - * explanations). + * @return {@code CqlSessionBuilder} to create a new instance. */ @NonNull - default CompletionStage prepareAsync(PrepareRequest request) { - return Objects.requireNonNull( - execute(request, PrepareRequest.ASYNC), - "The CQL prepare processor should never return a null result"); + static CqlSessionBuilder builder() { + return new CqlSessionBuilder(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java index 064b6b12779..4598c078dca 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/CqlSessionBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +21,11 @@ import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.NotThreadSafe; -/** Helper class to build a {@link CqlSession} instance. */ +/** + * Helper class to build a {@link CqlSession} instance. + * + *

This class is mutable and not thread-safe. + */ @NotThreadSafe public class CqlSessionBuilder extends SessionBuilder { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java index 34d8875eb8e..2e5a4a6f022 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultConsistencyLevel.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,6 +37,9 @@ public enum DefaultConsistencyLevel implements ConsistencyLevel { SERIAL(ProtocolConstants.ConsistencyLevel.SERIAL), LOCAL_SERIAL(ProtocolConstants.ConsistencyLevel.LOCAL_SERIAL), ; + // Note that, for the sake of convenience, we also expose shortcuts to these constants on the + // ConsistencyLevel interface. If you add a new enum constant, remember to update the interface as + // well. private final int protocolCode; @@ -66,7 +71,7 @@ public boolean isSerial() { return this == SERIAL || this == LOCAL_SERIAL; } - private static Map BY_CODE = mapByCode(values()); + private static final Map BY_CODE = mapByCode(values()); private static Map mapByCode(DefaultConsistencyLevel[] levels) { ImmutableMap.Builder builder = ImmutableMap.builder(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java index feda0c2afc8..91b45fc506a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/DefaultProtocolVersion.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
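Since `CqlSessionBuilder` is now documented as mutable and not thread-safe, each thread should use its own builder instance (the resulting session itself is thread-safe). A typical construction sketch; the contact point and datacenter name are placeholders:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import java.net.InetSocketAddress;

public class BuildSessionExample {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder()
            .addContactPoint(new InetSocketAddress("127.0.0.1", 9042))
            .withLocalDatacenter("datacenter1")
            .build()) {
      System.out.println(session.execute("SELECT release_version FROM system.local").one());
    }
  }
}
```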
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,14 +32,21 @@ public enum DefaultProtocolVersion implements ProtocolVersion { /** Version 4, supported by Cassandra 2.2 and above. */ V4(ProtocolConstants.Version.V4, false), + /** Version 5, supported by Cassandra 4.0 and above. */ + V5(ProtocolConstants.Version.V5, false), + /** - * Version 5, currently supported as a beta preview in Cassandra 3.10 and above. + * Version 6, currently supported as a beta preview in Cassandra 4.0 and above. * *

Do not use this in production. * * @see ProtocolVersion#isBeta() */ - V5(ProtocolConstants.Version.V5, true); + V6(ProtocolConstants.Version.V6, true), + ; + // Note that, for the sake of convenience, we also expose shortcuts to these constants on the + // ProtocolVersion interface. If you add a new enum constant, remember to update the interface as + // well. private final int code; private final boolean beta; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java index 07f79d6e341..f5cf76e29eb 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/DriverException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,7 +41,7 @@ */ public abstract class DriverException extends RuntimeException { - private volatile ExecutionInfo executionInfo; + private transient volatile ExecutionInfo executionInfo; protected DriverException( @Nullable String message, @@ -74,8 +76,14 @@ protected DriverException( * *

Note that this is only set for exceptions that are rethrown directly to the client from a * session call. For example, individual node errors stored in {@link - * AllNodesFailedException#getErrors()} or {@link ExecutionInfo#getErrors()} do not contain their - * own execution info, and therefore return null from this method. + * AllNodesFailedException#getAllErrors()} or {@link ExecutionInfo#getErrors()} do not contain + * their own execution info, and therefore return null from this method. + * + *

This method will also return null for low-level exceptions thrown directly from a driver + * channel, such as {@link com.datastax.oss.driver.api.core.connection.ConnectionInitException} or + * {@link com.datastax.oss.driver.api.core.connection.ClosedConnectionException}. + * + *

It will also be null if you serialize and deserialize an exception. */ public ExecutionInfo getExecutionInfo() { return executionInfo; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java index e7d8e42f2b1..90ff875e375 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/DriverExecutionException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java index 1966606256b..8b4cc5dc5bb 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/DriverTimeoutException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java b/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java index 0b1ec172812..aa3f774800c 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/InvalidKeyspaceException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java index c6cb7ae5831..b3902489a48 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java b/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java index ab26be868dd..3c3f18a5dc2 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/MavenCoordinates.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java index db231adf219..9ef51fb99b6 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/NoNodeAvailableException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +33,7 @@ public NoNodeAvailableException() { } private NoNodeAvailableException(ExecutionInfo executionInfo) { - super("No node was available to execute the query", executionInfo, Collections.emptyMap()); + super("No node was available to execute the query", executionInfo, Collections.emptySet()); } @NonNull diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java new file mode 100644 index 00000000000..5303119844e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/NodeUnavailableException.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; + +/** + * Indicates that a {@link Node} was selected in a query plan, but it had no connection available. + * + *

A common reason to encounter this error is when the configured number of connections per node + * and requests per connection is not high enough to absorb the overall request rate. This can be + * mitigated by tuning the following options: + * + *

+ * <ul>
+ *   <li>{@code advanced.connection.pool.local.size};
+ *   <li>{@code advanced.connection.pool.remote.size};
+ *   <li>{@code advanced.connection.max-requests-per-connection}.
+ * </ul>
+ *
+ * See {@code reference.conf} for more details.
+ *
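To make the tuning advice concrete, a hedged sketch using the driver's programmatic config API (the `DefaultDriverOption` constants appear later in this diff; the values are illustrative, not recommendations):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

// Equivalent to setting the same paths in application.conf.
DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2)
        .withInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE, 1)
        .withInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS, 2048)
        .build();
CqlSession session = CqlSession.builder().withConfigLoader(loader).build();
```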

Another possibility is when you are trying to direct a request {@linkplain + * com.datastax.oss.driver.api.core.cql.Statement#setNode(Node) to a particular node}, but that node + * has no connections available. + */ +public class NodeUnavailableException extends DriverException { + + private final Node node; + + public NodeUnavailableException(Node node) { + super("No connection was available to " + node, null, null, true); + this.node = Objects.requireNonNull(node); + } + + @NonNull + public Node getNode() { + return node; + } + + @Override + @NonNull + public DriverException copy() { + return new NodeUnavailableException(node); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java b/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java index f84c74c8b6a..c2a81b554d0 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/PagingIterable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -102,6 +104,7 @@ default ElementT one() { * reasonable number of results. */ @NonNull + @SuppressWarnings("MixedMutabilityReturnType") default List all() { if (!iterator().hasNext()) { return Collections.emptyList(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java b/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java index e39837cc090..dd69f705453 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/ProtocolVersion.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,7 @@ */ package com.datastax.oss.driver.api.core; +import com.datastax.dse.driver.api.core.DseProtocolVersion; import com.datastax.oss.driver.api.core.detach.Detachable; import edu.umd.cs.findbugs.annotations.NonNull; @@ -26,10 +29,18 @@ * {@code ProtocolVersion}s are {@link DefaultProtocolVersion} instances. */ public interface ProtocolVersion { + + ProtocolVersion V3 = DefaultProtocolVersion.V3; + ProtocolVersion V4 = DefaultProtocolVersion.V4; + ProtocolVersion V5 = DefaultProtocolVersion.V5; + ProtocolVersion V6 = DefaultProtocolVersion.V6; + ProtocolVersion DSE_V1 = DseProtocolVersion.DSE_V1; + ProtocolVersion DSE_V2 = DseProtocolVersion.DSE_V2; + /** The default version used for {@link Detachable detached} objects. */ // Implementation note: we can't use the ProtocolVersionRegistry here, this has to be a // compile-time constant. - ProtocolVersion DEFAULT = DefaultProtocolVersion.V4; + ProtocolVersion DEFAULT = DefaultProtocolVersion.V5; /** * A numeric code that uniquely identifies the version (this is the code used in network frames). diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java b/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java index 5bd44fcb1d2..acf569d55f6 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/RequestThrottlingException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java index d80eba55514..030984dc274 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/UnsupportedProtocolVersionException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/Version.java b/core/src/main/java/com/datastax/oss/driver/api/core/Version.java index f70db10c252..52751e02984 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/Version.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/Version.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -36,16 +39,25 @@ * are ignored for sorting versions. 
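As a quick illustration of the sorting rule quoted above, a sketch (assuming, per that javadoc, that build labels do not participate in comparisons):

```java
Version a = Version.parse("4.0.0+build20240101");
Version b = Version.parse("4.0.0");
assert a != null && b != null;
assert a.compareTo(b) == 0; // the build label is ignored for sorting
assert Version.V3_0_0.compareTo(b) < 0;
```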
*/ @Immutable -public class Version implements Comparable { +public class Version implements Comparable, Serializable { + + private static final long serialVersionUID = 1; private static final String VERSION_REGEXP = - "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:\\-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; + "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; + private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); - public static final Version V2_1_0 = parse("2.1.0"); - public static final Version V2_2_0 = parse("2.2.0"); - public static final Version V3_0_0 = parse("3.0.0"); - public static final Version V4_0_0 = parse("4.0.0"); + @NonNull public static final Version V1_0_0 = Objects.requireNonNull(parse("1.0.0")); + @NonNull public static final Version V2_1_0 = Objects.requireNonNull(parse("2.1.0")); + @NonNull public static final Version V2_2_0 = Objects.requireNonNull(parse("2.2.0")); + @NonNull public static final Version V3_0_0 = Objects.requireNonNull(parse("3.0.0")); + @NonNull public static final Version V4_0_0 = Objects.requireNonNull(parse("4.0.0")); + @NonNull public static final Version V4_1_0 = Objects.requireNonNull(parse("4.1.0")); + @NonNull public static final Version V5_0_0 = Objects.requireNonNull(parse("5.0.0")); + @NonNull public static final Version V6_7_0 = Objects.requireNonNull(parse("6.7.0")); + @NonNull public static final Version V6_8_0 = Objects.requireNonNull(parse("6.8.0")); + @NonNull public static final Version V6_9_0 = Objects.requireNonNull(parse("6.9.0")); private final int major; private final int minor; @@ -111,7 +123,7 @@ public static Version parse(@Nullable String version) { pr == null || pr.isEmpty() ? null : pr.substring(1) - .split("\\-"); // drop initial '-' or '~' then split on the remaining ones + .split("-"); // drop initial '-' or '~' then split on the remaining ones String bl = matcher.group(6); String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' @@ -251,9 +263,7 @@ public int compareTo(@NonNull Version other) { } } - return preReleases.length == other.preReleases.length - ? 0 - : (preReleases.length < other.preReleases.length ? -1 : 1); + return Integer.compare(preReleases.length, other.preReleases.length); } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java index c80e16d3363..47ce62f1461 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java index e85b90e3b04..c73c3e4fb67 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthProvider.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java index abf77e293d5..28dde2123cb 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/AuthenticationException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java index dd92762577e..150a1dfb63f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/Authenticator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,7 +59,11 @@ public interface Authenticator { * Obtain an initial response token for initializing the SASL handshake. * * @return a completion stage that will complete with the initial response to send to the server - * (which may be {@code null}). + * (which may be {@code null}). Note that, if the returned byte buffer is writable, the driver + * will clear its contents immediately after use (to avoid keeping sensitive + * information in memory); do not reuse the same buffer across multiple invocations. + * Alternatively, if the contents are not sensitive, you can make the buffer {@linkplain + * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. */ @NonNull CompletionStage initialResponse(); @@ -68,7 +74,11 @@ public interface Authenticator { * * @param challenge the server's SASL challenge. * @return a completion stage that will complete with the updated SASL token (which may be null to - * indicate the client requires no further action). + * indicate the client requires no further action). Note that, if the returned byte buffer is + * writable, the driver will clear its contents immediately after use (to avoid keeping + * sensitive information in memory); do not reuse the same buffer across multiple invocations. + * Alternatively, if the contents are not sensitive, you can make the buffer {@linkplain + * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. */ @NonNull CompletionStage evaluateChallenge(@Nullable ByteBuffer challenge); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java new file mode 100644 index 00000000000..fb85797af9e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.auth; + +import com.datastax.dse.driver.api.core.auth.BaseDseAuthenticator; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Objects; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Common infrastructure for plain text auth providers. + * + *

This can be reused to write an implementation that retrieves the credentials from a + * source other than the configuration. The driver offers one built-in implementation: {@link + * ProgrammaticPlainTextAuthProvider}. + */ +@ThreadSafe +public abstract class PlainTextAuthProviderBase implements AuthProvider { + + private static final Logger LOG = LoggerFactory.getLogger(PlainTextAuthProviderBase.class); + + private final String logPrefix; + + /** + * @param logPrefix a string that will get prepended to the logs (this is used to distinguish + * between multiple driver instances executing in the same JVM). Built-in + * implementations fill this with {@link Session#getName()}. + */ + protected PlainTextAuthProviderBase(@NonNull String logPrefix) { + this.logPrefix = Objects.requireNonNull(logPrefix); + } + + /** + * Retrieves the credentials from the underlying source. + * + *
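As a sketch of the extension point described above: a hypothetical subclass that resolves credentials from environment variables (the class name and variable names are invented for illustration):

```java
import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase;
import com.datastax.oss.driver.api.core.metadata.EndPoint;
import edu.umd.cs.findbugs.annotations.NonNull;

public class EnvVarPlainTextAuthProvider extends PlainTextAuthProviderBase {

  public EnvVarPlainTextAuthProvider() {
    super(""); // the session name is not known yet; the prefix is only used in logs
  }

  @NonNull
  @Override
  protected Credentials getCredentials(
      @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) {
    // Called for every new connection, so rotated values are picked up automatically.
    // Assumes both environment variables are set.
    return new Credentials(
        System.getenv("CASSANDRA_USER").toCharArray(),
        System.getenv("CASSANDRA_PASSWORD").toCharArray());
  }
}
```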

This is invoked every time the driver opens a new connection. + * + * @param endPoint The endpoint being contacted. + * @param serverAuthenticator The authenticator class sent by the endpoint. + */ + @NonNull + protected abstract Credentials getCredentials( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator); + + @NonNull + @Override + public Authenticator newAuthenticator( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) + throws AuthenticationException { + return new PlainTextAuthenticator( + getCredentials(endPoint, serverAuthenticator), endPoint, serverAuthenticator); + } + + @Override + public void onMissingChallenge(@NonNull EndPoint endPoint) { + LOG.warn( + "[{}] {} did not send an authentication challenge; " + + "This is suspicious because the driver expects authentication", + logPrefix, + endPoint); + } + + @Override + public void close() { + // nothing to do + } + + public static class Credentials { + + private final char[] username; + private final char[] password; + private final char[] authorizationId; + + /** + * Builds an instance for username/password authentication, and proxy authentication with the + * given authorizationId. + * + *

This feature is only available with DataStax Enterprise. If the target server is Apache + * Cassandra, the authorizationId will be ignored. + */ + public Credentials( + @NonNull char[] username, @NonNull char[] password, @NonNull char[] authorizationId) { + this.username = Objects.requireNonNull(username); + this.password = Objects.requireNonNull(password); + this.authorizationId = Objects.requireNonNull(authorizationId); + } + + /** Builds an instance for simple username/password authentication. */ + public Credentials(@NonNull char[] username, @NonNull char[] password) { + this(username, password, new char[0]); + } + + @NonNull + public char[] getUsername() { + return username; + } + + /** + * @deprecated this method only exists for backward compatibility. It is a synonym for {@link + * #getUsername()}, which should be used instead. + */ + @Deprecated + @NonNull + public char[] getAuthenticationId() { + return username; + } + + @NonNull + public char[] getPassword() { + return password; + } + + @NonNull + public char[] getAuthorizationId() { + return authorizationId; + } + + /** Clears the credentials from memory when they're no longer needed. */ + protected void clear() { + // Note: this is a bit irrelevant with the built-in provider, because the config already + // caches the credentials in memory. But it might be useful for a custom implementation that + // retrieves the credentials from a different source. + Arrays.fill(getUsername(), (char) 0); + Arrays.fill(getPassword(), (char) 0); + Arrays.fill(getAuthorizationId(), (char) 0); + } + } + + // Implementation note: BaseDseAuthenticator is backward compatible with Cassandra authenticators. + // This will work with both Cassandra (as long as no authorizationId is set) and DSE. + protected static class PlainTextAuthenticator extends BaseDseAuthenticator { + + private static final ByteBuffer MECHANISM = + ByteBuffer.wrap("PLAIN".getBytes(StandardCharsets.UTF_8)).asReadOnlyBuffer(); + + private static final ByteBuffer SERVER_INITIAL_CHALLENGE = + ByteBuffer.wrap("PLAIN-START".getBytes(StandardCharsets.UTF_8)).asReadOnlyBuffer(); + + private static final EndPoint DUMMY_END_POINT = + new EndPoint() { + @NonNull + @Override + public SocketAddress resolve() { + return new InetSocketAddress("127.0.0.1", 9042); + } + + @NonNull + @Override + public String asMetricPrefix() { + return ""; // will never be used + } + }; + + private final ByteBuffer encodedCredentials; + private final EndPoint endPoint; + + protected PlainTextAuthenticator( + @NonNull Credentials credentials, + @NonNull EndPoint endPoint, + @NonNull String serverAuthenticator) { + super(serverAuthenticator); + + Objects.requireNonNull(credentials); + Objects.requireNonNull(endPoint); + + ByteBuffer authorizationId = toUtf8Bytes(credentials.getAuthorizationId()); + ByteBuffer username = toUtf8Bytes(credentials.getUsername()); + ByteBuffer password = toUtf8Bytes(credentials.getPassword()); + + this.encodedCredentials = + ByteBuffer.allocate( + authorizationId.remaining() + username.remaining() + password.remaining() + 2); + encodedCredentials.put(authorizationId); + encodedCredentials.put((byte) 0); + encodedCredentials.put(username); + encodedCredentials.put((byte) 0); + encodedCredentials.put(password); + encodedCredentials.flip(); + + clear(authorizationId); + clear(username); + clear(password); + + this.endPoint = endPoint; + } + + /** + * @deprecated Preserved for backward compatibility, implementors should use the 3-arg + * constructor {@code 
PlainTextAuthenticator(Credentials, EndPoint, String)} instead. + */ + @Deprecated + protected PlainTextAuthenticator(@NonNull Credentials credentials) { + this( + credentials, + // It's unlikely that this class was ever extended by third parties, but if it was, assume + // that it was not written for DSE: + // - dummy end point because we should never need to build an auth exception + DUMMY_END_POINT, + // - default OSS authenticator name (the only thing that matters is how this string + // compares to "DseAuthenticator") + "org.apache.cassandra.auth.PasswordAuthenticator"); + } + + private static ByteBuffer toUtf8Bytes(char[] charArray) { + CharBuffer charBuffer = CharBuffer.wrap(charArray); + return Charsets.UTF_8.encode(charBuffer); + } + + private static void clear(ByteBuffer buffer) { + buffer.rewind(); + while (buffer.remaining() > 0) { + buffer.put((byte) 0); + } + } + + @NonNull + @Override + public ByteBuffer getMechanism() { + return MECHANISM; + } + + @NonNull + @Override + public ByteBuffer getInitialServerChallenge() { + return SERVER_INITIAL_CHALLENGE; + } + + @Nullable + @Override + public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge) { + if (SERVER_INITIAL_CHALLENGE.equals(challenge)) { + return encodedCredentials; + } + throw new AuthenticationException(endPoint, "Incorrect challenge from server"); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java new file mode 100644 index 00000000000..d991f5c5cb5 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.auth; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.internal.core.util.Strings; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import net.jcip.annotations.ThreadSafe; + +/** + * A simple plaintext {@link AuthProvider} that receives the credentials programmatically instead of + * pulling them from the configuration. + * + *

To use this class, create an instance with the appropriate credentials to use and pass it to + * your session builder: + * + *
+ * <pre>
+ * AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("...", "...");
+ * CqlSession session =
+ *     CqlSession.builder()
+ *         .addContactEndPoints(...)
+ *         .withAuthProvider(authProvider)
+ *         .build();
+ * </pre>
+ * + *

It also offers the possibility of changing the credentials at runtime. The new credentials + * will be used for all connections initiated after the change. + * + *
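A short sketch of the runtime-change behavior just described (credential values are placeholders):

```java
ProgrammaticPlainTextAuthProvider authProvider =
    new ProgrammaticPlainTextAuthProvider("cassandra", "initial-secret");
CqlSession session = CqlSession.builder().withAuthProvider(authProvider).build();

// Later, e.g. after a secret store rotates the password: only connections opened
// from this point on authenticate with the new value.
authProvider.setPassword("rotated-secret");
```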

Implementation Note: this implementation is not particularly suited for highly sensitive + * applications: it stores the credentials to use as private fields, and even though the fields are char + * arrays rather than strings to make it difficult to dump their contents, they are never cleared + * until the provider itself is garbage-collected, which typically only happens when the session is + * closed. + * + * @see SessionBuilder#withAuthProvider(AuthProvider) + * @see SessionBuilder#withAuthCredentials(String, String) + * @see SessionBuilder#withAuthCredentials(String, String, String) + */ +@ThreadSafe +public class ProgrammaticPlainTextAuthProvider extends PlainTextAuthProviderBase { + + private volatile char[] username; + private volatile char[] password; + private volatile char[] authorizationId; + + /** Builds an instance for simple username/password authentication. */ + public ProgrammaticPlainTextAuthProvider(@NonNull String username, @NonNull String password) { + this(username, password, ""); + } + + /** + * Builds an instance for username/password authentication, and proxy authentication with the + * given authorizationId. + * + *

This feature is only available with DataStax Enterprise. If the target server is Apache + * Cassandra, use {@link #ProgrammaticPlainTextAuthProvider(String, String)} instead, or set the + * authorizationId to an empty string. + */ + public ProgrammaticPlainTextAuthProvider( + @NonNull String username, @NonNull String password, @NonNull String authorizationId) { + // This will typically be built before the session so we don't know the log prefix yet. Pass an + // empty string, it's only used in one log message. + super(""); + this.username = Strings.requireNotEmpty(username, "username").toCharArray(); + this.password = Strings.requireNotEmpty(password, "password").toCharArray(); + this.authorizationId = + Objects.requireNonNull(authorizationId, "authorizationId cannot be null").toCharArray(); + } + + /** + * Changes the username. + * + *

The new credentials will be used for all connections initiated after this method was called. + * + * @param username the new name. + */ + public void setUsername(@NonNull String username) { + this.username = Strings.requireNotEmpty(username, "username").toCharArray(); + } + + /** + * Changes the password. + * + *

The new credentials will be used for all connections initiated after this method was called. + * + * @param password the new password. + */ + public void setPassword(@NonNull String password) { + this.password = Strings.requireNotEmpty(password, "password").toCharArray(); + } + + /** + * Changes the authorization id. + * + *

The new credentials will be used for all connections initiated after this method was called. + * + *

This feature is only available with DataStax Enterprise. If the target server is Apache + * Cassandra, this method should not be used. + * + * @param authorizationId the new authorization id. + */ + public void setAuthorizationId(@NonNull String authorizationId) { + this.authorizationId = + Objects.requireNonNull(authorizationId, "authorizationId cannot be null").toCharArray(); + } + + /** + * {@inheritDoc} + * + *

This implementation disregards the endpoint being connected to as well as the authenticator + * class sent by the server, and always returns the same credentials. + */ + @NonNull + @Override + protected Credentials getCredentials( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { + return new Credentials(username.clone(), password.clone(), authorizationId.clone()); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java index d2d1d5d5f3b..016ac25680b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/SyncAuthenticator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,7 +37,11 @@ public interface SyncAuthenticator extends Authenticator { *

{@link #initialResponse()} calls this and wraps the result in an immediately completed * future. * - * @return The initial response to send to the server (which may be {@code null}). + * @return The initial response to send to the server (which may be {@code null}). Note that, if + * the returned byte buffer is writable, the driver will clear its contents immediately + * after use (to avoid keeping sensitive information in memory); do not reuse the same buffer + * across multiple invocations. Alternatively, if the contents are not sensitive, you can make + * the buffer {@linkplain ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. */ @Nullable ByteBuffer initialResponseSync(); @@ -48,7 +54,11 @@ public interface SyncAuthenticator extends Authenticator { * * @param challenge the server's SASL challenge; may be {@code null}. * @return The updated SASL token (which may be {@code null} to indicate the client requires no - * further action). + * further action). Note that, if the returned byte buffer is writable, the driver will + * clear its contents immediately after use (to avoid keeping sensitive information in + * memory); do not reuse the same buffer across multiple invocations. Alternatively, if the + * contents are not sensitive, you can make the buffer {@linkplain + * ByteBuffer#asReadOnlyBuffer() read-only} and safely reuse it. */ @Nullable ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer challenge); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java index d5d4efd9c9d..b265b9ba463 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/auth/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java index 89d8365de78..60c44193577 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/DefaultDriverOption.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,157 +25,1023 @@ *

Refer to {@code reference.conf} in the driver codebase for a full description of each option. */ public enum DefaultDriverOption implements DriverOption { + /** + * The contact points to use for the initial connection to the cluster. + * + *
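To illustrate how these constants are consumed, a hedged sketch: each enum value wraps a config path that can be set in `application.conf` or through the programmatic API (the address is a placeholder):

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.util.Collections;

// In application.conf this would read:
//   datastax-java-driver.basic.contact-points = ["127.0.0.1:9042"]
DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withStringList(
            DefaultDriverOption.CONTACT_POINTS,
            Collections.singletonList("127.0.0.1:9042"))
        .build();
```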

Value-type: {@link java.util.List List}<{@link String}> + */ CONTACT_POINTS("basic.contact-points"), + /** + * A name that uniquely identifies the driver instance. + * + *

Value-type: {@link String} + */ SESSION_NAME("basic.session-name"), + /** + * The name of the keyspace that the session should initially be connected to. + * + *

Value-type: {@link String} + */ SESSION_KEYSPACE("basic.session-keyspace"), + /** + * How often the driver tries to reload the configuration. + * + *

Value-type: {@link java.time.Duration Duration} + */ CONFIG_RELOAD_INTERVAL("basic.config-reload-interval"), + /** + * How long the driver waits for a request to complete. + * + *

Value-type: {@link java.time.Duration Duration} + */ REQUEST_TIMEOUT("basic.request.timeout"), + /** + * The consistency level. + * + *

Value-type: {@link String} + */ REQUEST_CONSISTENCY("basic.request.consistency"), + /** + * The page size. + * + *

Value-type: int + */ REQUEST_PAGE_SIZE("basic.request.page-size"), + /** + * The serial consistency level. + * + *

Value-type: {@link String} + */ REQUEST_SERIAL_CONSISTENCY("basic.request.serial-consistency"), + /** + * The default idempotence of a request. + * + *
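The request options above set session-wide defaults; the same settings can also be overridden per statement, as in this sketch (the query is a placeholder):

```java
import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

SimpleStatement statement =
    SimpleStatement.newInstance("SELECT release_version FROM system.local")
        .setConsistencyLevel(DefaultConsistencyLevel.LOCAL_QUORUM)
        .setIdempotent(true); // overrides basic.request.default-idempotence
```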

Value-type: boolean + */ REQUEST_DEFAULT_IDEMPOTENCE("basic.request.default-idempotence"), + // LOAD_BALANCING_POLICY is a collection of sub-properties LOAD_BALANCING_POLICY("basic.load-balancing-policy"), + /** + * The class of the load balancing policy. + * + *

Value-type: {@link String} + */ LOAD_BALANCING_POLICY_CLASS("basic.load-balancing-policy.class"), + /** + * The datacenter that is considered "local". + * + *

Value-type: {@link String} + */ LOAD_BALANCING_LOCAL_DATACENTER("basic.load-balancing-policy.local-datacenter"), + /** + * A custom filter to include/exclude nodes. + * + *

Value-type: {@link String} + * + * @deprecated use {@link #LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS} instead. + */ + @Deprecated LOAD_BALANCING_FILTER_CLASS("basic.load-balancing-policy.filter.class"), + /** + * The timeout to use for internal queries that run as part of the initialization process. + * + *

Value-type: {@link java.time.Duration Duration} + */ CONNECTION_INIT_QUERY_TIMEOUT("advanced.connection.init-query-timeout"), + /** + * The timeout to use when the driver changes the keyspace on a connection at runtime. + * + *

Value-type: {@link java.time.Duration Duration} + */ CONNECTION_SET_KEYSPACE_TIMEOUT("advanced.connection.set-keyspace-timeout"), + /** + * The maximum number of requests that can be executed concurrently on a connection. + * + *

Value-type: int + */ CONNECTION_MAX_REQUESTS("advanced.connection.max-requests-per-connection"), + /** + * The maximum number of "orphaned" requests before a connection gets closed automatically. + * + *

Value-type: int + */ CONNECTION_MAX_ORPHAN_REQUESTS("advanced.connection.max-orphan-requests"), + /** + * Whether to log non-fatal errors when the driver tries to open a new connection. + * + *

Value-type: boolean + */ CONNECTION_WARN_INIT_ERROR("advanced.connection.warn-on-init-error"), + /** + * The number of connections in the LOCAL pool. + * + *

Value-type: int + */ CONNECTION_POOL_LOCAL_SIZE("advanced.connection.pool.local.size"), + /** + * The number of connections in the REMOTE pool. + * + *

Value-type: int + */ CONNECTION_POOL_REMOTE_SIZE("advanced.connection.pool.remote.size"), + /** + * Whether to schedule reconnection attempts if all contact points are unreachable on the first + * initialization attempt. + * + *

Value-type: boolean + */ RECONNECT_ON_INIT("advanced.reconnect-on-init"), + /** + * The class of the reconnection policy. + * + *

Value-type: {@link String} + */ RECONNECTION_POLICY_CLASS("advanced.reconnection-policy.class"), + /** + * Base delay for computing time between reconnection attempts. + * + *

Value-type: {@link java.time.Duration Duration} + */ RECONNECTION_BASE_DELAY("advanced.reconnection-policy.base-delay"), + /** + * Maximum delay between reconnection attempts. + * + *

Value-type: {@link java.time.Duration Duration} + */ RECONNECTION_MAX_DELAY("advanced.reconnection-policy.max-delay"), + // RETRY_POLICY is a collection of sub-properties RETRY_POLICY("advanced.retry-policy"), + /** + * The class of the retry policy. + * + *

Value-type: {@link String} + */ RETRY_POLICY_CLASS("advanced.retry-policy.class"), + // SPECULATIVE_EXECUTION_POLICY is a collection of sub-properties SPECULATIVE_EXECUTION_POLICY("advanced.speculative-execution-policy"), + /** + * The class of the speculative execution policy. + * + *

Value-type: {@link String} + */ SPECULATIVE_EXECUTION_POLICY_CLASS("advanced.speculative-execution-policy.class"), + /** + * The maximum number of executions. + * + *

Value-type: int + */ SPECULATIVE_EXECUTION_MAX("advanced.speculative-execution-policy.max-executions"), + /** + * The delay between each execution. + * + *

Value-type: {@link java.time.Duration Duration} + */ SPECULATIVE_EXECUTION_DELAY("advanced.speculative-execution-policy.delay"), + /** + * The class of the authentication provider. + * + *
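A sketch tying together the three speculative-execution options just listed (values are illustrative; the policy class name follows the driver's `reference.conf`, and speculative executions only apply to idempotent requests):

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.time.Duration;

DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withString(
            DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS,
            "ConstantSpeculativeExecutionPolicy")
        .withInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, 3)
        .withDuration(DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, Duration.ofMillis(100))
        .build();
```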

Value-type: {@link String} + */ AUTH_PROVIDER_CLASS("advanced.auth-provider.class"), + /** + * Plain text auth provider username. + * + *

Value-type: {@link String} + */ AUTH_PROVIDER_USER_NAME("advanced.auth-provider.username"), + /** + * Plain text auth provider password. + * + *

Value-type: {@link String} + */ AUTH_PROVIDER_PASSWORD("advanced.auth-provider.password"), + /** + * The class of the SSL Engine Factory. + * + *

Value-type: {@link String} + */ SSL_ENGINE_FACTORY_CLASS("advanced.ssl-engine-factory.class"), + /** + * The cipher suites to enable when creating an SSLEngine for a connection. + * + *

Value type: {@link java.util.List List}<{@link String}> + */ SSL_CIPHER_SUITES("advanced.ssl-engine-factory.cipher-suites"), + /** + * Whether or not to require validation that the hostname of the server certificate's common name + * matches the hostname of the server being connected to. + * + *

Value-type: boolean + */ SSL_HOSTNAME_VALIDATION("advanced.ssl-engine-factory.hostname-validation"), + /** + * The location of the keystore file. + * + *

Value-type: {@link String} + */ SSL_KEYSTORE_PATH("advanced.ssl-engine-factory.keystore-path"), + /** + * The keystore password. + * + *

Value-type: {@link String} + */ SSL_KEYSTORE_PASSWORD("advanced.ssl-engine-factory.keystore-password"), + /** + * The location of the truststore file. + * + *

Value-type: {@link String} + */ SSL_TRUSTSTORE_PATH("advanced.ssl-engine-factory.truststore-path"), + /** + * The truststore password. + * + *

Value-type: {@link String} + */ SSL_TRUSTSTORE_PASSWORD("advanced.ssl-engine-factory.truststore-password"), + /** + * The class of the generator that assigns a microsecond timestamp to each request. + * + *
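The SSL options above are typically set together; here is a sketch using the fromString() factory introduced later in this patch (the truststore path and password are placeholders):

```java
DriverConfigLoader loader =
    DriverConfigLoader.fromString(
        "datastax-java-driver.advanced.ssl-engine-factory {\n"
            + "  class = DefaultSslEngineFactory\n"
            + "  hostname-validation = true\n"
            + "  truststore-path = /path/to/client.truststore\n"
            + "  truststore-password = changeit\n"
            + "}");
```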

Value-type: {@link String} + */ TIMESTAMP_GENERATOR_CLASS("advanced.timestamp-generator.class"), + /** + * Whether to force the driver to use Java's millisecond-precision system clock. + * + *

Value-type: boolean + */ TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK("advanced.timestamp-generator.force-java-clock"), + /** + * How far in the future timestamps are allowed to drift before the warning is logged. + * + *

Value-type: {@link java.time.Duration Duration} + */ TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD( "advanced.timestamp-generator.drift-warning.threshold"), + /** + * How often the warning will be logged if timestamps keep drifting above the threshold. + * + *

Value-type: {@link java.time.Duration Duration} + */ TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL("advanced.timestamp-generator.drift-warning.interval"), + /** + * The class of a session-wide component that tracks the outcome of requests. + * + *

Value-type: {@link String} + * + * @deprecated Use {@link #REQUEST_TRACKER_CLASSES} instead. + */ + @Deprecated REQUEST_TRACKER_CLASS("advanced.request-tracker.class"), + /** + * Whether to log successful requests. + * + *

Value-type: boolean + */ REQUEST_LOGGER_SUCCESS_ENABLED("advanced.request-tracker.logs.success.enabled"), + /** + * The threshold to classify a successful request as "slow". + * + *

Value-type: {@link java.time.Duration Duration} + */ REQUEST_LOGGER_SLOW_THRESHOLD("advanced.request-tracker.logs.slow.threshold"), + /** + * Whether to log slow requests. + * + *

Value-type: boolean + */ REQUEST_LOGGER_SLOW_ENABLED("advanced.request-tracker.logs.slow.enabled"), + /** + * Whether to log failed requests. + * + *

Value-type: boolean + */ REQUEST_LOGGER_ERROR_ENABLED("advanced.request-tracker.logs.error.enabled"), + /** + * The maximum length of the query string in the log message. + * + *

Value-type: int + */ REQUEST_LOGGER_MAX_QUERY_LENGTH("advanced.request-tracker.logs.max-query-length"), + /** + * Whether to log bound values in addition to the query string. + * + *

Value-type: boolean + */ REQUEST_LOGGER_VALUES("advanced.request-tracker.logs.show-values"), + /** + * The maximum length for bound values in the log message. + * + *

Value-type: int + */ REQUEST_LOGGER_MAX_VALUE_LENGTH("advanced.request-tracker.logs.max-value-length"), + /** + * The maximum number of bound values to log. + * + *

Value-type: int + */ REQUEST_LOGGER_MAX_VALUES("advanced.request-tracker.logs.max-values"), + /** + * Whether to log stack traces for failed queries. + * + *

Value-type: boolean + */ REQUEST_LOGGER_STACK_TRACES("advanced.request-tracker.logs.show-stack-traces"), + /** + * The class of a session-wide component that controls the rate at which requests are executed. + * + *

Value-type: {@link String} + */ REQUEST_THROTTLER_CLASS("advanced.throttler.class"), + /** + * The maximum number of requests that are allowed to execute in parallel. + * + *

Value-type: int + */ REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS("advanced.throttler.max-concurrent-requests"), + /** + * The maximum allowed request rate. + * + *

Value-type: int + */ REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND("advanced.throttler.max-requests-per-second"), + /** + * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. + * + *

Value-type: int + */ REQUEST_THROTTLER_MAX_QUEUE_SIZE("advanced.throttler.max-queue-size"), + /** + * How often the throttler attempts to dequeue requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ REQUEST_THROTTLER_DRAIN_INTERVAL("advanced.throttler.drain-interval"), + /** + * The class of a session-wide component that listens for node state changes. + * + *
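To see how the throttler options fit together, here is a sketch enabling the driver's built-in rate-based throttler (the numbers are illustrative only, not recommendations):

```java
DriverConfigLoader loader =
    DriverConfigLoader.fromString(
        "datastax-java-driver.advanced.throttler {\n"
            + "  class = RateLimitingRequestThrottler\n"
            + "  max-requests-per-second = 1000\n"
            + "  max-queue-size = 10000\n"
            + "  drain-interval = 10 milliseconds\n"
            + "}");
```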

Value-type: {@link String} + * + * @deprecated Use {@link #METADATA_NODE_STATE_LISTENER_CLASSES} instead. + */ + @Deprecated METADATA_NODE_STATE_LISTENER_CLASS("advanced.node-state-listener.class"), + /** + * The class of a session-wide component that listens for schema changes. + * + *

Value-type: {@link String} + * + * @deprecated Use {@link #METADATA_SCHEMA_CHANGE_LISTENER_CLASSES} instead. + */ + @Deprecated METADATA_SCHEMA_CHANGE_LISTENER_CLASS("advanced.schema-change-listener.class"), + /** + * The class of the address translator to use to convert the addresses sent by Cassandra nodes + * into ones that the driver uses to connect. + * + *

Value-type: {@link String} + */ ADDRESS_TRANSLATOR_CLASS("advanced.address-translator.class"), + /** + * The native protocol version to use. + * + *

Value-type: {@link String} + */ PROTOCOL_VERSION("advanced.protocol.version"), + /** + * The name of the algorithm used to compress protocol frames. + * + *

Value-type: {@link String} + */ PROTOCOL_COMPRESSION("advanced.protocol.compression"), + /** + * The maximum length, in bytes, of the frames supported by the driver. + * + *

Value-type: long + */ PROTOCOL_MAX_FRAME_LENGTH("advanced.protocol.max-frame-length"), + /** + * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active + * keyspace. + * + *

Value-type: boolean + */ REQUEST_WARN_IF_SET_KEYSPACE("advanced.request.warn-if-set-keyspace"), + /** + * How many times the driver will attempt to fetch the query trace if it is not ready yet. + * + *

Value-type: int + */ REQUEST_TRACE_ATTEMPTS("advanced.request.trace.attempts"), + /** + * The interval between each attempt. + * + *

Value-type: {@link java.time.Duration Duration} + */ REQUEST_TRACE_INTERVAL("advanced.request.trace.interval"), + /** + * The consistency level to use for trace queries. + * + *

Value-type: {@link String} + */ REQUEST_TRACE_CONSISTENCY("advanced.request.trace.consistency"), + /** + * List of enabled session-level metrics. + * + *

Value type: {@link java.util.List List}<{@link String}> + */ METRICS_SESSION_ENABLED("advanced.metrics.session.enabled"), + /** + * List of enabled node-level metrics. + * + *

Value type: {@link java.util.List List}<{@link String}> + */ METRICS_NODE_ENABLED("advanced.metrics.node.enabled"), + /** + * The largest latency that we expect to record for requests. + * + *
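The two list options above take metric names as they appear in reference.conf; for example (a sketch, the selection of metrics is arbitrary):

```java
DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withStringList(
            DefaultDriverOption.METRICS_SESSION_ENABLED,
            Arrays.asList("cql-requests", "connected-nodes"))
        .withStringList(
            DefaultDriverOption.METRICS_NODE_ENABLED,
            Arrays.asList("cql-messages", "pool.open-connections"))
        .build();
```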

Value-type: {@link java.time.Duration Duration} + */ METRICS_SESSION_CQL_REQUESTS_HIGHEST("advanced.metrics.session.cql-requests.highest-latency"), + /** + * The number of significant decimal digits to which internal structures will maintain value + * resolution for requests. + * + *

Value-type: int + */ METRICS_SESSION_CQL_REQUESTS_DIGITS("advanced.metrics.session.cql-requests.significant-digits"), + /** + * The interval at which percentile data is refreshed for requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ METRICS_SESSION_CQL_REQUESTS_INTERVAL("advanced.metrics.session.cql-requests.refresh-interval"), + /** + * The largest latency that we expect to record for throttling. + * + *

Value-type: {@link java.time.Duration Duration} + */ METRICS_SESSION_THROTTLING_HIGHEST("advanced.metrics.session.throttling.delay.highest-latency"), + /** + * The number of significant decimal digits to which internal structures will maintain value + * resolution for throttling. + * + *

Value-type: int + */ METRICS_SESSION_THROTTLING_DIGITS("advanced.metrics.session.throttling.delay.significant-digits"), + /** + * The interval at which percentile data is refreshed for throttling. + * + *

Value-type: {@link java.time.Duration Duration} + */ METRICS_SESSION_THROTTLING_INTERVAL("advanced.metrics.session.throttling.delay.refresh-interval"), + /** + * The largest latency that we expect to record for requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ METRICS_NODE_CQL_MESSAGES_HIGHEST("advanced.metrics.node.cql-messages.highest-latency"), + /** + * The number of significant decimal digits to which internal structures will maintain value + * resolution for requests. + * + *

Value-type: int + */ METRICS_NODE_CQL_MESSAGES_DIGITS("advanced.metrics.node.cql-messages.significant-digits"), + /** + * The interval at which percentile data is refreshed for requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ METRICS_NODE_CQL_MESSAGES_INTERVAL("advanced.metrics.node.cql-messages.refresh-interval"), + /** + * Whether or not to disable the Nagle algorithm. + * + *

Value-type: boolean + */ SOCKET_TCP_NODELAY("advanced.socket.tcp-no-delay"), + /** + * Whether or not to enable TCP keep-alive probes. + * + *

Value-type: boolean + */ SOCKET_KEEP_ALIVE("advanced.socket.keep-alive"), + /** + * Whether or not to allow address reuse. + * + *

Value-type: boolean + */ SOCKET_REUSE_ADDRESS("advanced.socket.reuse-address"), + /** + * Sets the linger interval. + * + *

Value-type: int + */ SOCKET_LINGER_INTERVAL("advanced.socket.linger-interval"), + /** + * Sets a hint to the size of the underlying buffers for incoming network I/O. + * + *

Value-type: int + */ SOCKET_RECEIVE_BUFFER_SIZE("advanced.socket.receive-buffer-size"), + /** + * Sets a hint to the size of the underlying buffers for outgoing network I/O. + * + *

Value-type: int + */ SOCKET_SEND_BUFFER_SIZE("advanced.socket.send-buffer-size"), + /** + * The connection heartbeat interval. + * + *

Value-type: {@link java.time.Duration Duration} + */ HEARTBEAT_INTERVAL("advanced.heartbeat.interval"), + /** + * How long the driver waits for the response to a heartbeat. + * + *

Value-type: {@link java.time.Duration Duration} + */ HEARTBEAT_TIMEOUT("advanced.heartbeat.timeout"), + /** + * How long the driver waits to propagate a Topology event. + * + *

Value-type: {@link java.time.Duration Duration} + */ METADATA_TOPOLOGY_WINDOW("advanced.metadata.topology-event-debouncer.window"), + /** + * The maximum number of events that can accumulate. + * + *

Value-type: int + */ METADATA_TOPOLOGY_MAX_EVENTS("advanced.metadata.topology-event-debouncer.max-events"), + /** + * Whether schema metadata is enabled. + * + *

Value-type: boolean + */ METADATA_SCHEMA_ENABLED("advanced.metadata.schema.enabled"), + /** + * The timeout for the requests to the schema tables. + * + *

Value-type: {@link java.time.Duration Duration} + */ METADATA_SCHEMA_REQUEST_TIMEOUT("advanced.metadata.schema.request-timeout"), + /** + * The page size for the requests to the schema tables. + * + *

Value-type: int + */ METADATA_SCHEMA_REQUEST_PAGE_SIZE("advanced.metadata.schema.request-page-size"), + /** + * The list of keyspaces for which schema and token metadata should be maintained. + * + *

Value type: {@link java.util.List List}<{@link String}> + */ METADATA_SCHEMA_REFRESHED_KEYSPACES("advanced.metadata.schema.refreshed-keyspaces"), + /** + * How long the driver waits to apply a refresh. + * + *

Value-type: {@link java.time.Duration Duration} + */ METADATA_SCHEMA_WINDOW("advanced.metadata.schema.debouncer.window"), + /** + * The maximum number of refreshes that can accumulate. + * + *

Value-type: int + */ METADATA_SCHEMA_MAX_EVENTS("advanced.metadata.schema.debouncer.max-events"), + /** + * Whether token metadata is enabled. + * + *

Value-type: boolean + */ METADATA_TOKEN_MAP_ENABLED("advanced.metadata.token-map.enabled"), + /** + * How long the driver waits for responses to control queries. + * + *

Value-type: {@link java.time.Duration Duration} + */ CONTROL_CONNECTION_TIMEOUT("advanced.control-connection.timeout"), + /** + * The interval between each schema agreement check attempt. + * + *

Value-type: {@link java.time.Duration Duration} + */ CONTROL_CONNECTION_AGREEMENT_INTERVAL("advanced.control-connection.schema-agreement.interval"), + /** + * The timeout after which schema agreement fails. + * + *

Value-type: {@link java.time.Duration Duration} + */ CONTROL_CONNECTION_AGREEMENT_TIMEOUT("advanced.control-connection.schema-agreement.timeout"), + /** + * Whether to log a warning if schema agreement fails. + * + *

Value-type: boolean + */ CONTROL_CONNECTION_AGREEMENT_WARN("advanced.control-connection.schema-agreement.warn-on-failure"), + /** + * Whether `Session.prepare` calls should be sent to all nodes in the cluster. + * + *
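These options control the agreement checks the driver performs automatically after DDL statements; the same check can also be triggered manually through the session API (a sketch, assuming an existing CqlSession):

```java
// Blocks until the check completes; false means the nodes did not agree
// within advanced.control-connection.schema-agreement.timeout.
boolean agreed = session.checkSchemaAgreement();
// Non-blocking variant:
CompletionStage<Boolean> futureAgreed = session.checkSchemaAgreementAsync();
```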

Value-type: boolean + */ PREPARE_ON_ALL_NODES("advanced.prepared-statements.prepare-on-all-nodes"), + /** + * Whether the driver tries to prepare on new nodes at all. + * + *

Value-type: boolean + */ REPREPARE_ENABLED("advanced.prepared-statements.reprepare-on-up.enabled"), + /** + * Whether to check `system.prepared_statements` on the target node before repreparing. + * + *

Value-type: boolean + */ REPREPARE_CHECK_SYSTEM_TABLE("advanced.prepared-statements.reprepare-on-up.check-system-table"), + /** + * The maximum number of statements that should be reprepared. + * + *

Value-type: int + */ REPREPARE_MAX_STATEMENTS("advanced.prepared-statements.reprepare-on-up.max-statements"), + /** + * The maximum number of concurrent requests when repreparing. + * + *

Value-type: int + */ REPREPARE_MAX_PARALLELISM("advanced.prepared-statements.reprepare-on-up.max-parallelism"), + /** + * The request timeout when repreparing. + * + *

Value-type: {@link java.time.Duration Duration} + */ REPREPARE_TIMEOUT("advanced.prepared-statements.reprepare-on-up.timeout"), + /** + * The number of threads in the I/O group. + * + *

Value-type: int + */ NETTY_IO_SIZE("advanced.netty.io-group.size"), + /** + * Quiet period for I/O group shutdown. + * + *

Value-type: int + */ NETTY_IO_SHUTDOWN_QUIET_PERIOD("advanced.netty.io-group.shutdown.quiet-period"), + /** + * Max time to wait for I/O group shutdown. + * + *

Value-type: int + */ NETTY_IO_SHUTDOWN_TIMEOUT("advanced.netty.io-group.shutdown.timeout"), + /** + * Units for I/O group quiet period and timeout. + * + *

Value-type: {@link String} + */ NETTY_IO_SHUTDOWN_UNIT("advanced.netty.io-group.shutdown.unit"), + /** + * The number of threads in the Admin group. + * + *

Value-type: int + */ NETTY_ADMIN_SIZE("advanced.netty.admin-group.size"), + /** + * Quiet period for admin group shutdown. + * + *

Value-type: int + */ NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD("advanced.netty.admin-group.shutdown.quiet-period"), + /** + * Max time to wait for admin group shutdown. + * + *

Value-type: int + */ NETTY_ADMIN_SHUTDOWN_TIMEOUT("advanced.netty.admin-group.shutdown.timeout"), + /** + * Units for admin group quiet period and timeout. + * + *

Value-type: {@link String} + */ NETTY_ADMIN_SHUTDOWN_UNIT("advanced.netty.admin-group.shutdown.unit"), + /** @deprecated This option was removed in version 4.6.1. */ + @Deprecated COALESCER_MAX_RUNS("advanced.coalescer.max-runs-with-no-work"), + /** + * The coalescer reschedule interval. + * + *

Value-type: {@link java.time.Duration Duration} + */ COALESCER_INTERVAL("advanced.coalescer.reschedule-interval"), + /** + * Whether to resolve the addresses passed to `basic.contact-points`. + * + *

Value-type: boolean + */ RESOLVE_CONTACT_POINTS("advanced.resolve-contact-points"), + /** + * How frequently the timer should wake up to check for timed-out tasks or speculative + * executions. + * + *

Value-type: {@link java.time.Duration Duration} + */ NETTY_TIMER_TICK_DURATION("advanced.netty.timer.tick-duration"), + /** + * Number of ticks in the Timer wheel. + * + *

Value-type: int + */ NETTY_TIMER_TICKS_PER_WHEEL("advanced.netty.timer.ticks-per-wheel"), + /** + * Whether logging of server warnings generated during query execution should be disabled by the + * driver. + * + *

Value-type: boolean + */ REQUEST_LOG_WARNINGS("advanced.request.log-warnings"), - ; + + /** + * Whether the threads created by the driver should be daemon threads. + * + *

Value-type: boolean + */ + NETTY_DAEMON("advanced.netty.daemon"), + + /** + * The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a + * service. + * + *

Value-type: {@link String} + */ + CLOUD_SECURE_CONNECT_BUNDLE("basic.cloud.secure-connect-bundle"), + + /** + * Whether the slow replica avoidance should be enabled in the default LBP. + * + *

Value-type: boolean + */ + LOAD_BALANCING_POLICY_SLOW_AVOIDANCE("basic.load-balancing-policy.slow-replica-avoidance"), + + /** + * The timeout to use when establishing driver connections. + * + *

Value-type: {@link java.time.Duration Duration} + */ + CONNECTION_CONNECT_TIMEOUT("advanced.connection.connect-timeout"), + + /** + * The maximum number of live sessions that are allowed to coexist in a given VM. + * + *

Value-type: int + */ + SESSION_LEAK_THRESHOLD("advanced.session-leak.threshold"), + /** + * The period of inactivity after which the node-level metrics will be evicted. The eviction will + * happen only if none of the enabled node-level metrics is updated for a given node within this + * time window. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_NODE_EXPIRE_AFTER("advanced.metrics.node.expire-after"), + + /** + * The classname of the desired MetricsFactory implementation. + * + *

Value-type: {@link String} + */ + METRICS_FACTORY_CLASS("advanced.metrics.factory.class"), + + /** + * The maximum number of nodes from remote DCs to include in query plans. + * + *

Value-Type: int + */ + LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC( + "advanced.load-balancing-policy.dc-failover.max-nodes-per-remote-dc"), + /** + * Whether to consider nodes from remote DCs if the request's consistency level is local. + * + *

Value-Type: boolean + */ + LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS( + "advanced.load-balancing-policy.dc-failover.allow-for-local-consistency-levels"), + + /** + * The classname of the desired {@code MetricIdGenerator} implementation. + * + *

Value-type: {@link String} + */ + METRICS_ID_GENERATOR_CLASS("advanced.metrics.id-generator.class"), + + /** + * The value of the prefix to prepend to all metric names. + * + *

Value-type: {@link String} + */ + METRICS_ID_GENERATOR_PREFIX("advanced.metrics.id-generator.prefix"), + + /** + * The class name of a custom {@link + * com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator}. + * + *

Value-Type: {@link String} + */ + LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS("basic.load-balancing-policy.evaluator.class"), + + /** + * The shortest latency that we expect to record for requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_SESSION_CQL_REQUESTS_LOWEST("advanced.metrics.session.cql-requests.lowest-latency"), + /** + * Optional service-level objectives to meet, as a list of latencies to track. + * + *

Value-type: List of {@link java.time.Duration Duration} + */ + METRICS_SESSION_CQL_REQUESTS_SLO("advanced.metrics.session.cql-requests.slo"), + + /** + * The shortest latency that we expect to record for throttling. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_SESSION_THROTTLING_LOWEST("advanced.metrics.session.throttling.delay.lowest-latency"), + /** + * Optional service-level objectives to meet, as a list of latencies to track. + * + *

Value-type: List of {@link java.time.Duration Duration} + */ + METRICS_SESSION_THROTTLING_SLO("advanced.metrics.session.throttling.delay.slo"), + + /** + * The shortest latency that we expect to record for requests. + * + *

Value-type: {@link java.time.Duration Duration} + */ + METRICS_NODE_CQL_MESSAGES_LOWEST("advanced.metrics.node.cql-messages.lowest-latency"), + /** + * Optional service-level objectives to meet, as a list of latencies to track. + * + *

Value-type: List of {@link java.time.Duration Duration} + */ + METRICS_NODE_CQL_MESSAGES_SLO("advanced.metrics.node.cql-messages.slo"), + + /** + * Whether the prepared statements cache uses weak values. + * + *

Value-type: boolean + */ + PREPARED_CACHE_WEAK_VALUES("advanced.prepared-statements.prepared-cache.weak-values"), + + /** + * The classes of session-wide components that track the outcome of requests. + * + *

Value-type: List of {@link String} + */ + REQUEST_TRACKER_CLASSES("advanced.request-tracker.classes"), + + /** + * The classes of session-wide components that listen for node state changes. + * + *

Value-type: List of {@link String} + */ + METADATA_NODE_STATE_LISTENER_CLASSES("advanced.node-state-listener.classes"), + + /** + * The classes of session-wide components that listen for schema changes. + * + *

Value-type: List of {@link String} + */ + METADATA_SCHEMA_CHANGE_LISTENER_CLASSES("advanced.schema-change-listener.classes"), + /** + * Optional list of percentiles to publish for cql-requests metric. Produces an additional time + * series for each requested percentile. This percentile is computed locally, and so can't be + * aggregated with percentiles computed across other dimensions (e.g. in a different instance). + * + *
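These plural "classes" options pair naturally with the withClassList() shortcut added to the programmatic builder later in this patch. A sketch (RequestLogger is the driver's built-in tracker; MyTracker is hypothetical):

```java
DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withClassList(
            DefaultDriverOption.REQUEST_TRACKER_CLASSES,
            Arrays.asList(RequestLogger.class, MyTracker.class))
        .build();
```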

Value type: {@link java.util.List List}<{@link Double}> + */ + METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES( + "advanced.metrics.session.cql-requests.publish-percentiles"), + /** + * Optional list of percentiles to publish for node cql-messages metric. Produces an additional + * time series for each requested percentile. This percentile is computed locally, and so can't be + * aggregated with percentiles computed across other dimensions (e.g. in a different instance). + * + *

Value type: {@link java.util.List List}<{@link Double}> + */ + METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES( + "advanced.metrics.node.cql-messages.publish-percentiles"), + /** + * Optional list of percentiles to publish for throttling delay metric. Produces an additional + * time series for each requested percentile. This percentile is computed locally, and so can't be + * aggregated with percentiles computed across other dimensions (e.g. in a different instance). + * + *

Value type: {@link java.util.List List}<{@link Double}> + */ + METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES( + "advanced.metrics.session.throttling.delay.publish-percentiles"), + /** + * Adds histogram buckets used to generate aggregable percentile approximations in monitoring + * systems that have query facilities to do so (e.g. Prometheus histogram_quantile, Atlas + * percentiles). + * + *

Value-type: boolean + */ + METRICS_GENERATE_AGGREGABLE_HISTOGRAMS("advanced.metrics.histograms.generate-aggregable"), + /** + * The duration between attempts to reload the keystore. + * + *

Value-type: {@link java.time.Duration Duration} + */ + SSL_KEYSTORE_RELOAD_INTERVAL("advanced.ssl-engine-factory.keystore-reload-interval"), + /** + * Ordered preference list of remote DCs optionally supplied for automatic failover. + * + *

Value type: {@link java.util.List List}<{@link String}> + */ + LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS( + "advanced.load-balancing-policy.dc-failover.preferred-remote-dcs"), + /** + * Whether or not to do a DNS reverse-lookup of provided server addresses for SAN addresses. + * + *

Value-type: boolean + */ + SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN("advanced.ssl-engine-factory.allow-dns-reverse-lookup-san"), + /** + * The class of session-wide component that generates request IDs. + * + *

Value-type: {@link String} + */ + REQUEST_ID_GENERATOR_CLASS("advanced.request-id.generator.class"), + /** + * A proxy hostname to which the driver translates every node address, regardless of the node's + * IP address; each node keeps its own native transport port. + * + *

Value-Type: {@link String} + */ + ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME("advanced.address-translator.advertised-hostname"), + /** + * A map of Cassandra node subnets (CIDR notations) to target addresses, for example (note quoted + * keys): + * + *

+   * advanced.address-translator.subnet-addresses {
+   *   "100.64.0.0/15" = "cassandra.datacenter1.com:9042"
+   *   "100.66.0.0/15" = "cassandra.datacenter2.com:9042"
+   *   # IPv6 example:
+   *   # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042"
+   *   # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042"
+   * }
+   * 
+ * + * Note: subnets must be represented as prefix blocks, see {@link + * inet.ipaddr.Address#isPrefixBlock()}. + * + *

Value type: {@link java.util.Map Map}<{@link String},{@link String}> + */ + ADDRESS_TRANSLATOR_SUBNET_ADDRESSES("advanced.address-translator.subnet-addresses"), + /** + * A default address to fall back to if a Cassandra node's IP isn't contained in any of the + * configured subnets. + * + *

Value-Type: {@link String} + */ + ADDRESS_TRANSLATOR_DEFAULT_ADDRESS("advanced.address-translator.default-address"), + /** + * Whether to resolve the addresses on initialization (if true) or on each node (re-)connection + * (if false). Defaults to false. + * + *

Value-Type: boolean + */ + ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES("advanced.address-translator.resolve-addresses"); private final String path; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java index fae096123c2..88519c82a22 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfig.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java index 04c2b156f1b..15fae232d17 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverConfigLoader.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +19,14 @@ import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.internal.core.config.composite.CompositeDriverConfigLoader; +import com.datastax.oss.driver.internal.core.config.map.MapBasedDriverConfigLoader; import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; import com.datastax.oss.driver.internal.core.config.typesafe.DefaultProgrammaticDriverConfigLoaderBuilder; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; import edu.umd.cs.findbugs.annotations.NonNull; import java.io.File; import java.net.URL; +import java.nio.file.Path; import java.util.concurrent.CompletionStage; /** @@ -33,10 +36,29 @@ */ public interface DriverConfigLoader extends AutoCloseable { + /** + * Builds an instance using the driver's default implementation (based on Typesafe config) except + * that application-specific classpath resources will be located using the provided {@link + * ClassLoader} instead of {@linkplain Thread#getContextClassLoader() the current thread's context + * class loader}. + * + *

The returned loader will honor the reload interval defined by the option {@code + * basic.config-reload-interval}. + */ + @NonNull + static DriverConfigLoader fromDefaults(@NonNull ClassLoader appClassLoader) { + return new DefaultDriverConfigLoader(appClassLoader); + } + /** * Builds an instance using the driver's default implementation (based on Typesafe config), except * that application-specific options are loaded from a classpath resource with a custom name. * + *

The class loader used to locate application-specific classpath resources is {@linkplain + * Thread#getContextClassLoader() the current thread's context class loader}. This might not be + * suitable for OSGi deployments, which should use {@link #fromClasspath(String, ClassLoader)} + * instead. + * *

More precisely, configuration properties are loaded and merged from the following * (first-listed are higher priority): * @@ -57,16 +79,43 @@ public interface DriverConfigLoader extends AutoCloseable { */ @NonNull static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseResourcesAnySyntax(resourceBaseName)) - .withFallback(ConfigFactory.defaultReference()) - .resolve(); - return config.getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - }); + return fromClasspath(resourceBaseName, Thread.currentThread().getContextClassLoader()); + } + + /** + * Just like {@link #fromClasspath(java.lang.String)} except that application-specific classpath + * resources will be located using the provided {@link ClassLoader} instead of {@linkplain + * Thread#getContextClassLoader() the current thread's context class loader}. + */ + @NonNull + static DriverConfigLoader fromClasspath( + @NonNull String resourceBaseName, @NonNull ClassLoader appClassLoader) { + return DefaultDriverConfigLoader.fromClasspath(resourceBaseName, appClassLoader); + } + + /** + * Builds an instance using the driver's default implementation (based on Typesafe config), except + * that application-specific options are loaded from the given path. + * + *

More precisely, configuration properties are loaded and merged from the following + * (first-listed are higher priority): + * + *

    + *
  • system properties + *
  • the contents of {@code file} + *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this + * will load the {@code reference.conf} included in the core driver JAR, that defines + * default options for all mandatory options. + *
+ * + * The resulting configuration is expected to contain a {@code datastax-java-driver} section. + * + *

The returned loader will honor the reload interval defined by the option {@code + * basic.config-reload-interval}. + */ + @NonNull + static DriverConfigLoader fromPath(@NonNull Path file) { + return fromFile(file.toFile()); } /** @@ -91,16 +140,7 @@ static DriverConfigLoader fromClasspath(@NonNull String resourceBaseName) { */ @NonNull static DriverConfigLoader fromFile(@NonNull File file) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseFileAnySyntax(file)) - .withFallback(ConfigFactory.defaultReference()) - .resolve(); - return config.getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - }); + return DefaultDriverConfigLoader.fromFile(file); } /** @@ -125,21 +165,45 @@ static DriverConfigLoader fromFile(@NonNull File file) { */ @NonNull static DriverConfigLoader fromUrl(@NonNull URL url) { - return new DefaultDriverConfigLoader( - () -> { - ConfigFactory.invalidateCaches(); - Config config = - ConfigFactory.defaultOverrides() - .withFallback(ConfigFactory.parseURL(url)) - .withFallback(ConfigFactory.defaultReference()) - .resolve(); - return config.getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - }); + return DefaultDriverConfigLoader.fromUrl(url); + } + + /** + * Builds an instance using the driver's default implementation (based on Typesafe config), except + * that application-specific options are parsed from the given string. + * + *

The string must be in HOCON format and contain a {@code datastax-java-driver} section. + * Options must be separated by line breaks: + * + *

+   * DriverConfigLoader.fromString(
+   *         "datastax-java-driver.basic { session-name = my-app\nrequest.timeout = 1 millisecond }")
+   * 
+ * + *

More precisely, configuration properties are loaded and merged from the following + * (first-listed are higher priority): + * + *

    + *
  • system properties + *
  • the config in {@code contents} + *
  • {@code reference.conf} (all resources on classpath with this name). In particular, this + * will load the {@code reference.conf} included in the core driver JAR, that defines + * default options for all mandatory options. + *
+ * + *

This loader does not support runtime reloading. + */ + @NonNull + static DriverConfigLoader fromString(@NonNull String contents) { + return DefaultDriverConfigLoader.fromString(contents); } /** * Starts a builder that allows configuration options to be overridden programmatically. * + *

Note that {@link #fromMap(OptionsMap)} provides an alternative approach for programmatic + * configuration, which might be more convenient if you wish to completely bypass Typesafe config. + * *

For example: * *

{@code
@@ -183,18 +247,96 @@ static DriverConfigLoader fromUrl(@NonNull URL url) {
    * Note that {@code application.*} is entirely optional, you may choose to only rely on the
    * driver's built-in {@code reference.conf} and programmatic overrides.
    *
+   * 

The class loader used to locate application-specific classpath resources is {@linkplain + * Thread#getContextClassLoader() the current thread's context class loader}. This might not be + * suitable for OSGi deployments, which should use {@link #programmaticBuilder(ClassLoader)} + * instead. + * *

The resulting configuration is expected to contain a {@code datastax-java-driver} section. * *

The loader will honor the reload interval defined by the option {@code * basic.config-reload-interval}. * *

Note that the returned builder is not thread-safe. + * + * @see #fromMap(OptionsMap) */ @NonNull static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { return new DefaultProgrammaticDriverConfigLoaderBuilder(); } + /** + * Just like {@link #programmaticBuilder()} except that application-specific classpath resources + * will be located using the provided {@link ClassLoader} instead of {@linkplain + * Thread#getContextClassLoader() the current thread's context class loader}. + */ + @NonNull + static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder( + @NonNull ClassLoader appClassLoader) { + return new DefaultProgrammaticDriverConfigLoaderBuilder(appClassLoader); + } + + /** + * Builds an instance backed by an {@link OptionsMap}, which holds all options in memory. + * + *

This is the simplest implementation. It is intended for clients who wish to completely + * bypass Typesafe config, and instead manage the configuration programmatically. A typical + * example is a third-party tool that already has its own configuration file, and doesn't want to + * introduce a separate mechanism for driver options. + * + *

With this loader, the driver's built-in {@code reference.conf} file is ignored; the provided + * {@link OptionsMap} must explicitly provide all mandatory options. Note, however, that {@link + * OptionsMap#driverDefaults()} allows you to initialize an instance with the same default values + * as {@code reference.conf}. + * + *

+   * // This creates a configuration equivalent to the built-in reference.conf:
+   * OptionsMap map = OptionsMap.driverDefaults();
+   *
+   * // Customize an option:
+   * map.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5));
+   *
+   * DriverConfigLoader loader = DriverConfigLoader.fromMap(map);
+   * CqlSession session = CqlSession.builder()
+   *     .withConfigLoader(loader)
+   *     .build();
+   * 
+ * + *

If the {@link OptionsMap} is modified at runtime, this will be reflected immediately in the + * configuration; you don't need to call {@link #reload()}. Note however that, depending on the + * option, the driver might not react to a configuration change immediately, or ever (this is + * documented in {@code reference.conf}). + * + * @since 4.6.0 + */ + @NonNull + static DriverConfigLoader fromMap(@NonNull OptionsMap source) { + return new MapBasedDriverConfigLoader(source, source.asRawMap()); + } + + /** + * Composes two existing config loaders to form a new one. + * + *

When the driver reads an option, the "primary" config will be queried first. If the option + * is missing, then it will be looked up in the "fallback" config. + * + *

All execution profiles will be surfaced in the new config. If a profile is defined both in + * the primary and the fallback config, its options will be merged using the same precedence rules + * as described above. + * + *

The new config is reloadable if at least one of the input configs is. If you invoke {@link + * DriverConfigLoader#reload()} on the new loader, it will reload whatever is reloadable, or fail + * if nothing is. If the input loaders have periodic reloading built-in, each one will reload at + * its own pace, and the changes will be reflected in the new config. + */ + @NonNull + static DriverConfigLoader compose( + @NonNull DriverConfigLoader primaryConfigLoader, + @NonNull DriverConfigLoader fallbackConfigLoader) { + return new CompositeDriverConfigLoader(primaryConfigLoader, fallbackConfigLoader); + } + /** * Loads the first configuration that will be used to initialize the driver. * @@ -211,26 +353,32 @@ static ProgrammaticDriverConfigLoaderBuilder programmaticBuilder() { void onDriverInit(@NonNull DriverContext context); /** - * Triggers an immediate reload attempt. + * Triggers an immediate reload attempt and returns a stage that completes once the attempt is + * finished, with a boolean indicating whether the configuration changed as a result of this + * reload. * - * @return a stage that completes once the attempt is finished, with a boolean indicating whether - * the configuration changed as a result of this reload. If so, it's also guaranteed that - * internal driver components have been notified by that time; note however that some react to - * the notification asynchronously, so they may not have completely applied all resulting - * changes yet. If this loader does not support programmatic reloading — which you can - * check by calling {@link #supportsReloading()} before this method — the returned - * object will fail immediately with an {@link UnsupportedOperationException}. + *
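A typical use of compose() is to layer a small programmatic override on top of the regular file-based configuration (a sketch; the timeout value is hypothetical):

```java
DriverConfigLoader overrides =
    DriverConfigLoader.programmaticBuilder()
        .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5))
        .build();
DriverConfigLoader loader =
    DriverConfigLoader.compose(overrides, DriverConfigLoader.fromClasspath("application"));
CqlSession session = CqlSession.builder().withConfigLoader(loader).build();
```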

If so, it's also guaranteed that internal driver components have been notified by that time; + * note however that some react to the notification asynchronously, so they may not have + * completely applied all resulting changes yet. + * + *

If this loader does not support programmatic reloading — which you can check by + * calling {@link #supportsReloading()} before this method — the returned stage should fail + * immediately with an {@link UnsupportedOperationException}. The default implementation of this + * interface does support programmatic reloading, however, and never returns a failed stage. + */ @NonNull CompletionStage<Boolean> reload(); /** * Whether this implementation supports programmatic reloading with the {@link #reload()} method. + + *

The default implementation of this interface does support programmatic reloading and always + * returns true. */ boolean supportsReloading(); /** - * Called when the cluster closes. This is a good time to release any external resource, for + * Called when the session closes. This is a good time to release any external resource, for * example cancel a scheduled reloading task. */ @Override diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java index 600b2709065..89c28f0f521 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.oss.driver.api.core.config; +import com.datastax.oss.driver.internal.core.config.DerivedExecutionProfile; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.time.Duration; @@ -182,12 +186,28 @@ default List getDurationList( /** * Returns a representation of all the child options under a given option. * - *
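In client code the two methods are typically used together, as in this sketch:

```java
if (loader.supportsReloading()) {
  loader
      .reload()
      .thenAccept(
          changed -> {
            if (changed) {
              // Internal components have been notified; some apply the new
              // values asynchronously, so changes may not be visible instantly.
            }
          });
}
```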

This is only used to compare configuration sections across profiles, so the actual - * implementation does not matter, as long as identical sections (same options with same values, - * regardless of order) compare as equal and have the same {@code hashCode()}. + *

This is used by the driver at initialization time, to compare profiles and determine if it + * must create per-profile policies. For example, if two profiles have the same options in the + * {@code basic.load-balancing-policy} section, they will share the same policy instance. But if + * their options differ, two separate instances will be created. + * + *

The runtime return type does not matter, as long as identical sections (same options with + * same values, regardless of order) compare as equal and have the same {@code hashCode()}. The + * default implementation builds a map based on the entries from {@link #entrySet()}; it should be + * good for most cases. + */ @NonNull - Object getComparisonKey(@NonNull DriverOption option); + default Object getComparisonKey(@NonNull DriverOption option) { + // This method is only used during driver initialization; performance is not crucial + String prefix = option.getPath(); + ImmutableMap.Builder childOptions = ImmutableMap.builder(); + for (Map.Entry entry : entrySet()) { + if (entry.getKey().startsWith(prefix)) { + childOptions.put(entry.getKey(), entry.getValue()); + } + } + return childOptions.build(); + } /** * Enumerates all the entries in this profile, including those that were inherited from another * @@ -201,4 +221,109 @@ default List getDurationList( */ @NonNull SortedSet> entrySet(); + + @NonNull + @Override + default DriverExecutionProfile withBoolean(@NonNull DriverOption option, boolean value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withBooleanList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withInt(@NonNull DriverOption option, int value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withIntList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withLong(@NonNull DriverOption option, long value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withLongList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withDouble(@NonNull DriverOption option, double value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withDoubleList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withString(@NonNull DriverOption option, @NonNull String value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withStringList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withStringMap( + @NonNull DriverOption option, @NonNull Map value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withBytes(@NonNull DriverOption option, long value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withBytesList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withDuration( + @NonNull DriverOption option, @NonNull Duration value) { + return
DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile withDurationList( + @NonNull DriverOption option, @NonNull List value) { + return DerivedExecutionProfile.with(this, option, value); + } + + @NonNull + @Override + default DriverExecutionProfile without(@NonNull DriverOption option) { + return DerivedExecutionProfile.without(this, option); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java index 3213dc4b2ad..2f15b701f36 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/DriverOption.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java index 0345150d770..2c931bbfa91 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/OngoingConfigOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +21,7 @@ import java.time.Duration; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** An object where config options can be set programmatically. */ public interface OngoingConfigOptions> { @@ -59,6 +62,15 @@ default SelfT withClass(@NonNull DriverOption option, @NonNull Class value) { return withString(option, value.getName()); } + /** + * Note that this is just a shortcut to call {@link #withStringList(DriverOption, List)} with + * class names obtained from {@link Class#getName()}. 
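With these default methods in place, any profile can spawn a derived profile for a one-off override on a single request (a sketch; the timeout value is arbitrary):

```java
DriverExecutionProfile defaultProfile =
    session.getContext().getConfig().getDefaultProfile();
DriverExecutionProfile oneOff =
    defaultProfile.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(500));
session.execute(
    SimpleStatement.newInstance("SELECT release_version FROM system.local")
        .setExecutionProfile(oneOff));
```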
+ */ + @NonNull + default SelfT withClassList(@NonNull DriverOption option, @NonNull List> values) { + return withStringList(option, values.stream().map(Class::getName).collect(Collectors.toList())); + } + @NonNull SelfT withStringList(@NonNull DriverOption option, @NonNull List value); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java new file mode 100644 index 00000000000..98faf3e590c --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/OptionsMap.java @@ -0,0 +1,403 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.config; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; +import net.jcip.annotations.Immutable; +import net.jcip.annotations.ThreadSafe; + +/** + * An in-memory repository of config options, for use with {@link + * DriverConfigLoader#fromMap(OptionsMap)}. + * + *

This class is intended for clients who wish to assemble the driver configuration in memory, + * instead of loading it from configuration files. Note that {@link #driverDefaults()} can be used + * to pre-initialize the map with the driver's built-in defaults. + * + *

It functions like a two-dimensional map indexed by execution profile and option. All methods + * have a profile-less variant that applies to the default profile, for example {@link #get(String, + * TypedDriverOption)} and {@link #get(TypedDriverOption)}. Options are represented by {@link + * TypedDriverOption}, which allows this class to enforce additional type-safety guarantees (an + * option can only be set to a value of its intended type). + * + *

This class is mutable and thread-safe. Live changes are reflected in real time to the driver + * session(s) that use this configuration. + * + * @since 4.6.0 + */ +@ThreadSafe +public class OptionsMap implements Serializable { + + private static final long serialVersionUID = 1; + + /** + * Creates a new instance that contains the driver's default configuration. + * + *

This will produce a configuration that is equivalent to the {@code reference.conf} file + * bundled with the driver (however, this method does not load any file, and doesn't require + * Typesafe config in the classpath). + */ + @NonNull + public static OptionsMap driverDefaults() { + OptionsMap source = new OptionsMap(); + fillWithDriverDefaults(source); + return source; + } + + private final ConcurrentHashMap> map; + + private final List> changeListeners = new CopyOnWriteArrayList<>(); + + public OptionsMap() { + this(new ConcurrentHashMap<>()); + } + + private OptionsMap(ConcurrentHashMap> map) { + this.map = map; + } + + /** + * Associates the specified value for the specified option, in the specified execution profile. + * + * @return the previous value associated with {@code option}, or {@code null} if the option was + * not defined. + */ + @Nullable + public ValueT put( + @NonNull String profile, @NonNull TypedDriverOption option, @NonNull ValueT value) { + Objects.requireNonNull(option, "option"); + Objects.requireNonNull(value, "value"); + Object previous = getProfileMap(profile).put(option.getRawOption(), value); + if (!value.equals(previous)) { + for (Consumer listener : changeListeners) { + listener.accept(this); + } + } + return cast(previous); + } + + /** + * Associates the specified value for the specified option, in the default execution profile. + * + * @return the previous value associated with {@code option}, or {@code null} if the option was + * not defined. + */ + @Nullable + public ValueT put(@NonNull TypedDriverOption option, @NonNull ValueT value) { + return put(DriverExecutionProfile.DEFAULT_NAME, option, value); + } + + /** + * Returns the value to which the specified option is mapped in the specified profile, or {@code + * null} if the option is not defined. + */ + @Nullable + public ValueT get(@NonNull String profile, @NonNull TypedDriverOption option) { + Objects.requireNonNull(option, "option"); + Object result = getProfileMap(profile).get(option.getRawOption()); + return cast(result); + } + + /** + * Returns the value to which the specified option is mapped in the default profile, or {@code + * null} if the option is not defined. + */ + @Nullable + public ValueT get(@NonNull TypedDriverOption option) { + return get(DriverExecutionProfile.DEFAULT_NAME, option); + } + + /** + * Removes the specified option from the specified profile. + * + * @return the previous value associated with {@code option}, or {@code null} if the option was + * not defined. + */ + @Nullable + public ValueT remove( + @NonNull String profile, @NonNull TypedDriverOption option) { + Objects.requireNonNull(option, "option"); + Object previous = getProfileMap(profile).remove(option.getRawOption()); + if (previous != null) { + for (Consumer listener : changeListeners) { + listener.accept(this); + } + } + return cast(previous); + } + + /** + * Removes the specified option from the default profile. + * + * @return the previous value associated with {@code option}, or {@code null} if the option was + * not defined. + */ + @Nullable + public ValueT remove(@NonNull TypedDriverOption option) { + return remove(DriverExecutionProfile.DEFAULT_NAME, option); + } + + /** + * Registers a listener that will get notified when this object changes. + * + *
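Putting the profile-less and profile-aware variants together, per-profile overrides look like this (a sketch; "slow" is a hypothetical profile name):

```java
OptionsMap map = OptionsMap.driverDefaults();
// Default profile:
map.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(2));
// The "slow" profile overrides just this one option:
map.put("slow", TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30));
CqlSession session =
    CqlSession.builder().withConfigLoader(DriverConfigLoader.fromMap(map)).build();
```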
+  /**
+   * Registers a listener that will get notified when this object changes.
+   *
+   * <p>This is mostly for internal use by the driver. Note that listeners are transient, and not
+   * taken into account by {@link #equals(Object)} and {@link #hashCode()}.
+   */
+  public void addChangeListener(@NonNull Consumer<OptionsMap> listener) {
+    changeListeners.add(Objects.requireNonNull(listener));
+  }
+
+  /**
+   * Unregisters a listener that was previously registered with {@link
+   * #addChangeListener(Consumer)}.
+   *
+   * @return {@code true} if the listener was indeed registered for this object.
+   */
+  public boolean removeChangeListener(@NonNull Consumer<OptionsMap> listener) {
+    return changeListeners.remove(Objects.requireNonNull(listener));
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    } else if (other instanceof OptionsMap) {
+      OptionsMap that = (OptionsMap) other;
+      return this.map.equals(that.map);
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    return map.hashCode();
+  }
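+  // A short sketch of the listener hooks above (the println lambda is purely illustrative). As
+  // the put/remove implementations show, put() only notifies when the new value differs from the
+  // previous one, and remove() only notifies when a value was actually removed:
+  //
+  //   OptionsMap options = OptionsMap.driverDefaults();
+  //   options.addChangeListener(m -> System.out.println("configuration changed"));
+  //   options.put(TypedDriverOption.REQUEST_PAGE_SIZE, 100);  // fires (the default was 5000)
+  //   options.put(TypedDriverOption.REQUEST_PAGE_SIZE, 100);  // same value, does not fire
+  //   options.remove(TypedDriverOption.REQUEST_PAGE_SIZE);    // fires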
+  /**
+   * Returns a live view of this object, using the driver's untyped {@link DriverOption}.
+   *
+   * <p>This is intended for internal usage by the driver. Modifying the resulting map is strongly
+   * discouraged, as it could break the type-safety guarantees provided by the public methods.
+   */
+  @NonNull
+  protected Map<String, Map<DriverOption, Object>> asRawMap() {
+    return map;
+  }
+
+  @NonNull
+  private Map<DriverOption, Object> getProfileMap(@NonNull String profile) {
+    Objects.requireNonNull(profile, "profile");
+    return map.computeIfAbsent(profile, p -> new ConcurrentHashMap<>());
+  }
+
+  // Isolate the suppressed warning for retrieval. The cast should always succeed unless the user
+  // messes with asRawMap() directly.
+  @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"})
+  @Nullable
+  private <ValueT> ValueT cast(@Nullable Object value) {
+    return (ValueT) value;
+  }
+
+  /**
+   * This object gets replaced by an internal proxy for serialization.
+   *
+   * @serialData the serialized form of the {@code Map<String, Map<DriverOption, Object>>} used to
+   *     store options internally (listeners are transient).
+   */
+  private Object writeReplace() {
+    return new SerializationProxy(this.map);
+  }
+
+  // Should never be called since we serialize a proxy
+  @SuppressWarnings("UnusedVariable")
+  private void readObject(ObjectInputStream stream) throws InvalidObjectException {
+    throw new InvalidObjectException("Proxy required");
+  }
+
+  protected static void fillWithDriverDefaults(OptionsMap map) {
+    Duration initQueryTimeout = Duration.ofSeconds(5);
+    Duration requestTimeout = Duration.ofSeconds(2);
+    int requestPageSize = 5000;
+    int continuousMaxPages = 0;
+    int continuousMaxPagesPerSecond = 0;
+    int continuousMaxEnqueuedPages = 4;
+
+    // Sorted by order of appearance in reference.conf:
+
+    // Skip CONFIG_RELOAD_INTERVAL because the map-based config doesn't need periodic reloading
+    map.put(TypedDriverOption.REQUEST_TIMEOUT, requestTimeout);
+    map.put(TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_ONE");
+    map.put(TypedDriverOption.REQUEST_PAGE_SIZE, requestPageSize);
+    map.put(TypedDriverOption.REQUEST_SERIAL_CONSISTENCY, "SERIAL");
+    map.put(TypedDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, false);
+    map.put(TypedDriverOption.GRAPH_TRAVERSAL_SOURCE, "g");
+    map.put(TypedDriverOption.LOAD_BALANCING_POLICY_CLASS, "DefaultLoadBalancingPolicy");
+    map.put(TypedDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true);
+    map.put(TypedDriverOption.SESSION_LEAK_THRESHOLD, 4);
+    map.put(TypedDriverOption.CONNECTION_CONNECT_TIMEOUT, Duration.ofSeconds(5));
+    map.put(TypedDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, initQueryTimeout);
+    map.put(TypedDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT, initQueryTimeout);
+    map.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1);
+    map.put(TypedDriverOption.CONNECTION_POOL_REMOTE_SIZE, 1);
+    map.put(TypedDriverOption.CONNECTION_MAX_REQUESTS, 1024);
+    map.put(TypedDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS, 256);
+    map.put(TypedDriverOption.CONNECTION_WARN_INIT_ERROR, true);
+    map.put(TypedDriverOption.RECONNECT_ON_INIT, false);
+    map.put(TypedDriverOption.RECONNECTION_POLICY_CLASS, "ExponentialReconnectionPolicy");
+    map.put(TypedDriverOption.RECONNECTION_BASE_DELAY, Duration.ofSeconds(1));
+    map.put(TypedDriverOption.RECONNECTION_MAX_DELAY, Duration.ofSeconds(60));
+    map.put(TypedDriverOption.RETRY_POLICY_CLASS, "DefaultRetryPolicy");
+    map.put(TypedDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, "NoSpeculativeExecutionPolicy");
+    map.put(TypedDriverOption.TIMESTAMP_GENERATOR_CLASS, "AtomicTimestampGenerator");
+    map.put(TypedDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, Duration.ofSeconds(1));
+    map.put(TypedDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL, Duration.ofSeconds(10));
+    map.put(TypedDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, false);
+    map.put(TypedDriverOption.REQUEST_THROTTLER_CLASS, "PassThroughRequestThrottler");
+    map.put(TypedDriverOption.ADDRESS_TRANSLATOR_CLASS, "PassThroughAddressTranslator");
+    map.put(TypedDriverOption.RESOLVE_CONTACT_POINTS, true);
+    map.put(TypedDriverOption.PROTOCOL_MAX_FRAME_LENGTH, 256L * 1024 * 1024);
+    map.put(TypedDriverOption.REQUEST_WARN_IF_SET_KEYSPACE, true);
+    map.put(TypedDriverOption.REQUEST_TRACE_ATTEMPTS, 5);
+    map.put(TypedDriverOption.REQUEST_TRACE_INTERVAL, Duration.ofMillis(3));
+    map.put(TypedDriverOption.REQUEST_TRACE_CONSISTENCY, "ONE");
+    map.put(TypedDriverOption.REQUEST_LOG_WARNINGS, true);
+    map.put(TypedDriverOption.GRAPH_PAGING_ENABLED, "AUTO");
+    map.put(TypedDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, requestPageSize);
+    map.put(TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, continuousMaxPages);
+    map.put(
+        TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND,
+        continuousMaxPagesPerSecond);
+    map.put(
+        TypedDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, continuousMaxEnqueuedPages);
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, requestPageSize);
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, false);
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, continuousMaxPages);
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, continuousMaxPagesPerSecond);
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, continuousMaxEnqueuedPages);
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(2));
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(1));
+    map.put(TypedDriverOption.MONITOR_REPORTING_ENABLED, true);
+    map.put(TypedDriverOption.METRICS_SESSION_ENABLED, Collections.emptyList());
+    map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, Duration.ofSeconds(3));
+    map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, Duration.ofMillis(1));
+    map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, 3);
+    map.put(TypedDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL, Duration.ofMinutes(5));
+    map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, Duration.ofSeconds(3));
+    map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_LOWEST, Duration.ofMillis(1));
+    map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_DIGITS, 3);
+    map.put(TypedDriverOption.METRICS_SESSION_THROTTLING_INTERVAL, Duration.ofMinutes(5));
+    map.put(
+        TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST,
+        Duration.ofMinutes(2));
+    map.put(
+        TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST,
+        Duration.ofMillis(10));
+    map.put(TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, 3);
+    map.put(
+        TypedDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL,
+        Duration.ofMinutes(5));
+    map.put(TypedDriverOption.METRICS_FACTORY_CLASS, "DefaultMetricsFactory");
+    map.put(TypedDriverOption.METRICS_ID_GENERATOR_CLASS, "DefaultMetricIdGenerator");
+    map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, Duration.ofSeconds(12));
+    map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, Duration.ofMillis(1));
+    map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, 3);
+    map.put(TypedDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL, Duration.ofMinutes(5));
+    map.put(TypedDriverOption.METRICS_NODE_ENABLED, Collections.emptyList());
+    map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, Duration.ofSeconds(3));
+    map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, Duration.ofMillis(1));
+    map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, 3);
+    map.put(TypedDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL, Duration.ofMinutes(5));
+    map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, Duration.ofSeconds(3));
+    map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, Duration.ofMillis(1));
+    map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, 3);
+    map.put(TypedDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL, Duration.ofMinutes(5));
+    map.put(TypedDriverOption.METRICS_NODE_EXPIRE_AFTER, Duration.ofHours(1));
+    map.put(TypedDriverOption.SOCKET_TCP_NODELAY, true);
+    map.put(TypedDriverOption.HEARTBEAT_INTERVAL, Duration.ofSeconds(30));
+    map.put(TypedDriverOption.HEARTBEAT_TIMEOUT, initQueryTimeout);
+    map.put(TypedDriverOption.METADATA_TOPOLOGY_WINDOW, Duration.ofSeconds(1));
+    map.put(TypedDriverOption.METADATA_TOPOLOGY_MAX_EVENTS, 20);
+    map.put(TypedDriverOption.METADATA_SCHEMA_ENABLED, true);
+    map.put(
+        TypedDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES,
+        ImmutableList.of("!system", "!/^system_.*/", "!/^dse_.*/", "!solr_admin", "!OpsCenter"));
+    map.put(TypedDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT, requestTimeout);
+    map.put(TypedDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE, requestPageSize);
+    map.put(TypedDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(1));
+    map.put(TypedDriverOption.METADATA_SCHEMA_MAX_EVENTS, 20);
+    map.put(TypedDriverOption.METADATA_TOKEN_MAP_ENABLED, true);
+    map.put(TypedDriverOption.CONTROL_CONNECTION_TIMEOUT, initQueryTimeout);
+    map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL, Duration.ofMillis(200));
+    map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, Duration.ofSeconds(10));
+    map.put(TypedDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN, true);
+    map.put(TypedDriverOption.PREPARE_ON_ALL_NODES, true);
+    map.put(TypedDriverOption.REPREPARE_ENABLED, true);
+    map.put(TypedDriverOption.REPREPARE_CHECK_SYSTEM_TABLE, false);
+    map.put(TypedDriverOption.REPREPARE_MAX_STATEMENTS, 0);
+    map.put(TypedDriverOption.REPREPARE_MAX_PARALLELISM, 100);
+    map.put(TypedDriverOption.REPREPARE_TIMEOUT, initQueryTimeout);
+    map.put(TypedDriverOption.NETTY_DAEMON, false);
+    map.put(TypedDriverOption.NETTY_IO_SIZE, 0);
+    map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD, 2);
+    map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT, 15);
+    map.put(TypedDriverOption.NETTY_IO_SHUTDOWN_UNIT, "SECONDS");
+    map.put(TypedDriverOption.NETTY_ADMIN_SIZE, 2);
+    map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD, 2);
+    map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT, 15);
+    map.put(TypedDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT, "SECONDS");
+    map.put(TypedDriverOption.NETTY_TIMER_TICK_DURATION, Duration.ofMillis(100));
+    map.put(TypedDriverOption.NETTY_TIMER_TICKS_PER_WHEEL, 2048);
+    map.put(TypedDriverOption.COALESCER_INTERVAL, Duration.of(10, ChronoUnit.MICROS));
+    map.put(TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, 0);
+    map.put(TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, false);
+    map.put(TypedDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS, true);
+    map.put(
+        TypedDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS,
+        ImmutableList.of(""));
+  }
+
+  @Immutable
+  private static class SerializationProxy implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+
+    private final ConcurrentHashMap<String, Map<DriverOption, Object>> map;
+
+    private SerializationProxy(ConcurrentHashMap<String, Map<DriverOption, Object>> map) {
+      this.map = map;
+    }
+
+    private Object readResolve() {
+      return new OptionsMap(map);
+    }
+  }
+}
diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java
index 2f6aa64485e..c3ae1d1bf5b 100644
--- a/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java
+++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/ProgrammaticDriverConfigLoaderBuilder.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java
new file mode 100644
index 00000000000..182753300e7
--- /dev/null
+++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/TypedDriverOption.java
@@ -0,0 +1,944 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.api.core.config;
+
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.StringJoiner;
+
+/**
+ * A type-safe wrapper around {@link DriverOption} that encodes the intended value type of each
+ * option.
+ *
+ * <p>This type was introduced in conjunction with {@link DriverConfigLoader#fromMap(OptionsMap)}.
+ * Unfortunately, for backward compatibility reasons, it wasn't possible to retrofit the rest of
+ * the driver to use it; therefore the APIs used to read the configuration, such as {@link
+ * DriverConfig} and {@link DriverExecutionProfile}, still use the untyped {@link DriverOption}.
+ *
+ * @since 4.6.0
+ */
+public class TypedDriverOption<ValueT> {
+
+  private static volatile Iterable<TypedDriverOption<?>> builtInValues;
+
+  /**
+   * Returns the list of all built-in options known to the driver codebase; in other words, all the
+   * {@link TypedDriverOption} constants defined on this class.
+   *
+   * <p>Note that 3rd-party driver extensions might define their own {@link TypedDriverOption}
+   * constants for custom options.
+   *
+   * <p>This method uses reflection to introspect all the constants on this class; the result is
+   * computed lazily on the first invocation, and then cached for future calls.
+   */
+  public static Iterable<TypedDriverOption<?>> builtInValues() {
+    if (builtInValues == null) {
+      builtInValues = introspectBuiltInValues();
+    }
+    return builtInValues;
+  }
+
+  private final DriverOption rawOption;
+  private final GenericType<ValueT> expectedType;
+
+  public TypedDriverOption(
+      @NonNull DriverOption rawOption, @NonNull GenericType<ValueT> expectedType) {
+    this.rawOption = Objects.requireNonNull(rawOption);
+    this.expectedType = Objects.requireNonNull(expectedType);
+  }
+
+  @NonNull
+  public DriverOption getRawOption() {
+    return rawOption;
+  }
+
+  @NonNull
+  public GenericType<ValueT> getExpectedType() {
+    return expectedType;
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    } else if (other instanceof TypedDriverOption) {
+      TypedDriverOption<?> that = (TypedDriverOption<?>) other;
+      return this.rawOption.equals(that.rawOption) && this.expectedType.equals(that.expectedType);
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(rawOption, expectedType);
+  }
+
+  @Override
+  public String toString() {
+    return new StringJoiner(", ", TypedDriverOption.class.getSimpleName() + "[", "]")
+        .add("rawOption=" + rawOption)
+        .add("expectedType=" + expectedType)
+        .toString();
+  }
+
+  /** The contact points to use for the initial connection to the cluster. */
+  public static final TypedDriverOption<List<String>> CONTACT_POINTS =
+      new TypedDriverOption<>(DefaultDriverOption.CONTACT_POINTS, GenericType.listOf(String.class));
+  /** A name that uniquely identifies the driver instance. */
+  public static final TypedDriverOption<String> SESSION_NAME =
+      new TypedDriverOption<>(DefaultDriverOption.SESSION_NAME, GenericType.STRING);
+  /** The name of the keyspace that the session should initially be connected to. */
+  public static final TypedDriverOption<String> SESSION_KEYSPACE =
+      new TypedDriverOption<>(DefaultDriverOption.SESSION_KEYSPACE, GenericType.STRING);
+  /** How often the driver tries to reload the configuration. */
+  public static final TypedDriverOption<Duration> CONFIG_RELOAD_INTERVAL =
+      new TypedDriverOption<>(DefaultDriverOption.CONFIG_RELOAD_INTERVAL, GenericType.DURATION);
+  /** How long the driver waits for a request to complete. */
+  public static final TypedDriverOption<Duration> REQUEST_TIMEOUT =
+      new TypedDriverOption<>(DefaultDriverOption.REQUEST_TIMEOUT, GenericType.DURATION);
+  /** The consistency level. */
+  public static final TypedDriverOption<String> REQUEST_CONSISTENCY =
+      new TypedDriverOption<>(DefaultDriverOption.REQUEST_CONSISTENCY, GenericType.STRING);
+  /** The page size. */
+  public static final TypedDriverOption<Integer> REQUEST_PAGE_SIZE =
+      new TypedDriverOption<>(DefaultDriverOption.REQUEST_PAGE_SIZE, GenericType.INTEGER);
+  /** The serial consistency level. */
+  public static final TypedDriverOption<String> REQUEST_SERIAL_CONSISTENCY =
+      new TypedDriverOption<>(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY, GenericType.STRING);
+  /** The default idempotence of a request. */
+  public static final TypedDriverOption<Boolean> REQUEST_DEFAULT_IDEMPOTENCE =
+      new TypedDriverOption<>(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, GenericType.BOOLEAN);
+  /** The class of the load balancing policy. */
+  public static final TypedDriverOption<String> LOAD_BALANCING_POLICY_CLASS =
+      new TypedDriverOption<>(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, GenericType.STRING);
+  /** The datacenter that is considered "local".
*/ + public static final TypedDriverOption LOAD_BALANCING_LOCAL_DATACENTER = + new TypedDriverOption<>( + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, GenericType.STRING); + /** + * A custom filter to include/exclude nodes. + * + * @deprecated Use {@link #LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS} instead. + */ + @Deprecated + public static final TypedDriverOption LOAD_BALANCING_FILTER_CLASS = + new TypedDriverOption<>(DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, GenericType.STRING); + /** + * The class name of a custom {@link + * com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator}. + */ + public static final TypedDriverOption LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS = + new TypedDriverOption<>( + DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS, GenericType.STRING); + /** The timeout to use for internal queries that run as part of the initialization process. */ + public static final TypedDriverOption CONNECTION_INIT_QUERY_TIMEOUT = + new TypedDriverOption<>( + DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, GenericType.DURATION); + /** The timeout to use when the driver changes the keyspace on a connection at runtime. */ + public static final TypedDriverOption CONNECTION_SET_KEYSPACE_TIMEOUT = + new TypedDriverOption<>( + DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT, GenericType.DURATION); + /** The maximum number of requests that can be executed concurrently on a connection. */ + public static final TypedDriverOption CONNECTION_MAX_REQUESTS = + new TypedDriverOption<>(DefaultDriverOption.CONNECTION_MAX_REQUESTS, GenericType.INTEGER); + /** The maximum number of "orphaned" requests before a connection gets closed automatically. */ + public static final TypedDriverOption CONNECTION_MAX_ORPHAN_REQUESTS = + new TypedDriverOption<>( + DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS, GenericType.INTEGER); + /** Whether to log non-fatal errors when the driver tries to open a new connection. */ + public static final TypedDriverOption CONNECTION_WARN_INIT_ERROR = + new TypedDriverOption<>(DefaultDriverOption.CONNECTION_WARN_INIT_ERROR, GenericType.BOOLEAN); + /** The number of connections in the LOCAL pool. */ + public static final TypedDriverOption CONNECTION_POOL_LOCAL_SIZE = + new TypedDriverOption<>(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, GenericType.INTEGER); + /** The number of connections in the REMOTE pool. */ + public static final TypedDriverOption CONNECTION_POOL_REMOTE_SIZE = + new TypedDriverOption<>(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE, GenericType.INTEGER); + /** + * Whether to schedule reconnection attempts if all contact points are unreachable on the first + * initialization attempt. + */ + public static final TypedDriverOption RECONNECT_ON_INIT = + new TypedDriverOption<>(DefaultDriverOption.RECONNECT_ON_INIT, GenericType.BOOLEAN); + /** The class of the reconnection policy. */ + public static final TypedDriverOption RECONNECTION_POLICY_CLASS = + new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_POLICY_CLASS, GenericType.STRING); + /** Base delay for computing time between reconnection attempts. */ + public static final TypedDriverOption RECONNECTION_BASE_DELAY = + new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_BASE_DELAY, GenericType.DURATION); + /** Maximum delay between reconnection attempts. */ + public static final TypedDriverOption RECONNECTION_MAX_DELAY = + new TypedDriverOption<>(DefaultDriverOption.RECONNECTION_MAX_DELAY, GenericType.DURATION); + /** The class of the retry policy. 
*/ + public static final TypedDriverOption RETRY_POLICY_CLASS = + new TypedDriverOption<>(DefaultDriverOption.RETRY_POLICY_CLASS, GenericType.STRING); + /** The class of the speculative execution policy. */ + public static final TypedDriverOption SPECULATIVE_EXECUTION_POLICY_CLASS = + new TypedDriverOption<>( + DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, GenericType.STRING); + /** The maximum number of executions. */ + public static final TypedDriverOption SPECULATIVE_EXECUTION_MAX = + new TypedDriverOption<>(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, GenericType.INTEGER); + /** The delay between each execution. */ + public static final TypedDriverOption SPECULATIVE_EXECUTION_DELAY = + new TypedDriverOption<>( + DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, GenericType.DURATION); + /** The class of the authentication provider. */ + public static final TypedDriverOption AUTH_PROVIDER_CLASS = + new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_CLASS, GenericType.STRING); + /** Plain text auth provider username. */ + public static final TypedDriverOption AUTH_PROVIDER_USER_NAME = + new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, GenericType.STRING); + /** Plain text auth provider password. */ + public static final TypedDriverOption AUTH_PROVIDER_PASSWORD = + new TypedDriverOption<>(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, GenericType.STRING); + /** The class of the SSL Engine Factory. */ + public static final TypedDriverOption SSL_ENGINE_FACTORY_CLASS = + new TypedDriverOption<>(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, GenericType.STRING); + /** The cipher suites to enable when creating an SSLEngine for a connection. */ + public static final TypedDriverOption> SSL_CIPHER_SUITES = + new TypedDriverOption<>( + DefaultDriverOption.SSL_CIPHER_SUITES, GenericType.listOf(String.class)); + /** + * Whether or not to require validation that the hostname of the server certificate's common name + * matches the hostname of the server being connected to. + */ + public static final TypedDriverOption SSL_HOSTNAME_VALIDATION = + new TypedDriverOption<>(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, GenericType.BOOLEAN); + + public static final TypedDriverOption SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN = + new TypedDriverOption<>( + DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, GenericType.BOOLEAN); + /** The location of the keystore file. */ + public static final TypedDriverOption SSL_KEYSTORE_PATH = + new TypedDriverOption<>(DefaultDriverOption.SSL_KEYSTORE_PATH, GenericType.STRING); + /** The keystore password. */ + public static final TypedDriverOption SSL_KEYSTORE_PASSWORD = + new TypedDriverOption<>(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, GenericType.STRING); + + /** The duration between attempts to reload the keystore. */ + public static final TypedDriverOption SSL_KEYSTORE_RELOAD_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, GenericType.DURATION); + + /** The location of the truststore file. */ + public static final TypedDriverOption SSL_TRUSTSTORE_PATH = + new TypedDriverOption<>(DefaultDriverOption.SSL_TRUSTSTORE_PATH, GenericType.STRING); + /** The truststore password. */ + public static final TypedDriverOption SSL_TRUSTSTORE_PASSWORD = + new TypedDriverOption<>(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, GenericType.STRING); + /** The class of the generator that assigns a microsecond timestamp to each request. 
*/ + public static final TypedDriverOption TIMESTAMP_GENERATOR_CLASS = + new TypedDriverOption<>(DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS, GenericType.STRING); + /** Whether to force the driver to use Java's millisecond-precision system clock. */ + public static final TypedDriverOption TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK = + new TypedDriverOption<>( + DefaultDriverOption.TIMESTAMP_GENERATOR_FORCE_JAVA_CLOCK, GenericType.BOOLEAN); + /** How far in the future timestamps are allowed to drift before the warning is logged. */ + public static final TypedDriverOption TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD = + new TypedDriverOption<>( + DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_THRESHOLD, GenericType.DURATION); + /** How often the warning will be logged if timestamps keep drifting above the threshold. */ + public static final TypedDriverOption TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.TIMESTAMP_GENERATOR_DRIFT_WARNING_INTERVAL, GenericType.DURATION); + + /** + * The class of a session-wide component that tracks the outcome of requests. + * + * @deprecated Use {@link #REQUEST_TRACKER_CLASSES} instead. + */ + @Deprecated + public static final TypedDriverOption REQUEST_TRACKER_CLASS = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACKER_CLASS, GenericType.STRING); + + /** The classes of session-wide components that track the outcome of requests. */ + public static final TypedDriverOption> REQUEST_TRACKER_CLASSES = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_TRACKER_CLASSES, GenericType.listOf(String.class)); + + /** The class of a session-wide component that generates request IDs. */ + public static final TypedDriverOption REQUEST_ID_GENERATOR_CLASS = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, GenericType.STRING); + + /** Whether to log successful requests. */ + public static final TypedDriverOption REQUEST_LOGGER_SUCCESS_ENABLED = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, GenericType.BOOLEAN); + /** The threshold to classify a successful request as "slow". */ + public static final TypedDriverOption REQUEST_LOGGER_SLOW_THRESHOLD = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_LOGGER_SLOW_THRESHOLD, GenericType.DURATION); + /** Whether to log slow requests. */ + public static final TypedDriverOption REQUEST_LOGGER_SLOW_ENABLED = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, GenericType.BOOLEAN); + /** Whether to log failed requests. */ + public static final TypedDriverOption REQUEST_LOGGER_ERROR_ENABLED = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, GenericType.BOOLEAN); + /** The maximum length of the query string in the log message. */ + public static final TypedDriverOption REQUEST_LOGGER_MAX_QUERY_LENGTH = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_LOGGER_MAX_QUERY_LENGTH, GenericType.INTEGER); + /** Whether to log bound values in addition to the query string. */ + public static final TypedDriverOption REQUEST_LOGGER_VALUES = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_VALUES, GenericType.BOOLEAN); + /** The maximum length for bound values in the log message. */ + public static final TypedDriverOption REQUEST_LOGGER_MAX_VALUE_LENGTH = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_LOGGER_MAX_VALUE_LENGTH, GenericType.INTEGER); + /** The maximum number of bound values to log. 
*/ + public static final TypedDriverOption REQUEST_LOGGER_MAX_VALUES = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_MAX_VALUES, GenericType.INTEGER); + /** Whether to log stack traces for failed queries. */ + public static final TypedDriverOption REQUEST_LOGGER_STACK_TRACES = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, GenericType.BOOLEAN); + /** + * The class of a session-wide component that controls the rate at which requests are executed. + */ + public static final TypedDriverOption REQUEST_THROTTLER_CLASS = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_THROTTLER_CLASS, GenericType.STRING); + /** The maximum number of requests that are allowed to execute in parallel. */ + public static final TypedDriverOption REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS, GenericType.INTEGER); + /** The maximum allowed request rate. */ + public static final TypedDriverOption REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_THROTTLER_MAX_REQUESTS_PER_SECOND, GenericType.INTEGER); + /** + * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. + */ + public static final TypedDriverOption REQUEST_THROTTLER_MAX_QUEUE_SIZE = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE, GenericType.INTEGER); + /** How often the throttler attempts to dequeue requests. */ + public static final TypedDriverOption REQUEST_THROTTLER_DRAIN_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_THROTTLER_DRAIN_INTERVAL, GenericType.DURATION); + + /** + * The class of a session-wide component that listens for node state changes. + * + * @deprecated Use {@link #METADATA_NODE_STATE_LISTENER_CLASSES} instead. + */ + @Deprecated + public static final TypedDriverOption METADATA_NODE_STATE_LISTENER_CLASS = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS, GenericType.STRING); + + /** + * The class of a session-wide component that listens for schema changes. + * + * @deprecated Use {@link #METADATA_SCHEMA_CHANGE_LISTENER_CLASSES} instead. + */ + @Deprecated + public static final TypedDriverOption METADATA_SCHEMA_CHANGE_LISTENER_CLASS = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS, GenericType.STRING); + + /** The classes of session-wide components that listen for node state changes. */ + public static final TypedDriverOption> METADATA_NODE_STATE_LISTENER_CLASSES = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES, + GenericType.listOf(String.class)); + + /** The classes of session-wide components that listen for schema changes. */ + public static final TypedDriverOption> METADATA_SCHEMA_CHANGE_LISTENER_CLASSES = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES, + GenericType.listOf(String.class)); + + /** + * The class of the address translator to use to convert the addresses sent by Cassandra nodes + * into ones that the driver uses to connect. + */ + public static final TypedDriverOption ADDRESS_TRANSLATOR_CLASS = + new TypedDriverOption<>(DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS, GenericType.STRING); + /** The native protocol version to use. 
*/ + public static final TypedDriverOption PROTOCOL_VERSION = + new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_VERSION, GenericType.STRING); + /** The name of the algorithm used to compress protocol frames. */ + public static final TypedDriverOption PROTOCOL_COMPRESSION = + new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_COMPRESSION, GenericType.STRING); + /** The maximum length, in bytes, of the frames supported by the driver. */ + public static final TypedDriverOption PROTOCOL_MAX_FRAME_LENGTH = + new TypedDriverOption<>(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH, GenericType.LONG); + /** + * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active + * keyspace. + */ + public static final TypedDriverOption REQUEST_WARN_IF_SET_KEYSPACE = + new TypedDriverOption<>( + DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE, GenericType.BOOLEAN); + /** How many times the driver will attempt to fetch the query trace if it is not ready yet. */ + public static final TypedDriverOption REQUEST_TRACE_ATTEMPTS = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_ATTEMPTS, GenericType.INTEGER); + /** The interval between each attempt. */ + public static final TypedDriverOption REQUEST_TRACE_INTERVAL = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_INTERVAL, GenericType.DURATION); + /** The consistency level to use for trace queries. */ + public static final TypedDriverOption REQUEST_TRACE_CONSISTENCY = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY, GenericType.STRING); + /** Whether or not to publish aggregable histogram for metrics */ + public static final TypedDriverOption METRICS_GENERATE_AGGREGABLE_HISTOGRAMS = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS, GenericType.BOOLEAN); + /** List of enabled session-level metrics. */ + public static final TypedDriverOption> METRICS_SESSION_ENABLED = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_ENABLED, GenericType.listOf(String.class)); + /** List of enabled node-level metrics. */ + public static final TypedDriverOption> METRICS_NODE_ENABLED = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_ENABLED, GenericType.listOf(String.class)); + /** The largest latency that we expect to record for requests. */ + public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_HIGHEST = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, GenericType.DURATION); + /** The shortest latency that we expect to record for requests. */ + public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_LOWEST = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, GenericType.DURATION); + /** Optional service-level objectives to meet, as a list of latencies to track. */ + public static final TypedDriverOption> METRICS_SESSION_CQL_REQUESTS_SLO = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO, + GenericType.listOf(GenericType.DURATION)); + /** Optional pre-defined percentile of cql requests to publish, as a list of percentiles . */ + public static final TypedDriverOption> + METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, + GenericType.listOf(GenericType.DOUBLE)); + /** + * The number of significant decimal digits to which internal structures will maintain for + * requests. 
+ */ + public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_DIGITS = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, GenericType.INTEGER); + /** The interval at which percentile data is refreshed for requests. */ + public static final TypedDriverOption METRICS_SESSION_CQL_REQUESTS_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL, GenericType.DURATION); + /** The largest latency that we expect to record for throttling. */ + public static final TypedDriverOption METRICS_SESSION_THROTTLING_HIGHEST = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, GenericType.DURATION); + /** The shortest latency that we expect to record for throttling. */ + public static final TypedDriverOption METRICS_SESSION_THROTTLING_LOWEST = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST, GenericType.DURATION); + /** Optional service-level objectives to meet, as a list of latencies to track. */ + public static final TypedDriverOption> METRICS_SESSION_THROTTLING_SLO = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO, + GenericType.listOf(GenericType.DURATION)); + /** Optional pre-defined percentile of throttling delay to publish, as a list of percentiles . */ + public static final TypedDriverOption> + METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES, + GenericType.listOf(GenericType.DOUBLE)); + /** + * The number of significant decimal digits to which internal structures will maintain for + * throttling. + */ + public static final TypedDriverOption METRICS_SESSION_THROTTLING_DIGITS = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, GenericType.INTEGER); + /** The interval at which percentile data is refreshed for throttling. */ + public static final TypedDriverOption METRICS_SESSION_THROTTLING_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_SESSION_THROTTLING_INTERVAL, GenericType.DURATION); + /** The largest latency that we expect to record for requests. */ + public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_HIGHEST = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, GenericType.DURATION); + /** The shortest latency that we expect to record for requests. */ + public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_LOWEST = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, GenericType.DURATION); + /** Optional service-level objectives to meet, as a list of latencies to track. */ + public static final TypedDriverOption> METRICS_NODE_CQL_MESSAGES_SLO = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO, + GenericType.listOf(GenericType.DURATION)); + /** Optional pre-defined percentile of node cql messages to publish, as a list of percentiles . */ + public static final TypedDriverOption> + METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES, + GenericType.listOf(GenericType.DOUBLE)); + /** + * The number of significant decimal digits to which internal structures will maintain for + * requests. 
+ */ + public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_DIGITS = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, GenericType.INTEGER); + /** The interval at which percentile data is refreshed for requests. */ + public static final TypedDriverOption METRICS_NODE_CQL_MESSAGES_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL, GenericType.DURATION); + /** Whether or not to disable the Nagle algorithm. */ + public static final TypedDriverOption SOCKET_TCP_NODELAY = + new TypedDriverOption<>(DefaultDriverOption.SOCKET_TCP_NODELAY, GenericType.BOOLEAN); + /** Whether or not to enable TCP keep-alive probes. */ + public static final TypedDriverOption SOCKET_KEEP_ALIVE = + new TypedDriverOption<>(DefaultDriverOption.SOCKET_KEEP_ALIVE, GenericType.BOOLEAN); + /** Whether or not to allow address reuse. */ + public static final TypedDriverOption SOCKET_REUSE_ADDRESS = + new TypedDriverOption<>(DefaultDriverOption.SOCKET_REUSE_ADDRESS, GenericType.BOOLEAN); + /** Sets the linger interval. */ + public static final TypedDriverOption SOCKET_LINGER_INTERVAL = + new TypedDriverOption<>(DefaultDriverOption.SOCKET_LINGER_INTERVAL, GenericType.INTEGER); + /** Sets a hint to the size of the underlying buffers for incoming network I/O. */ + public static final TypedDriverOption SOCKET_RECEIVE_BUFFER_SIZE = + new TypedDriverOption<>(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE, GenericType.INTEGER); + /** Sets a hint to the size of the underlying buffers for outgoing network I/O. */ + public static final TypedDriverOption SOCKET_SEND_BUFFER_SIZE = + new TypedDriverOption<>(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE, GenericType.INTEGER); + /** The connection heartbeat interval. */ + public static final TypedDriverOption HEARTBEAT_INTERVAL = + new TypedDriverOption<>(DefaultDriverOption.HEARTBEAT_INTERVAL, GenericType.DURATION); + /** How long the driver waits for the response to a heartbeat. */ + public static final TypedDriverOption HEARTBEAT_TIMEOUT = + new TypedDriverOption<>(DefaultDriverOption.HEARTBEAT_TIMEOUT, GenericType.DURATION); + /** How long the driver waits to propagate a Topology event. */ + public static final TypedDriverOption METADATA_TOPOLOGY_WINDOW = + new TypedDriverOption<>(DefaultDriverOption.METADATA_TOPOLOGY_WINDOW, GenericType.DURATION); + /** The maximum number of events that can accumulate. */ + public static final TypedDriverOption METADATA_TOPOLOGY_MAX_EVENTS = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_TOPOLOGY_MAX_EVENTS, GenericType.INTEGER); + /** Whether schema metadata is enabled. */ + public static final TypedDriverOption METADATA_SCHEMA_ENABLED = + new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_ENABLED, GenericType.BOOLEAN); + /** The timeout for the requests to the schema tables. */ + public static final TypedDriverOption METADATA_SCHEMA_REQUEST_TIMEOUT = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT, GenericType.DURATION); + /** The page size for the requests to the schema tables. */ + public static final TypedDriverOption METADATA_SCHEMA_REQUEST_PAGE_SIZE = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE, GenericType.INTEGER); + /** The list of keyspaces for which schema and token metadata should be maintained. 
*/ + public static final TypedDriverOption> METADATA_SCHEMA_REFRESHED_KEYSPACES = + new TypedDriverOption<>( + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, + GenericType.listOf(String.class)); + /** How long the driver waits to apply a refresh. */ + public static final TypedDriverOption METADATA_SCHEMA_WINDOW = + new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_WINDOW, GenericType.DURATION); + /** The maximum number of refreshes that can accumulate. */ + public static final TypedDriverOption METADATA_SCHEMA_MAX_EVENTS = + new TypedDriverOption<>(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS, GenericType.INTEGER); + /** Whether token metadata is enabled. */ + public static final TypedDriverOption METADATA_TOKEN_MAP_ENABLED = + new TypedDriverOption<>(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED, GenericType.BOOLEAN); + /** How long the driver waits for responses to control queries. */ + public static final TypedDriverOption CONTROL_CONNECTION_TIMEOUT = + new TypedDriverOption<>(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT, GenericType.DURATION); + /** The interval between each schema agreement check attempt. */ + public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_INTERVAL = + new TypedDriverOption<>( + DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_INTERVAL, GenericType.DURATION); + /** The timeout after which schema agreement fails. */ + public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_TIMEOUT = + new TypedDriverOption<>( + DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, GenericType.DURATION); + /** Whether to log a warning if schema agreement fails. */ + public static final TypedDriverOption CONTROL_CONNECTION_AGREEMENT_WARN = + new TypedDriverOption<>( + DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_WARN, GenericType.BOOLEAN); + /** Whether `Session.prepare` calls should be sent to all nodes in the cluster. */ + public static final TypedDriverOption PREPARE_ON_ALL_NODES = + new TypedDriverOption<>(DefaultDriverOption.PREPARE_ON_ALL_NODES, GenericType.BOOLEAN); + /** Whether the driver tries to prepare on new nodes at all. */ + public static final TypedDriverOption REPREPARE_ENABLED = + new TypedDriverOption<>(DefaultDriverOption.REPREPARE_ENABLED, GenericType.BOOLEAN); + /** Whether to check `system.prepared_statements` on the target node before repreparing. */ + public static final TypedDriverOption REPREPARE_CHECK_SYSTEM_TABLE = + new TypedDriverOption<>( + DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE, GenericType.BOOLEAN); + /** The maximum number of statements that should be reprepared. */ + public static final TypedDriverOption REPREPARE_MAX_STATEMENTS = + new TypedDriverOption<>(DefaultDriverOption.REPREPARE_MAX_STATEMENTS, GenericType.INTEGER); + /** The maximum number of concurrent requests when repreparing. */ + public static final TypedDriverOption REPREPARE_MAX_PARALLELISM = + new TypedDriverOption<>(DefaultDriverOption.REPREPARE_MAX_PARALLELISM, GenericType.INTEGER); + /** The request timeout when repreparing. */ + public static final TypedDriverOption REPREPARE_TIMEOUT = + new TypedDriverOption<>(DefaultDriverOption.REPREPARE_TIMEOUT, GenericType.DURATION); + /** Whether the prepared statements cache use weak values. */ + public static final TypedDriverOption PREPARED_CACHE_WEAK_VALUES = + new TypedDriverOption<>(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, GenericType.BOOLEAN); + /** The number of threads in the I/O group. 
*/ + public static final TypedDriverOption NETTY_IO_SIZE = + new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SIZE, GenericType.INTEGER); + /** Quiet period for I/O group shutdown. */ + public static final TypedDriverOption NETTY_IO_SHUTDOWN_QUIET_PERIOD = + new TypedDriverOption<>( + DefaultDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD, GenericType.INTEGER); + /** Max time to wait for I/O group shutdown. */ + public static final TypedDriverOption NETTY_IO_SHUTDOWN_TIMEOUT = + new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT, GenericType.INTEGER); + /** Units for I/O group quiet period and timeout. */ + public static final TypedDriverOption NETTY_IO_SHUTDOWN_UNIT = + new TypedDriverOption<>(DefaultDriverOption.NETTY_IO_SHUTDOWN_UNIT, GenericType.STRING); + /** The number of threads in the Admin group. */ + public static final TypedDriverOption NETTY_ADMIN_SIZE = + new TypedDriverOption<>(DefaultDriverOption.NETTY_ADMIN_SIZE, GenericType.INTEGER); + /** Quiet period for admin group shutdown. */ + public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD = + new TypedDriverOption<>( + DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_QUIET_PERIOD, GenericType.INTEGER); + /** Max time to wait for admin group shutdown. */ + public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_TIMEOUT = + new TypedDriverOption<>( + DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_TIMEOUT, GenericType.INTEGER); + /** Units for admin group quiet period and timeout. */ + public static final TypedDriverOption NETTY_ADMIN_SHUTDOWN_UNIT = + new TypedDriverOption<>(DefaultDriverOption.NETTY_ADMIN_SHUTDOWN_UNIT, GenericType.STRING); + /** @deprecated This option was removed in version 4.6.1. */ + @Deprecated + public static final TypedDriverOption COALESCER_MAX_RUNS = + new TypedDriverOption<>(DefaultDriverOption.COALESCER_MAX_RUNS, GenericType.INTEGER); + /** The coalescer reschedule interval. */ + public static final TypedDriverOption COALESCER_INTERVAL = + new TypedDriverOption<>(DefaultDriverOption.COALESCER_INTERVAL, GenericType.DURATION); + /** Whether to resolve the addresses passed to `basic.contact-points`. */ + public static final TypedDriverOption RESOLVE_CONTACT_POINTS = + new TypedDriverOption<>(DefaultDriverOption.RESOLVE_CONTACT_POINTS, GenericType.BOOLEAN); + /** + * This is how frequent the timer should wake up to check for timed-out tasks or speculative + * executions. + */ + public static final TypedDriverOption NETTY_TIMER_TICK_DURATION = + new TypedDriverOption<>(DefaultDriverOption.NETTY_TIMER_TICK_DURATION, GenericType.DURATION); + /** Number of ticks in the Timer wheel. */ + public static final TypedDriverOption NETTY_TIMER_TICKS_PER_WHEEL = + new TypedDriverOption<>(DefaultDriverOption.NETTY_TIMER_TICKS_PER_WHEEL, GenericType.INTEGER); + /** + * Whether logging of server warnings generated during query execution should be disabled by the + * driver. + */ + public static final TypedDriverOption REQUEST_LOG_WARNINGS = + new TypedDriverOption<>(DefaultDriverOption.REQUEST_LOG_WARNINGS, GenericType.BOOLEAN); + /** Whether the threads created by the driver should be daemon threads. */ + public static final TypedDriverOption NETTY_DAEMON = + new TypedDriverOption<>(DefaultDriverOption.NETTY_DAEMON, GenericType.BOOLEAN); + /** + * The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a + * service. 
+ */ + public static final TypedDriverOption CLOUD_SECURE_CONNECT_BUNDLE = + new TypedDriverOption<>(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, GenericType.STRING); + /** Whether the slow replica avoidance should be enabled in the default LBP. */ + public static final TypedDriverOption LOAD_BALANCING_POLICY_SLOW_AVOIDANCE = + new TypedDriverOption<>( + DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, GenericType.BOOLEAN); + /** The timeout to use when establishing driver connections. */ + public static final TypedDriverOption CONNECTION_CONNECT_TIMEOUT = + new TypedDriverOption<>(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT, GenericType.DURATION); + /** The maximum number of live sessions that are allowed to coexist in a given VM. */ + public static final TypedDriverOption SESSION_LEAK_THRESHOLD = + new TypedDriverOption<>(DefaultDriverOption.SESSION_LEAK_THRESHOLD, GenericType.INTEGER); + + /** The name of the application using the session. */ + public static final TypedDriverOption APPLICATION_NAME = + new TypedDriverOption<>(DseDriverOption.APPLICATION_NAME, GenericType.STRING); + /** The version of the application using the session. */ + public static final TypedDriverOption APPLICATION_VERSION = + new TypedDriverOption<>(DseDriverOption.APPLICATION_VERSION, GenericType.STRING); + /** Proxy authentication for GSSAPI authentication: allows to login as another user or role. */ + public static final TypedDriverOption AUTH_PROVIDER_AUTHORIZATION_ID = + new TypedDriverOption<>(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, GenericType.STRING); + /** Service name for GSSAPI authentication. */ + public static final TypedDriverOption AUTH_PROVIDER_SERVICE = + new TypedDriverOption<>(DseDriverOption.AUTH_PROVIDER_SERVICE, GenericType.STRING); + /** Login configuration for GSSAPI authentication. */ + public static final TypedDriverOption AUTH_PROVIDER_LOGIN_CONFIGURATION = + new TypedDriverOption<>( + DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, GenericType.STRING); + /** Internal SASL properties, if any, such as QOP, for GSSAPI authentication. */ + public static final TypedDriverOption> AUTH_PROVIDER_SASL_PROPERTIES = + new TypedDriverOption<>( + DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, + GenericType.mapOf(GenericType.STRING, GenericType.STRING)); + /** The page size for continuous paging. */ + public static final TypedDriverOption CONTINUOUS_PAGING_PAGE_SIZE = + new TypedDriverOption<>(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, GenericType.INTEGER); + /** + * Whether {@link #CONTINUOUS_PAGING_PAGE_SIZE} should be interpreted in number of rows or bytes. + */ + public static final TypedDriverOption CONTINUOUS_PAGING_PAGE_SIZE_BYTES = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, GenericType.BOOLEAN); + /** The maximum number of continuous pages to return. */ + public static final TypedDriverOption CONTINUOUS_PAGING_MAX_PAGES = + new TypedDriverOption<>(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, GenericType.INTEGER); + /** The maximum number of continuous pages per second. */ + public static final TypedDriverOption CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, GenericType.INTEGER); + /** The maximum number of continuous pages that can be stored in the local queue. 
*/ + public static final TypedDriverOption CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, GenericType.INTEGER); + /** How long to wait for the coordinator to send the first continuous page. */ + public static final TypedDriverOption CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, GenericType.DURATION); + /** How long to wait for the coordinator to send subsequent continuous pages. */ + public static final TypedDriverOption CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, GenericType.DURATION); + /** The largest latency that we expect to record for continuous requests. */ + public static final TypedDriverOption + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, + GenericType.DURATION); + /** The shortest latency that we expect to record for continuous requests. */ + public static final TypedDriverOption + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, + GenericType.DURATION); + /** Optional service-level objectives to meet, as a list of latencies to track. */ + public static final TypedDriverOption> + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO, + GenericType.listOf(GenericType.DURATION)); + /** + * Optional pre-defined percentile of continuous paging cql requests to publish, as a list of + * percentiles . + */ + public static final TypedDriverOption> + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, + GenericType.listOf(GenericType.DOUBLE)); + /** + * The number of significant decimal digits to which internal structures will maintain for + * continuous requests. + */ + public static final TypedDriverOption + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, + GenericType.INTEGER); + /** The interval at which percentile data is refreshed for continuous requests. */ + public static final TypedDriverOption + CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL = + new TypedDriverOption<>( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL, + GenericType.DURATION); + /** The read consistency level to use for graph statements. */ + public static final TypedDriverOption GRAPH_READ_CONSISTENCY_LEVEL = + new TypedDriverOption<>(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, GenericType.STRING); + /** The write consistency level to use for graph statements. */ + public static final TypedDriverOption GRAPH_WRITE_CONSISTENCY_LEVEL = + new TypedDriverOption<>(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, GenericType.STRING); + /** The traversal source to use for graph statements. */ + public static final TypedDriverOption GRAPH_TRAVERSAL_SOURCE = + new TypedDriverOption<>(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, GenericType.STRING); + /** + * The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra + * native protocol. 
+ */ + public static final TypedDriverOption<String> GRAPH_SUB_PROTOCOL = + new TypedDriverOption<>(DseDriverOption.GRAPH_SUB_PROTOCOL, GenericType.STRING); + /** Whether a script statement represents a system query. */ + public static final TypedDriverOption<Boolean> GRAPH_IS_SYSTEM_QUERY = + new TypedDriverOption<>(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, GenericType.BOOLEAN); + /** The name of the graph targeted by graph statements. */ + public static final TypedDriverOption<String> GRAPH_NAME = + new TypedDriverOption<>(DseDriverOption.GRAPH_NAME, GenericType.STRING); + /** How long the driver waits for a graph request to complete. */ + public static final TypedDriverOption<Duration> GRAPH_TIMEOUT = + new TypedDriverOption<>(DseDriverOption.GRAPH_TIMEOUT, GenericType.DURATION); + /** Whether to send events for Insights monitoring. */ + public static final TypedDriverOption<Boolean> MONITOR_REPORTING_ENABLED = + new TypedDriverOption<>(DseDriverOption.MONITOR_REPORTING_ENABLED, GenericType.BOOLEAN); + /** Whether to enable paging for Graph queries. */ + public static final TypedDriverOption<String> GRAPH_PAGING_ENABLED = + new TypedDriverOption<>(DseDriverOption.GRAPH_PAGING_ENABLED, GenericType.STRING); + /** The page size for Graph continuous paging. */ + public static final TypedDriverOption<Integer> GRAPH_CONTINUOUS_PAGING_PAGE_SIZE = + new TypedDriverOption<>( + DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, GenericType.INTEGER); + /** The maximum number of Graph continuous pages to return. */ + public static final TypedDriverOption<Integer> GRAPH_CONTINUOUS_PAGING_MAX_PAGES = + new TypedDriverOption<>( + DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, GenericType.INTEGER); + /** The maximum number of Graph continuous pages per second. */ + public static final TypedDriverOption<Integer> GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND = + new TypedDriverOption<>( + DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, GenericType.INTEGER); + /** The maximum number of Graph continuous pages that can be stored in the local queue. */ + public static final TypedDriverOption<Integer> GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES = + new TypedDriverOption<>( + DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, GenericType.INTEGER); + /** The largest latency that we expect to record for graph requests. */ + public static final TypedDriverOption<Duration> METRICS_SESSION_GRAPH_REQUESTS_HIGHEST = + new TypedDriverOption<>( + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, GenericType.DURATION); + /** The shortest latency that we expect to record for graph requests. */ + public static final TypedDriverOption<Duration> METRICS_SESSION_GRAPH_REQUESTS_LOWEST = + new TypedDriverOption<>( + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, GenericType.DURATION); + /** Optional service-level objectives to meet, as a list of latencies to track. */ + public static final TypedDriverOption<List<Duration>> METRICS_SESSION_GRAPH_REQUESTS_SLO = + new TypedDriverOption<>( + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO, + GenericType.listOf(GenericType.DURATION)); + /** Optional pre-defined percentiles of graph requests to publish, as a list of percentiles. */ + public static final TypedDriverOption<List<Double>> + METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES = + new TypedDriverOption<>( + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES, + GenericType.listOf(GenericType.DOUBLE)); + /** + * The number of significant decimal digits that internal structures will maintain for graph + * requests.
+ */ + public static final TypedDriverOption<Integer> METRICS_SESSION_GRAPH_REQUESTS_DIGITS = + new TypedDriverOption<>( + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, GenericType.INTEGER); + /** The interval at which percentile data is refreshed for graph requests. */ + public static final TypedDriverOption<Duration> METRICS_SESSION_GRAPH_REQUESTS_INTERVAL = + new TypedDriverOption<>( + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL, GenericType.DURATION); + /** The largest latency that we expect to record for graph requests. */ + public static final TypedDriverOption<Duration> METRICS_NODE_GRAPH_MESSAGES_HIGHEST = + new TypedDriverOption<>( + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, GenericType.DURATION); + /** The shortest latency that we expect to record for graph requests. */ + public static final TypedDriverOption<Duration> METRICS_NODE_GRAPH_MESSAGES_LOWEST = + new TypedDriverOption<>( + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, GenericType.DURATION); + /** Optional service-level objectives to meet, as a list of latencies to track. */ + public static final TypedDriverOption<List<Duration>> METRICS_NODE_GRAPH_MESSAGES_SLO = + new TypedDriverOption<>( + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO, + GenericType.listOf(GenericType.DURATION)); + /** + * Optional pre-defined percentiles of node graph requests to publish, as a list of percentiles. + */ + public static final TypedDriverOption<List<Double>> + METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES = + new TypedDriverOption<>( + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES, + GenericType.listOf(GenericType.DOUBLE)); + /** + * The number of significant decimal digits that internal structures will maintain for graph + * requests. + */ + public static final TypedDriverOption<Integer> METRICS_NODE_GRAPH_MESSAGES_DIGITS = + new TypedDriverOption<>( + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, GenericType.INTEGER); + /** The interval at which percentile data is refreshed for graph requests. */ + public static final TypedDriverOption<Duration> METRICS_NODE_GRAPH_MESSAGES_INTERVAL = + new TypedDriverOption<>( + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL, GenericType.DURATION); + + /** The time after which the node level metrics will be evicted. */ + public static final TypedDriverOption<Duration> METRICS_NODE_EXPIRE_AFTER = + new TypedDriverOption<>(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, GenericType.DURATION); + + /** The classname of the desired MetricsFactory implementation. */ + public static final TypedDriverOption<String> METRICS_FACTORY_CLASS = + new TypedDriverOption<>(DefaultDriverOption.METRICS_FACTORY_CLASS, GenericType.STRING); + + /** The classname of the desired {@code MetricIdGenerator} implementation. */ + public static final TypedDriverOption<String> METRICS_ID_GENERATOR_CLASS = + new TypedDriverOption<>(DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, GenericType.STRING); + + /** The value of the prefix to prepend to all metric names. */ + public static final TypedDriverOption<String> METRICS_ID_GENERATOR_PREFIX = + new TypedDriverOption<>(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, GenericType.STRING); + + /** The maximum number of nodes from remote DCs to include in query plans. */ + public static final TypedDriverOption<Integer> + LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC = + new TypedDriverOption<>( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, + GenericType.INTEGER); + /** Whether to consider nodes from remote DCs if the request's consistency level is local.
*/ + public static final TypedDriverOption<Boolean> + LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS = + new TypedDriverOption<>( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, + GenericType.BOOLEAN); + + public static final TypedDriverOption<String> ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME = + new TypedDriverOption<>( + DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME, GenericType.STRING); + public static final TypedDriverOption<Map<String, String>> ADDRESS_TRANSLATOR_SUBNET_ADDRESSES = + new TypedDriverOption<>( + DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES, + GenericType.mapOf(GenericType.STRING, GenericType.STRING)); + public static final TypedDriverOption<String> ADDRESS_TRANSLATOR_DEFAULT_ADDRESS = + new TypedDriverOption<>( + DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, GenericType.STRING); + public static final TypedDriverOption<Boolean> ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES = + new TypedDriverOption<>( + DefaultDriverOption.ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES, GenericType.BOOLEAN); + + /** + * Ordered preference list of remote DCs, optionally supplied for automatic failover and included + * in query plans. This feature is enabled only when max-nodes-per-remote-dc is greater than 0. + */ + public static final TypedDriverOption<List<String>> + LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS = + new TypedDriverOption<>( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS, + GenericType.listOf(String.class)); + + private static Iterable<TypedDriverOption<?>> introspectBuiltInValues() { + try { + ImmutableList.Builder<TypedDriverOption<?>> result = ImmutableList.builder(); + for (Field field : TypedDriverOption.class.getFields()) { + if ((field.getModifiers() & PUBLIC_STATIC_FINAL) == PUBLIC_STATIC_FINAL + && field.getType() == TypedDriverOption.class) { + TypedDriverOption<?> typedOption = (TypedDriverOption<?>) field.get(null); + result.add(typedOption); + } + } + return result.build(); + } catch (IllegalAccessException e) { + throw new IllegalStateException("Unexpected error while introspecting built-in values", e); + } + } + + private static final int PUBLIC_STATIC_FINAL = Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL; +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java index 6ddc5abaf62..a751d983e70 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/config/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
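For illustration only (not part of this patch): a minimal sketch of how the typed options above are meant to be consumed, assuming the `OptionsMap` programmatic configuration API that ships with the driver (4.6+).

```java
// Hedged sketch: programmatic configuration through type-safe options.
// Assumes OptionsMap and DriverConfigLoader.fromMap(), available since driver 4.6.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.config.OptionsMap;
import com.datastax.oss.driver.api.core.config.TypedDriverOption;
import java.time.Duration;

public class TypedOptionsExample {
  public static void main(String[] args) {
    OptionsMap options = OptionsMap.driverDefaults();
    // The type parameter of each option keeps puts and gets type-safe:
    options.put(TypedDriverOption.CONNECTION_CONNECT_TIMEOUT, Duration.ofSeconds(10));
    options.put(TypedDriverOption.SESSION_LEAK_THRESHOLD, 4);
    try (CqlSession session =
        CqlSession.builder().withConfigLoader(DriverConfigLoader.fromMap(options)).build()) {
      System.out.println(session.getName());
    }
  }
}
```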
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java index 1c725715d54..8069474612a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/BusyConnectionException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,11 +27,14 @@ * requests. * *
<p>
This might happen under heavy load. The driver will automatically try the next node in the - * query plan. Therefore the only way that the client can observe this exception is as part of a + * query plan. Therefore, the only way that the client can observe this exception is as part of a * {@link AllNodesFailedException}. */ public class BusyConnectionException extends DriverException { + // Note: the driver doesn't use this constructor anymore, it is preserved only for backward + // compatibility. + @SuppressWarnings("unused") public BusyConnectionException(int maxAvailableIds) { this( String.format( @@ -38,6 +43,10 @@ public BusyConnectionException(int maxAvailableIds) { false); } + public BusyConnectionException(String message) { + this(message, null, false); + } + private BusyConnectionException( String message, ExecutionInfo executionInfo, boolean writableStackTrace) { super(message, executionInfo, null, writableStackTrace); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java index 9daee547a46..a192e2c5efc 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,8 +28,9 @@ *
<p>
For example, this can happen if the node is unresponsive and a heartbeat query failed, or if * the node was forced down. * - *
<p>
The driver will always retry these requests on the next node transparently. Therefore, the - * only way to observe this exception is as part of an {@link AllNodesFailedException}. + *
<p>
The driver will retry these requests on the next node transparently, unless the request is not + * idempotent. Therefore, this exception is usually observed as part of an {@link + * AllNodesFailedException}. */ public class ClosedConnectionException extends DriverException { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java index 4112bdcd6f8..519624e8d5d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ConnectionInitException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +28,7 @@ * *
<p>
The only time when this is returned directly to the client (wrapped in a {@link * AllNodesFailedException}) is at initialization. If it happens later when the driver is already - * connected, it is just logged an the connection is reattempted. + * connected, it is just logged and the connection is reattempted. */ public class ConnectionInitException extends DriverException { public ConnectionInitException(@NonNull String message, @Nullable Throwable cause) { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java new file mode 100644 index 00000000000..d0fc8fc3b73 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/CrcMismatchException.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.connection; + +import com.datastax.oss.driver.api.core.DriverException; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * Thrown when the checksums in a server response don't match (protocol v5 or above). + * + *
<p>
This indicates a data corruption issue, either due to a hardware issue on the client, or on + * the network between the server and the client. It is not recoverable: the driver will drop the + * connection. + */ +public class CrcMismatchException extends DriverException { + + public CrcMismatchException(@NonNull String message) { + super(message, null, null, true); + } + + @NonNull + @Override + public DriverException copy() { + return new CrcMismatchException(getMessage()); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java index e84504d089f..9954aefb3d4 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/FrameTooLongException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java index 183f7c5366e..60c3d60a69d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/HeartbeatException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,8 +30,8 @@ * *
<p>
Heartbeat queries are sent automatically on idle connections, to ensure that they are still * alive. If a heartbeat query fails, the connection is closed, and all pending queries are aborted. - * The exception will be passed to {@link RetryPolicy#onRequestAborted(Request, Throwable, int)}, - * which decides what to do next (the default policy retries the query on the next node). + * The exception will be passed to {@link RetryPolicy#onRequestAbortedVerdict(Request, Throwable, + * int)}, which decides what to do next (the default policy retries the query on the next node). */ public class HeartbeatException extends DriverException { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java index 083e83950c6..9f81843c9c0 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java index ade4f228669..737f985ad1d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/connection/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
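To make the wrapping behavior described in these Javadocs concrete, a hedged sketch (not part of this patch) of how a client observes per-node connection errors; it assumes `AllNodesFailedException#getAllErrors()`, present in recent driver versions (older ones only expose a single throwable per node via `getErrors()`).

```java
// Hedged sketch: BusyConnectionException, ClosedConnectionException and
// HeartbeatException surface wrapped in AllNodesFailedException, as described above.
import com.datastax.oss.driver.api.core.AllNodesFailedException;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.connection.BusyConnectionException;

public class ConnectionErrorInspection {
  static void run(CqlSession session) {
    try {
      session.execute("SELECT release_version FROM system.local");
    } catch (AllNodesFailedException e) {
      // One entry per node that was tried, with the errors it produced:
      e.getAllErrors()
          .forEach(
              (node, errors) ->
                  errors.forEach(
                      error -> {
                        if (error instanceof BusyConnectionException) {
                          System.out.printf("%s had no free stream ids: %s%n", node, error);
                        }
                      }));
    }
  }
}
```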
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java b/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java index b4efb691494..6f0afd3df8a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/context/DriverContext.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,6 +33,7 @@ import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; import com.datastax.oss.driver.api.core.time.TimestampGenerator; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; import com.datastax.oss.driver.api.core.tracker.RequestTracker; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.Map; @@ -137,6 +140,10 @@ default SpeculativeExecutionPolicy getSpeculativeExecutionPolicy(@NonNull String @NonNull RequestTracker getRequestTracker(); + /** @return The driver's request ID generator; never {@code null}. */ + @NonNull + Optional<RequestIdGenerator> getRequestIdGenerator(); + /** @return The driver's request throttler; never {@code null}. */ @NonNull RequestThrottler getRequestThrottler(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java new file mode 100644 index 00000000000..7b56bd61a09 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.api.core.cql; + +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletionStage; + +/** + * A session that offers user-friendly methods to execute CQL requests asynchronously. + * + * @since 4.4.0 + */ +public interface AsyncCqlSession extends Session { + + /** + * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, + * generally before the result is available). + * + * @param statement the CQL query to execute (that can be any {@code Statement}). + * @return a {@code CompletionStage} that, once complete, will produce the async result set. + */ + @NonNull + default CompletionStage<AsyncResultSet> executeAsync(@NonNull Statement<?> statement) { + return Objects.requireNonNull( + execute(statement, Statement.ASYNC), "The CQL processor should never return a null result"); + } + + /** + * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, + * generally before the result is available). + * + *
<p>
This is an alias for {@link #executeAsync(Statement) + * executeAsync(SimpleStatement.newInstance(query))}. + * + * @param query the CQL query to execute. + * @return a {@code CompletionStage} that, once complete, will produce the async result set. + * @see SimpleStatement#newInstance(String) + */ + @NonNull + default CompletionStage<AsyncResultSet> executeAsync(@NonNull String query) { + return executeAsync(SimpleStatement.newInstance(query)); + } + + /** + * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, + * generally before the result is available). + * + *
<p>
This is an alias for {@link #executeAsync(Statement) + * executeAsync(SimpleStatement.newInstance(query, values))}. + * + * @param query the CQL query to execute. + * @param values the values for placeholders in the query string. Individual values can be {@code + * null}, but the vararg array itself can't. + * @return a {@code CompletionStage} that, once complete, will produce the async result set. + * @see SimpleStatement#newInstance(String, Object...) + */ + @NonNull + default CompletionStage<AsyncResultSet> executeAsync( + @NonNull String query, @NonNull Object... values) { + return executeAsync(SimpleStatement.newInstance(query, values)); + } + + /** + * Executes a CQL statement asynchronously (the call returns as soon as the statement was sent, + * generally before the result is available). + * + *
<p>
This is an alias for {@link #executeAsync(Statement) + * executeAsync(SimpleStatement.newInstance(query, values))}. + * + * @param query the CQL query to execute. + * @param values the values for named placeholders in the query string. Individual values can be + * {@code null}, but the map itself can't. + * @return a {@code CompletionStage} that, once complete, will produce the async result set. + * @see SimpleStatement#newInstance(String, Map) + */ + @NonNull + default CompletionStage<AsyncResultSet> executeAsync( + @NonNull String query, @NonNull Map<String, Object> values) { + return executeAsync(SimpleStatement.newInstance(query, values)); + } + + /** + * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was + * sent, generally before the statement is prepared). + * + *
<p>
Note that the bound statements created from the resulting prepared statement will inherit + * some of the attributes of {@code query}; see {@link SyncCqlSession#prepare(SimpleStatement)} + * for more details. + * + *
<p>
The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for + * more explanations). + * + * @param statement the CQL query to prepare (that can be any {@code SimpleStatement}). + * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. + */ + @NonNull + default CompletionStage<PreparedStatement> prepareAsync(@NonNull SimpleStatement statement) { + return Objects.requireNonNull( + execute(new DefaultPrepareRequest(statement), PrepareRequest.ASYNC), + "The CQL prepare processor should never return a null result"); + } + + /** + * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was + * sent, generally before the statement is prepared). + * + *
<p>
The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for + * more explanations). + * + * @param query the CQL query string to prepare. + * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. + */ + @NonNull + default CompletionStage<PreparedStatement> prepareAsync(@NonNull String query) { + return Objects.requireNonNull( + execute(new DefaultPrepareRequest(query), PrepareRequest.ASYNC), + "The CQL prepare processor should never return a null result"); + } + + /** + * Prepares a CQL statement asynchronously (the call returns as soon as the prepare query was + * sent, generally before the statement is prepared). + * + *
<p>
This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to + * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link + * SyncCqlSession#prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely + * have to deal with {@link PrepareRequest} directly. + * + *
<p>
The result of this method is cached (see {@link SyncCqlSession#prepare(SimpleStatement)} for + * more explanations). + * + * @param request the {@code PrepareRequest} to prepare. + * @return a {@code CompletionStage} that, once complete, will produce the prepared statement. + */ + @NonNull + default CompletionStage<PreparedStatement> prepareAsync(PrepareRequest request) { + return Objects.requireNonNull( + execute(request, PrepareRequest.ASYNC), + "The CQL prepare processor should never return a null result"); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java index a21c0ee8cd6..05a292ccbd0 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/AsyncResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java index 1219dad4475..9deb33c6007 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
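As a usage note (not part of this patch), a short sketch of the asynchronous flow that `AsyncCqlSession` exposes; the `users`/`name` schema is made up for illustration.

```java
// Hedged sketch: non-blocking iteration over all pages of an async result.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

public class AsyncPagingExample {
  static CompletionStage<Void> printAllNames(CqlSession session) {
    return session
        .executeAsync("SELECT name FROM users")
        .thenCompose(AsyncPagingExample::printPage);
  }

  private static CompletionStage<Void> printPage(AsyncResultSet rs) {
    for (Row row : rs.currentPage()) {
      System.out.println(row.getString("name"));
    }
    if (rs.hasMorePages()) {
      // fetchNextPage() must only be called while hasMorePages() is true.
      return rs.fetchNextPage().thenCompose(AsyncPagingExample::printPage);
    }
    return CompletableFuture.completedFuture(null);
  }
}
```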
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +26,7 @@ import com.datastax.oss.driver.internal.core.util.Sizes; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.protocol.internal.PrimitiveSizes; +import edu.umd.cs.findbugs.annotations.CheckReturnValue; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.ArrayList; @@ -57,15 +60,16 @@ static BatchStatement newInstance(@NonNull BatchType batchType) { null, null, Collections.emptyMap(), + null, false, - false, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, null, Integer.MIN_VALUE, null, null, null, - null); + null, + Statement.NO_NOW_IN_SECONDS); } /** @@ -87,15 +91,16 @@ static BatchStatement newInstance( null, null, Collections.emptyMap(), + null, false, - false, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, null, Integer.MIN_VALUE, null, null, null, - null); + null, + Statement.NO_NOW_IN_SECONDS); } /** @@ -117,18 +122,23 @@ static BatchStatement newInstance( null, null, Collections.emptyMap(), + null, false, - false, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, null, Integer.MIN_VALUE, null, null, null, - null); + null, + Statement.NO_NOW_IN_SECONDS); } - /** Returns a builder to create an instance of the default implementation. */ + /** + * Returns a builder to create an instance of the default implementation. + * + *
<p>
Note that this builder is mutable and not thread-safe. + */ @NonNull static BatchStatementBuilder builder(@NonNull BatchType batchType) { return new BatchStatementBuilder(batchType); @@ -137,6 +147,8 @@ static BatchStatementBuilder builder(@NonNull BatchType batchType) { /** * Returns a builder to create an instance of the default implementation, copying the fields of * the given statement. + * + *
<p>
Note that this builder is mutable and not thread-safe. */ @NonNull static BatchStatementBuilder builder(@NonNull BatchStatement template) { @@ -153,6 +165,7 @@ static BatchStatementBuilder builder(@NonNull BatchStatement template) { * method. However custom implementations may choose to be mutable and return the same instance. */ @NonNull + @CheckReturnValue BatchStatement setBatchType(@NonNull BatchType newBatchType); /** @@ -169,6 +182,7 @@ static BatchStatementBuilder builder(@NonNull BatchStatement template) { * @see Request#getKeyspace() */ @NonNull + @CheckReturnValue BatchStatement setKeyspace(@Nullable CqlIdentifier newKeyspace); /** @@ -176,6 +190,7 @@ static BatchStatementBuilder builder(@NonNull BatchStatement template) { * setKeyspace(CqlIdentifier.fromCql(newKeyspaceName))}. */ @NonNull + @CheckReturnValue default BatchStatement setKeyspace(@NonNull String newKeyspaceName) { return setKeyspace(CqlIdentifier.fromCql(newKeyspaceName)); } @@ -190,6 +205,7 @@ default BatchStatement setKeyspace(@NonNull String newKeyspaceName) { * method. However custom implementations may choose to be mutable and return the same instance. */ @NonNull + @CheckReturnValue BatchStatement add(@NonNull BatchableStatement<?> statement); /** @@ -202,10 +218,12 @@ default BatchStatement setKeyspace(@NonNull String newKeyspaceName) { * method. However custom implementations may choose to be mutable and return the same instance. */ @NonNull + @CheckReturnValue BatchStatement addAll(@NonNull Iterable<? extends BatchableStatement<?>> statements); /** @see #addAll(Iterable) */ @NonNull + @CheckReturnValue default BatchStatement addAll(@NonNull BatchableStatement<?>... statements) { return addAll(Arrays.asList(statements)); } @@ -220,6 +238,7 @@ default BatchStatement addAll(@NonNull BatchableStatement<?>... statements) { * method. However custom implementations may choose to be mutable and return the same instance. */ @NonNull + @CheckReturnValue BatchStatement clear(); @Override @@ -251,7 +270,7 @@ default int computeSizeInBytes(@NonNull DriverContext context) { // timestamp if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Long.MIN_VALUE) { + || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { size += PrimitiveSizes.LONG; } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java index de3283b4a36..a8e2b8ab659 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchStatementBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +26,11 @@ import java.util.Arrays; import net.jcip.annotations.NotThreadSafe; +/** + * A builder to create a batch statement. + * + *
<p>
This class is mutable and not thread-safe. + */ @NotThreadSafe public class BatchStatementBuilder extends StatementBuilder<BatchStatementBuilder, BatchStatement> { @@ -147,7 +154,8 @@ public BatchStatement build() { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } public int getStatementsCount() { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java index f81d6c326bf..6b0a7f09688 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +26,10 @@ */ public interface BatchType { + BatchType LOGGED = DefaultBatchType.LOGGED; + BatchType UNLOGGED = DefaultBatchType.UNLOGGED; + BatchType COUNTER = DefaultBatchType.COUNTER; + /** The numerical value that the batch type is encoded to. */ byte getProtocolCode(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java index 5fb50fc5348..a25f625bae9 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BatchableStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java index dc9577ae23e..64f0f22a051 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Bindable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc.
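A hedged usage sketch for the builder and the new `BatchType` shortcut constants above (not part of this patch; the table name is illustrative).

```java
// Hedged sketch: the builder is mutable, so it can be filled in a loop,
// while the BatchStatement obtained from build() is immutable.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BatchStatement;
import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder;
import com.datastax.oss.driver.api.core.cql.BatchType;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

public class BatchExample {
  static void insertBoth(CqlSession session) {
    BatchStatementBuilder builder = BatchStatement.builder(BatchType.LOGGED);
    for (int k = 1; k <= 2; k++) {
      builder.addStatement(SimpleStatement.newInstance("INSERT INTO t1 (k) VALUES (?)", k));
    }
    session.execute(builder.build());
  }
}
```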
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java index 073ec3a97ca..bd7c142907f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -86,7 +88,7 @@ default int computeSizeInBytes(@NonNull DriverContext context) { // timestamp if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Long.MIN_VALUE) { + || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { size += PrimitiveSizes.LONG; } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java index 579dd8e399b..7e8f8723e1b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,9 +29,15 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; import java.time.Duration; +import java.util.List; import java.util.Map; import net.jcip.annotations.NotThreadSafe; +/** + * A builder to create a bound statement. + * + *
<p>
This class is mutable and not thread-safe. + */ @NotThreadSafe public class BoundStatementBuilder extends StatementBuilder<BoundStatementBuilder, BoundStatement> implements Bindable<BoundStatementBuilder> { @@ -93,6 +101,22 @@ public BoundStatementBuilder(@NonNull BoundStatement template) { this.node = template.getNode(); } + /** The prepared statement that was used to create this statement. */ + @NonNull + public PreparedStatement getPreparedStatement() { + return preparedStatement; + } + + @NonNull + @Override + public List<Integer> allIndicesOf(@NonNull CqlIdentifier id) { + List<Integer> indices = variableDefinitions.allIndicesOf(id); + if (indices.isEmpty()) { + throw new IllegalArgumentException(id + " is not a variable in this bound statement"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { int indexOf = variableDefinitions.firstIndexOf(id); @@ -102,6 +126,16 @@ public int firstIndexOf(@NonNull CqlIdentifier id) { return indexOf; } + @NonNull + @Override + public List<Integer> allIndicesOf(@NonNull String name) { + List<Integer> indices = variableDefinitions.allIndicesOf(name); + if (indices.isEmpty()) { + throw new IllegalArgumentException(name + " is not a variable in this bound statement"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull String name) { int indexOf = variableDefinitions.firstIndexOf(name); @@ -169,6 +203,7 @@ public BoundStatement build() { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java index 5bdee0410ad..cb48f058be4 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinition.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java index 15b206c0a6f..7a775064317 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ColumnDefinitions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
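Similarly, a brief sketch (not part of this patch) of the bound statement builder, including the new `getPreparedStatement()` accessor; the query is illustrative.

```java
// Hedged sketch: building a bound statement field by field via the mutable builder.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;

public class BoundBuilderExample {
  static void insert(CqlSession session) {
    PreparedStatement ps = session.prepare("INSERT INTO t4 (k, v) VALUES (?, ?)");
    BoundStatementBuilder builder = ps.boundStatementBuilder();
    builder.setInt(0, 1).setString(1, "hello");
    // The new accessor exposes the statement the builder was created from:
    assert builder.getPreparedStatement() == ps;
    BoundStatement bound = builder.build();
    session.execute(bound);
  }
}
```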
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +20,10 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.data.AccessibleByName; import com.datastax.oss.driver.api.core.detach.Detachable; +import com.datastax.oss.driver.internal.core.util.Loggers; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.List; /** * Metadata about a set of CQL columns. @@ -97,22 +102,61 @@ default ColumnDefinition get(@NonNull CqlIdentifier name) { /** Whether there is a definition using the given CQL identifier. */ boolean contains(@NonNull CqlIdentifier id); + /** + * Returns the indices of all columns that use the given name. + * + *
<p>
Because raw strings are ambiguous with regard to case-sensitivity, the argument will be + * interpreted according to the rules described in {@link AccessibleByName}. + * + * @return the indices, or an empty list if no column uses this name. + * @apiNote the default implementation only exists for backward compatibility. It wraps the result + * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it + * will only return the first occurrence. Therefore it also logs a warning. + *
<p>
Implementors should always override this method (all built-in driver implementations + * do). + */ + @NonNull + default List<Integer> allIndicesOf(@NonNull String name) { + Loggers.COLUMN_DEFINITIONS.warn( + "{} should override allIndicesOf(String), the default implementation is a " + + "workaround for backward compatibility, it only returns the first occurrence", + getClass().getName()); + return Collections.singletonList(firstIndexOf(name)); + } + /** * Returns the index of the first column that uses the given name. * *
<p>
Because raw strings are ambiguous with regard to case-sensitivity, the argument will be * interpreted according to the rules described in {@link AccessibleByName}. * - *
<p>
Also, note that if multiple columns use the same name, there is no way to find the index for - * the next occurrences. One way to avoid this is to use aliases in your CQL queries. + * @return the index, or -1 if no column uses this name. */ int firstIndexOf(@NonNull String name); + /** + * Returns the indices of all columns that use the given identifier. + * + * @return the indices, or an empty list if no column uses this identifier. + * @apiNote the default implementation only exists for backward compatibility. It wraps the result + * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, + * as it will only return the first occurrence. Therefore it also logs a warning. + *
<p>
Implementors should always override this method (all built-in driver implementations + * do). + */ + @NonNull + default List<Integer> allIndicesOf(@NonNull CqlIdentifier id) { + Loggers.COLUMN_DEFINITIONS.warn( + "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " + + "workaround for backward compatibility, it only returns the first occurrence", + getClass().getName()); + return Collections.singletonList(firstIndexOf(id)); + } + /** * Returns the index of the first column that uses the given identifier. * - *
<p>
Note that if multiple columns use the same identifier, there is no way to find the index for - * the next occurrences. One way to avoid this is to use aliases in your CQL queries. + * @return the index, or -1 if no column uses this identifier. */ int firstIndexOf(@NonNull CqlIdentifier id); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java index f941d48906d..f699438df59 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/DefaultBatchType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,6 +40,9 @@ public enum DefaultBatchType implements BatchType { */ COUNTER(ProtocolConstants.BatchType.COUNTER), ; + // Note that, for the sake of convenience, we also expose shortcuts to these constants on the + // BatchType interface. If you add a new enum constant, remember to update the interface as + // well. private final byte code; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java index 5187966b720..40cfca827d1 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ExecutionInfo.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
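To illustrate the new `allIndicesOf` lookups described above (not part of this patch; the query and aliases are made up):

```java
// Hedged sketch: when several columns share a name, every index is now reachable.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import java.util.List;

public class AllIndicesExample {
  static void inspect(CqlSession session) {
    ResultSet rs = session.execute("SELECT k, v AS x, w AS x FROM t2");
    ColumnDefinitions defs = rs.getColumnDefinitions();
    List<Integer> all = defs.allIndicesOf("x"); // e.g. [1, 2]
    int first = defs.firstIndexOf("x"); // 1, or -1 if the name is absent
    System.out.println(all + " / " + first);
  }
}
```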
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,11 +21,14 @@ import com.datastax.oss.driver.api.core.DriverException; import com.datastax.oss.driver.api.core.RequestThrottlingException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.retry.RetryDecision; import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; +import com.datastax.oss.driver.internal.core.cql.DefaultPagingState; import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import edu.umd.cs.findbugs.annotations.NonNull; @@ -45,8 +50,20 @@ */ public interface ExecutionInfo { - /** The statement that was executed. */ + /** @return The {@link Request} that was executed. */ @NonNull + default Request getRequest() { + return getStatement(); + } + + /** + * @return The {@link Request} that was executed, if it can be cast to {@link Statement}. + * @deprecated Use {@link #getRequest()} instead. + * @throws ClassCastException If the request that was executed cannot be cast to {@link + * Statement}. + */ + @NonNull + @Deprecated Statement getStatement(); /** @@ -97,16 +114,45 @@ public interface ExecutionInfo { List> getErrors(); /** - * The paging state of the query. + * The paging state of the query, in its raw form. * *

This represents the next page to be fetched if this query has multiple pages of results. It * can be saved and reused later on the same statement. + *

Note that this is the equivalent of driver 3's {@code getPagingStateUnsafe()}. If you're + * looking for the method that returns a {@link PagingState}, use {@link #getSafePagingState()}. + * * @return the paging state, or {@code null} if there is no next page. */ @Nullable ByteBuffer getPagingState(); + /** + * The paging state of the query, in a safe wrapper that checks if it's reused on the right + * statement. + * + *

This represents the next page to be fetched if this query has multiple pages of results. It * can be saved and reused later on the same statement. + * + * @return the paging state, or {@code null} if there is no next page. + */ + @Nullable + default PagingState getSafePagingState() { + // Default implementation for backward compatibility, but we override it in the concrete class, + // because it knows the attachment point. + ByteBuffer rawPagingState = getPagingState(); + if (rawPagingState == null) { + return null; + } else { + Request request = getRequest(); + if (!(request instanceof Statement)) { + throw new IllegalStateException("Only statements should have a paging state"); + } + Statement<?> statement = (Statement<?>) request; + return new DefaultPagingState(rawPagingState, statement, AttachmentPoint.NONE); + } + } + /** * The server-side warnings for this query, if any (otherwise the list will be empty). * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java new file mode 100644 index 00000000000..b9042f99841 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PagingState.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.cql; + +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.cql.DefaultPagingState; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; + +/** + * A safe wrapper around the paging state of a query. + * + *
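A minimal usage sketch of the safe wrapper introduced here (keyspace and table names are hypothetical). The state captured for one page is replayed on the same statement later; a mismatched statement fails fast with `IllegalArgumentException`:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PagingState;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class SafePagingStateSketch {
  // Fetches one page and returns the state needed to resume; null means no more pages.
  static PagingState fetchPage(CqlSession session, PagingState resumeFrom) {
    SimpleStatement statement =
        SimpleStatement.newInstance("SELECT * FROM test.users").setPageSize(20);
    if (resumeFrom != null) {
      // Throws IllegalArgumentException if the state came from a different statement:
      statement = statement.setPagingState(resumeFrom);
    }
    return session.execute(statement).getExecutionInfo().getSafePagingState();
  }
}
```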

This class performs additional checks to fail fast if the paging state is not reused on the + * same query, and it provides utility methods for conversion to/from strings and byte arrays. + * + *

The serialized form returned by {@link #toBytes()} and {@link Object#toString()} is an opaque + * sequence of bytes. Note however that it is not cryptographically secure: the contents are + * not encrypted and the checks are performed with a simple MD5 checksum. If you need stronger + * guarantees, you should build your own wrapper around {@link ExecutionInfo#getPagingState()}. + */ +public interface PagingState { + + /** Parses an instance from a string previously generated with {@code toString()}. */ + @NonNull + static PagingState fromString(@NonNull String string) { + return DefaultPagingState.fromString(string); + } + + /** Parses an instance from a byte array previously generated with {@link #toBytes()}. */ + @NonNull + static PagingState fromBytes(byte[] bytes) { + return DefaultPagingState.fromBytes(bytes); + } + + /** Returns a representation of this object as a byte array. */ + byte[] toBytes(); + + /** + * Checks if this paging state can be safely reused for the given statement. Specifically, the + * query string and any bound values must match. + * + *
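A hedged sketch of the serialization round trip, e.g. for a REST-style paging token (endpoint wiring omitted, names hypothetical):

```java
import com.datastax.oss.driver.api.core.cql.PagingState;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class PagingTokenSketch {
  // Serialize the state into an opaque token, e.g. to hand to a web client:
  static String toToken(PagingState state) {
    return state.toString();
  }

  // Rebuild it later, refusing tokens that were issued for a different query:
  static SimpleStatement resume(SimpleStatement statement, String token) {
    PagingState state = PagingState.fromString(token);
    if (!state.matches(statement)) {
      throw new IllegalArgumentException("token does not belong to this query");
    }
    return statement.setPagingState(state);
  }
}
```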

Note that, if {@code statement} is a {@link SimpleStatement} with bound values, those values + * must be encoded in order to perform the check. This method uses the default codec registry and + * default protocol version. This might fail if you use custom codecs; in that case, use {@link + * #matches(Statement, Session)} instead. + * + *

If {@code statement} is a {@link BoundStatement}, it is always safe to call this method. + */ + default boolean matches(@NonNull Statement statement) { + return matches(statement, null); + } + + /** + * Alternative to {@link #matches(Statement)} that specifies the session the statement will be + * executed with. You only need this for simple statements, and if you use custom codecs. + * Bound statements already know which session they are attached to. + */ + boolean matches(@NonNull Statement statement, @Nullable Session session); + + /** + * Returns the underlying "unsafe" paging state (the equivalent of {@link + * ExecutionInfo#getPagingState()}). + */ + @NonNull + ByteBuffer getRawPagingState(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java index 3e7308ccd4f..eb04f26c046 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PrepareRequest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java index b9f9a0fdccf..7828f9f809c 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/PreparedStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -134,6 +136,8 @@ void setResultMetadata( /** * Returns a builder to construct an executable statement. * + *

Note that this builder is mutable and not thread-safe. + * * @see #bind(Object...) */ @NonNull diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java index 4af0648ce4c..37ebb85c0db 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/QueryTrace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.UUID; @@ -39,10 +42,29 @@ public interface QueryTrace { /** The server-side duration of the query in microseconds. */ int getDurationMicros(); - /** The IP of the node that coordinated the query. */ + /** + * @deprecated returns the coordinator IP, but {@link #getCoordinatorAddress()} should be + * preferred, since C* 4.0 and above now returns the port as well. + */ @NonNull + @Deprecated InetAddress getCoordinator(); + + /** + * The IP and port of the node that coordinated the query. Prior to C* 4.0 the port is not set and + * will default to 0. + * + *
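A hedged sketch of reading the new coordinator address, assuming tracing was enabled on the statement (the query is only an example):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.QueryTrace;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import java.net.InetSocketAddress;

class CoordinatorAddressSketch {
  static void printCoordinator(CqlSession session) {
    SimpleStatement statement =
        SimpleStatement.newInstance("SELECT release_version FROM system.local").setTracing(true);
    // getQueryTrace() fetches the trace from the server (blocking):
    QueryTrace trace = session.execute(statement).getExecutionInfo().getQueryTrace();
    InetSocketAddress coordinator = trace.getCoordinatorAddress(); // port is 0 before C* 4.0
    System.out.println("coordinated by " + coordinator);
  }
}
```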

This method's default implementation returns {@link #getCoordinator()} with the port set to + * 0. The only reason it exists is to preserve binary compatibility. Internally, the driver + * overrides it to set the correct port. + * + * @since 4.6.0 + */ + @NonNull + default InetSocketAddress getCoordinatorAddress() { + return new InetSocketAddress(getCoordinator(), 0); + } + /** The parameters attached to this trace. */ @NonNull Map getParameters(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java index d4383476ca3..54f786b2068 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/ResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java index 9a2e88e27e8..5eab449b057 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Row.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +21,7 @@ import com.datastax.oss.driver.api.core.data.GettableByIndex; import com.datastax.oss.driver.api.core.data.GettableByName; import com.datastax.oss.driver.api.core.detach.Detachable; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import edu.umd.cs.findbugs.annotations.NonNull; /** @@ -33,4 +36,55 @@ public interface Row extends GettableByIndex, GettableByName, GettableById, Deta /** @return the column definitions contained in this result set. */ @NonNull ColumnDefinitions getColumnDefinitions(); + + /** + * Returns a string representation of the contents of this row. + * + *

This produces a comma-separated list enclosed in square brackets. Each column is represented + * by its name, followed by a colon and the value as a CQL literal. For example: + * + *

+   * [id:1, name:'test']
+   * 
+ * + * Notes: + * + *
    + *
  • This method does not sanitize its output in any way. In particular, no effort is made to + * limit output size: all columns are included, and large strings or blobs will be appended + * as-is. + *
  • Be mindful of how you expose the result. For example, in high-security environments, it + * might be undesirable to leak data in application logs. + *
+ */ + @NonNull + default String getFormattedContents() { + StringBuilder result = new StringBuilder("["); + ColumnDefinitions definitions = getColumnDefinitions(); + for (int i = 0; i < definitions.size(); i++) { + if (i > 0) { + result.append(", "); + } + ColumnDefinition definition = definitions.get(i); + String name = definition.getName().asCql(true); + TypeCodec<Object> codec = codecRegistry().codecFor(definition.getType()); + Object value = codec.decode(getBytesUnsafe(i), protocolVersion()); + result.append(name).append(':').append(codec.format(value)); + } + return result.append("]").toString(); + } + + /** + * Returns an abstract representation of this object, that may not include the row's + * contents. + * + *
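For example, a hedged sketch of logging both representations (SLF4J assumed, as the driver itself logs through it; the logger name is hypothetical):

```java
import com.datastax.oss.driver.api.core.cql.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class RowLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(RowLoggingSketch.class);

  static void log(Row row) {
    // toString() deliberately prints an opaque reference (class name + hash code):
    LOG.debug("row = {}", row);
    // Opt in to the actual contents only where leaking data is acceptable:
    LOG.debug("contents = {}", row.getFormattedContents());
  }
}
```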

The driver's built-in {@link Row} implementation returns the default format of {@link + * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. + * + *

Omitting the contents was a deliberate choice, because we feel it would make it too easy to + * accidentally leak data (e.g. in application logs). If you want the contents, use {@link + * #getFormattedContents()}. + */ + @Override + String toString(); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java index f15efc7df7c..ef04cd14a5b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,7 @@ import com.datastax.oss.protocol.internal.PrimitiveSizes; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import edu.umd.cs.findbugs.annotations.CheckReturnValue; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.List; @@ -74,13 +77,14 @@ static SimpleStatement newInstance(@NonNull String cqlQuery) { NullAllowingImmutableMap.of(), null, false, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, null, Integer.MIN_VALUE, null, null, null, - null); + null, + Statement.NO_NOW_IN_SECONDS); } /** @@ -107,13 +111,14 @@ static SimpleStatement newInstance( NullAllowingImmutableMap.of(), null, false, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, null, Integer.MIN_VALUE, null, null, null, - null); + null, + Statement.NO_NOW_IN_SECONDS); } /** @@ -137,16 +142,21 @@ static SimpleStatement newInstance( NullAllowingImmutableMap.of(), null, false, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, null, Integer.MIN_VALUE, null, null, null, - null); + null, + Statement.NO_NOW_IN_SECONDS); } - /** Returns a builder to create an instance of the default implementation. */ + /** + * Returns a builder to create an instance of the default implementation. + * + *

Note that this builder is mutable and not thread-safe. + */ @NonNull static SimpleStatementBuilder builder(@NonNull String query) { return new SimpleStatementBuilder(query); @@ -155,6 +165,8 @@ static SimpleStatementBuilder builder(@NonNull String query) { /** * Returns a builder to create an instance of the default implementation, copying the fields of * the given statement. + * + *

Note that this builder is mutable and not thread-safe. */ @NonNull static SimpleStatementBuilder builder(@NonNull SimpleStatement template) { @@ -186,6 +198,7 @@ static SimpleStatementBuilder builder(@NonNull SimpleStatement template) { * @see #setNamedValuesWithIds(Map) */ @NonNull + @CheckReturnValue SimpleStatement setQuery(@NonNull String newQuery); /** @@ -198,6 +211,7 @@ static SimpleStatementBuilder builder(@NonNull SimpleStatement template) { * @see Request#getKeyspace() */ @NonNull + @CheckReturnValue SimpleStatement setKeyspace(@Nullable CqlIdentifier newKeyspace); /** @@ -205,6 +219,7 @@ static SimpleStatementBuilder builder(@NonNull SimpleStatement template) { * setKeyspace(CqlIdentifier.fromCql(newKeyspaceName))}. */ @NonNull + @CheckReturnValue default SimpleStatement setKeyspace(@NonNull String newKeyspaceName) { return setKeyspace(CqlIdentifier.fromCql(newKeyspaceName)); } @@ -225,6 +240,7 @@ default SimpleStatement setKeyspace(@NonNull String newKeyspaceName) { * @see #setQuery(String) */ @NonNull + @CheckReturnValue SimpleStatement setPositionalValues(@NonNull List newPositionalValues); @NonNull @@ -245,6 +261,7 @@ default SimpleStatement setKeyspace(@NonNull String newKeyspaceName) { * @see #setQuery(String) */ @NonNull + @CheckReturnValue SimpleStatement setNamedValuesWithIds(@NonNull Map newNamedValues); /** @@ -252,6 +269,7 @@ default SimpleStatement setKeyspace(@NonNull String newKeyspaceName) { * converted on the fly with {@link CqlIdentifier#fromCql(String)}. */ @NonNull + @CheckReturnValue default SimpleStatement setNamedValues(@NonNull Map newNamedValues) { return setNamedValuesWithIds(DefaultSimpleStatement.wrapKeys(newNamedValues)); } @@ -291,7 +309,7 @@ default int computeSizeInBytes(@NonNull DriverContext context) { // timestamp if (!(context.getTimestampGenerator() instanceof ServerSideTimestampGenerator) - || getQueryTimestamp() != Long.MIN_VALUE) { + || getQueryTimestamp() != Statement.NO_DEFAULT_TIMESTAMP) { size += PrimitiveSizes.LONG; } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java index 4a1a9e32233..1ac910ff6a7 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SimpleStatementBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,11 @@ import java.util.Map; import net.jcip.annotations.NotThreadSafe; +/** + * A builder to create a simple statement. + * + *
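The builder described here can be used as follows, a minimal sketch with a hypothetical table:

```java
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class BuilderSketch {
  static SimpleStatement build() {
    // Accumulate options on the mutable builder (confined to one thread), then
    // publish an immutable statement:
    return SimpleStatement.builder("SELECT * FROM test.users")
        .setPageSize(20)
        .setTracing(true)
        .build();
  }
}
```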

This class is mutable and not thread-safe. + */ @NotThreadSafe public class SimpleStatementBuilder extends StatementBuilder { @@ -177,6 +184,7 @@ public SimpleStatement build() { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java index c06b24e1982..d70c56686c5 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/Statement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,6 +33,7 @@ import com.datastax.oss.driver.api.core.time.TimestampGenerator; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.internal.core.util.RoutingKey; +import com.datastax.oss.protocol.internal.request.query.QueryOptions; import edu.umd.cs.findbugs.annotations.CheckReturnValue; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -67,11 +70,25 @@ public interface Statement> extends Request { GenericType> ASYNC = new GenericType>() {}; + /** + * A special value for {@link #getQueryTimestamp()} that means "no value". + * + *

It is equal to {@link Long#MIN_VALUE}. + */ + long NO_DEFAULT_TIMESTAMP = QueryOptions.NO_DEFAULT_TIMESTAMP; + + /** + * A special value for {@link #getNowInSeconds()} that means "no value". + * + *

It is equal to {@link Integer#MIN_VALUE}. + */ + int NO_NOW_IN_SECONDS = QueryOptions.NO_NOW_IN_SECONDS; + /** * Sets the name of the execution profile that will be used for this statement. * - *

For all the driver's built-in implementations, this method has no effect if {@link - * #setExecutionProfile(DriverExecutionProfile)} has been called with a non-null argument. + *

For all the driver's built-in implementations, calling this method with a non-null argument + * automatically resets {@link #getExecutionProfile()} to null. * *

All the driver's built-in implementations are immutable, and return a new instance from this * method. However custom implementations may choose to be mutable and return the same instance. @@ -83,6 +100,9 @@ public interface Statement> extends Request { /** * Sets the execution profile to use for this statement. * + *

For all the driver's built-in implementations, calling this method with a non-null argument + * automatically resets {@link #getExecutionProfileName()} to null. + * *
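A hedged sketch of the new reset behavior (profile names are hypothetical):

```java
import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class ProfileResetSketch {
  static SimpleStatement pickProfile(SimpleStatement statement, DriverExecutionProfile olap) {
    statement = statement.setExecutionProfileName("oltp");
    // Setting a profile object now clears the name (and vice versa), so the two
    // settings can no longer silently shadow each other:
    statement = statement.setExecutionProfile(olap);
    assert statement.getExecutionProfileName() == null;
    return statement;
  }
}
```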

All the driver's built-in implementations are immutable, and return a new instance from this * method. However custom implementations may choose to be mutable and return the same instance. */ @@ -215,30 +235,88 @@ default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { SelfT setTracing(boolean newTracing); /** - * Returns the query timestamp, in microseconds, to send with the statement. + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setTracing(boolean) setTracing(true)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT enableTracing() { + return setTracing(true); + } + + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setTracing(boolean) setTracing(false)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT disableTracing() { + return setTracing(false); + } + + /** + * Returns the query timestamp, in microseconds, to send with the statement. See {@link + * #setQueryTimestamp(long)} for details. * - *

If this is equal to {@link Long#MIN_VALUE}, the {@link TimestampGenerator} configured for - * this driver instance will be used to generate a timestamp. + *

If this is equal to {@link #NO_DEFAULT_TIMESTAMP}, the {@link TimestampGenerator} configured + * for this driver instance will be used to generate a timestamp. * + * @see #NO_DEFAULT_TIMESTAMP * @see TimestampGenerator */ long getQueryTimestamp(); + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #getQueryTimestamp()}. + */ + @Deprecated + default long getDefaultTimestamp() { + return getQueryTimestamp(); + } + /** * Sets the query timestamp, in microseconds, to send with the statement. * - *

If this is equal to {@link Long#MIN_VALUE}, the {@link TimestampGenerator} configured for - * this driver instance will be used to generate a timestamp. + *

This is an alternative to appending a {@code USING TIMESTAMP} clause in the statement's + * query string, and has the advantage of sending the timestamp separately from the query string + * itself, which doesn't have to be modified when executing the same statement with different + * timestamps. Note that, if both a {@code USING TIMESTAMP} clause and a query timestamp are set + * for a given statement, the timestamp from the {@code USING TIMESTAMP} clause wins. + * + *

This method can be used on any instance of {@link SimpleStatement}, {@link BoundStatement} + * or {@link BatchStatement}. For a {@link BatchStatement}, the timestamp will apply to all its + * child statements; it is not possible to define per-child timestamps using this method, and + * consequently, if this method is called on a batch child statement, the provided timestamp will + * be silently ignored. If different timestamps are required for individual child statements, this + * can only be achieved with a custom {@code USING TIMESTAMP} clause in each child query. + * + *
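A hedged sketch (table hypothetical); the timestamp is in microseconds since the epoch:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class QueryTimestampSketch {
  static void insertAt(CqlSession session, long writeTimeMicros) {
    SimpleStatement statement =
        SimpleStatement.newInstance("INSERT INTO test.events (id, v) VALUES (1, 'a')")
            // Sent separately from the query string, so the same statement can be reused
            // with different timestamps (a USING TIMESTAMP clause would still win):
            .setQueryTimestamp(writeTimeMicros);
    session.execute(statement);
  }
}
```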

If this is equal to {@link #NO_DEFAULT_TIMESTAMP}, the {@link TimestampGenerator} configured + * for this driver instance will be used to generate a timestamp. * *

All the driver's built-in implementations are immutable, and return a new instance from this * method. However custom implementations may choose to be mutable and return the same instance. * + * @see #NO_DEFAULT_TIMESTAMP * @see TimestampGenerator */ @NonNull @CheckReturnValue SelfT setQueryTimestamp(long newTimestamp); + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setQueryTimestamp(long)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT setDefaultTimestamp(long newTimestamp) { + return setQueryTimestamp(newTimestamp); + } + /** * Sets how long to wait for this request to complete. This is a global limit on the duration of a * session.execute() call, including any retries the driver might do. @@ -282,6 +360,50 @@ default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { @CheckReturnValue SelfT setPagingState(@Nullable ByteBuffer newPagingState); + /** + * Sets the paging state to send with the statement, or {@code null} if this statement has no + * paging state. + * + *

This variant uses the "safe" paging state wrapper, it will throw immediately if the + * statement doesn't match the one that the state was initially extracted from (same query string, + * same parameters). The advantage is that it fails fast, instead of waiting for an error response + * from the server. + * + *

Note that, if this statement is a {@link SimpleStatement} with bound values, those values + * must be encoded in order to perform the check. This method uses the default codec registry and + * default protocol version. This might fail if you use custom codecs; in that case, use {@link + * #setPagingState(PagingState, Session)} instead. + * + * @throws IllegalArgumentException if the given state does not match this statement. + * @see #setPagingState(ByteBuffer) + * @see ExecutionInfo#getSafePagingState() + */ + @NonNull + @CheckReturnValue + default SelfT setPagingState(@Nullable PagingState newPagingState) { + return setPagingState(newPagingState, null); + } + + /** + * Alternative to {@link #setPagingState(PagingState)} that specifies the session the statement + * will be executed with. You only need this for simple statements, and if you use custom + * codecs. Bound statements already know which session they are attached to. + */ + @NonNull + @CheckReturnValue + default SelfT setPagingState(@Nullable PagingState newPagingState, @Nullable Session session) { + if (newPagingState == null) { + return setPagingState((ByteBuffer) null); + } else if (newPagingState.matches(this, session)) { + return setPagingState(newPagingState.getRawPagingState()); + } else { + throw new IllegalArgumentException( + "Paging state mismatch, " + + "this means that either the paging state contents were altered, " + + "or you're trying to apply it to a different statement"); + } + } + /** * Returns the page size to use for the statement. * @@ -291,6 +413,15 @@ default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { */ int getPageSize(); + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #getPageSize()}. + */ + @Deprecated + default int getFetchSize() { + return getPageSize(); + } + /** * Configures how many rows will be retrieved simultaneously in a single network roundtrip (the * goal being to avoid loading too many results in memory at the same time). @@ -303,6 +434,17 @@ default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { @CheckReturnValue SelfT setPageSize(int newPageSize); + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setPageSize(int)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT setFetchSize(int newPageSize) { + return setPageSize(newPageSize); + } + /** * Returns the {@link ConsistencyLevel} to use for the statement. * @@ -348,6 +490,35 @@ default SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { /** Whether tracing information should be recorded for this statement. */ boolean isTracing(); + /** + * A custom "now in seconds" to use when applying the request (for testing purposes). + * + *

This method's default implementation returns {@link #NO_NOW_IN_SECONDS}. The only reason it + * exists is to preserve binary compatibility. Internally, the driver overrides it to return the + * value that was set programmatically (if any). + * + * @see #NO_NOW_IN_SECONDS + */ + default int getNowInSeconds() { + return NO_NOW_IN_SECONDS; + } + + /** + * Sets the "now in seconds" to use when applying the request (for testing purposes). + * + *
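A hedged sketch of a TTL-style test (query hypothetical); the value shifts the server's notion of "now":

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class NowInSecondsSketch {
  static boolean isExpiredOneDayLater(CqlSession session, int nowInSeconds) {
    SimpleStatement statement =
        SimpleStatement.newInstance("SELECT v FROM test.cache WHERE id = 1")
            // Pretend the query runs 24 hours after the given point in time, so TTL'd
            // rows can be checked without actually waiting for them to expire:
            .setNowInSeconds(nowInSeconds + 86400);
    return session.execute(statement).one() == null;
  }
}
```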

This method's default implementation returns the statement unchanged. The only reason it + * exists is to preserve binary compatibility. Internally, the driver overrides it to record the + * new value. + * + * @see #NO_NOW_IN_SECONDS + */ + @NonNull + @CheckReturnValue + @SuppressWarnings("unchecked") + default SelfT setNowInSeconds(int nowInSeconds) { + return (SelfT) this; + } + /** * Calculates the approximate size in bytes that the statement will have when encoded. * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java index 209672fa412..531070b854c 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/StatementBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.internal.core.util.RoutingKey; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -50,13 +53,14 @@ public abstract class StatementBuilder< @Nullable private NullAllowingImmutableMap.Builder customPayloadBuilder; @Nullable protected Boolean idempotent; protected boolean tracing; - protected long timestamp = Long.MIN_VALUE; + protected long timestamp = Statement.NO_DEFAULT_TIMESTAMP; @Nullable protected ByteBuffer pagingState; protected int pageSize = Integer.MIN_VALUE; @Nullable protected ConsistencyLevel consistencyLevel; @Nullable protected ConsistencyLevel serialConsistencyLevel; @Nullable protected Duration timeout; @Nullable protected Node node; + protected int nowInSeconds = Statement.NO_NOW_IN_SECONDS; protected StatementBuilder() { // nothing to do @@ -82,12 +86,16 @@ protected StatementBuilder(StatementT template) { this.serialConsistencyLevel = template.getSerialConsistencyLevel(); this.timeout = template.getTimeout(); this.node = template.getNode(); + this.nowInSeconds = template.getNowInSeconds(); } /** @see Statement#setExecutionProfileName(String) */ @NonNull public SelfT setExecutionProfileName(@Nullable String executionProfileName) { this.executionProfileName = executionProfileName; + if (executionProfileName != null) { + this.executionProfile = null; + } return self; } @@ -95,7 +103,9 @@ public SelfT setExecutionProfileName(@Nullable String executionProfileName) { @NonNull public SelfT 
setExecutionProfile(@Nullable DriverExecutionProfile executionProfile) { this.executionProfile = executionProfile; - this.executionProfileName = null; + if (executionProfile != null) { + this.executionProfileName = null; + } return self; } @@ -123,6 +133,12 @@ public SelfT setRoutingKey(@Nullable ByteBuffer routingKey) { return self; } + /** @see Statement#setRoutingKey(ByteBuffer...) */ + @NonNull + public SelfT setRoutingKey(@NonNull ByteBuffer... newRoutingKeyComponents) { + return setRoutingKey(RoutingKey.compose(newRoutingKeyComponents)); + } + /** @see Statement#setRoutingToken(Token) */ @NonNull public SelfT setRoutingToken(@Nullable Token routingToken) { @@ -154,13 +170,44 @@ public SelfT setIdempotence(@Nullable Boolean idempotent) { return self; } - /** @see Statement#setTracing(boolean) */ + /** + * This method is a shortcut to {@link #setTracing(boolean)} with an argument of true. It is + * preserved to maintain API compatibility. + * + * @see Statement#setTracing(boolean) + */ @NonNull public SelfT setTracing() { - this.tracing = true; + return setTracing(true); + } + + /** @see Statement#setTracing(boolean) */ + @NonNull + public SelfT setTracing(boolean tracing) { + this.tracing = tracing; return self; } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setTracing(boolean) setTracing(true)}. + */ + @Deprecated + @NonNull + public SelfT enableTracing() { + return setTracing(true); + } + + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setTracing(boolean) setTracing(false)}. + */ + @Deprecated + @NonNull + public SelfT disableTracing() { + return setTracing(false); + } + /** @see Statement#setQueryTimestamp(long) */ @NonNull public SelfT setQueryTimestamp(long timestamp) { @@ -168,6 +215,16 @@ public SelfT setQueryTimestamp(long timestamp) { return self; } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setQueryTimestamp(long)}. + */ + @Deprecated + @NonNull + public SelfT setDefaultTimestamp(long timestamp) { + return setQueryTimestamp(timestamp); + } + /** @see Statement#setPagingState(ByteBuffer) */ @NonNull public SelfT setPagingState(@Nullable ByteBuffer pagingState) { @@ -182,6 +239,16 @@ public SelfT setPageSize(int pageSize) { return self; } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setPageSize(int)}. 
+ */ + @Deprecated + @NonNull + public SelfT setFetchSize(int pageSize) { + return this.setPageSize(pageSize); + } + /** @see Statement#setConsistencyLevel(ConsistencyLevel) */ @NonNull public SelfT setConsistencyLevel(@Nullable ConsistencyLevel consistencyLevel) { @@ -209,6 +276,12 @@ public SelfT setNode(@Nullable Node node) { return self; } + /** @see Statement#setNowInSeconds(int) */ + public SelfT setNowInSeconds(int nowInSeconds) { + this.nowInSeconds = nowInSeconds; + return self; + } + @NonNull protected Map buildCustomPayload() { return (customPayloadBuilder == null) diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java new file mode 100644 index 00000000000..a0f752db407 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/SyncCqlSession.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.cql; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.servererrors.QueryExecutionException; +import com.datastax.oss.driver.api.core.servererrors.QueryValidationException; +import com.datastax.oss.driver.api.core.servererrors.SyntaxError; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.cql.DefaultPrepareRequest; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Objects; + +/** + * A session that offers user-friendly methods to execute CQL requests synchronously. + * + * @since 4.4.0 + */ +public interface SyncCqlSession extends Session { + + /** + * Executes a CQL statement synchronously (the calling thread blocks until the result becomes + * available). + * + * @param statement the CQL query to execute (that can be any {@link Statement}). + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). 
+ */ + @NonNull + default ResultSet execute(@NonNull Statement<?> statement) { + return Objects.requireNonNull( + execute(statement, Statement.SYNC), "The CQL processor should never return a null result"); + } + + /** + * Executes a CQL statement synchronously (the calling thread blocks until the result becomes + * available). + * + *

This is an alias for {@link #execute(Statement) + * execute(SimpleStatement.newInstance(query))}. + * + * @param query the CQL query to execute. + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). + * @see SimpleStatement#newInstance(String) + */ + @NonNull + default ResultSet execute(@NonNull String query) { + return execute(SimpleStatement.newInstance(query)); + } + + /** + * Executes a CQL statement synchronously (the calling thread blocks until the result becomes + * available). + * + *

This is an alias for {@link #execute(Statement) execute(SimpleStatement.newInstance(query, + * values))}. + * + * @param query the CQL query to execute. + * @param values the values for placeholders in the query string. Individual values can be {@code + * null}, but the vararg array itself can't. + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). + * @see SimpleStatement#newInstance(String, Object...) + */ + @NonNull + default ResultSet execute(@NonNull String query, @NonNull Object... values) { + return execute(SimpleStatement.newInstance(query, values)); + } + + /** + * Executes a CQL statement synchronously (the calling thread blocks until the result becomes + * available). + * + *

This is an alias for {@link #execute(Statement) execute(SimpleStatement.newInstance(query, + * values))}. + * + * @param query the CQL query to execute. + * @param values the values for named placeholders in the query string. Individual values can be + * {@code null}, but the map itself can't. + * @return the result of the query. That result will never be null but can be empty (and will be + * for any non SELECT query). + * @throws AllNodesFailedException if no host in the cluster can be contacted successfully to + * execute this query. + * @throws QueryExecutionException if the query triggered an execution exception, i.e. an + * exception thrown by Cassandra when it cannot execute the query with the requested + * consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, unauthorized or any + * other validation problem). + * @see SimpleStatement#newInstance(String, Map) + */ + @NonNull + default ResultSet execute(@NonNull String query, @NonNull Map<String, Object> values) { + return execute(SimpleStatement.newInstance(query, values)); + } + + /** + * Prepares a CQL statement synchronously (the calling thread blocks until the statement is + * prepared). + * + *
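Before moving on to prepared statements, a hedged sketch of the three execute shortcuts above, as exposed on a session (schema hypothetical):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import java.util.HashMap;
import java.util.Map;

class ExecuteSketch {
  static void run(CqlSession session) {
    // Bare query string:
    session.execute("CREATE TABLE IF NOT EXISTS test.users (id int PRIMARY KEY, name text)");
    // Positional values:
    session.execute("INSERT INTO test.users (id, name) VALUES (?, ?)", 1, "alice");
    // Named values:
    Map<String, Object> values = new HashMap<>();
    values.put("id", 2);
    values.put("name", "bob");
    session.execute("INSERT INTO test.users (id, name) VALUES (:id, :name)", values);
  }
}
```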

Note that the bound statements created from the resulting prepared statement will inherit + * some of the attributes of the provided simple statement. That is, given: + * + *

{@code
+   * SimpleStatement simpleStatement = SimpleStatement.newInstance("...");
+   * PreparedStatement preparedStatement = session.prepare(simpleStatement);
+   * BoundStatement boundStatement = preparedStatement.bind();
+   * }
+ * + * Then: + * + *
    + *
  • the following methods return the same value as their counterpart on {@code + * simpleStatement}: + *
      + *
    • {@link Request#getExecutionProfileName() boundStatement.getExecutionProfileName()} + *
    • {@link Request#getExecutionProfile() boundStatement.getExecutionProfile()} + *
    • {@link Request#getRoutingKey() boundStatement.getRoutingKey()} + *
    • {@link Request#getRoutingToken() boundStatement.getRoutingToken()} + *
    • {@link Request#getCustomPayload() boundStatement.getCustomPayload()} + *
    • {@link Request#isIdempotent() boundStatement.isIdempotent()} + *
    • {@link Request#getTimeout() boundStatement.getTimeout()} + *
    • {@link Statement#getPagingState() boundStatement.getPagingState()} + *
    • {@link Statement#getPageSize() boundStatement.getPageSize()} + *
    • {@link Statement#getConsistencyLevel() boundStatement.getConsistencyLevel()} + *
    • {@link Statement#getSerialConsistencyLevel() + * boundStatement.getSerialConsistencyLevel()} + *
    • {@link Statement#isTracing() boundStatement.isTracing()} + *
    + *
  • {@link Request#getRoutingKeyspace() boundStatement.getRoutingKeyspace()} is set from + * either {@link Request#getKeyspace() simpleStatement.getKeyspace()} (if it's not {@code + * null}), or {@code simpleStatement.getRoutingKeyspace()}; + *
  • on the other hand, the following attributes are not propagated: + *
      + *
    • {@link Statement#getQueryTimestamp() boundStatement.getQueryTimestamp()} will be + * set to {@link Statement#NO_DEFAULT_TIMESTAMP}, meaning that the value will be + * assigned by the session's timestamp generator. + *
    • {@link Statement#getNode() boundStatement.getNode()} will always be {@code null}. + *
  • {@link Statement#getNowInSeconds() boundStatement.getNowInSeconds()} will always + * be equal to {@link Statement#NO_NOW_IN_SECONDS}.
    + *
+ * + * If you want to customize this behavior, you can write your own implementation of {@link + * PrepareRequest} and pass it to {@link #prepare(PrepareRequest)}. + * + *

The result of this method is cached: if you call it twice with the same {@link + * SimpleStatement}, you will get the same {@link PreparedStatement} instance. We still recommend + * keeping a reference to it (for example by caching it as a field in a DAO); if that's not + * possible (e.g. if query strings are generated dynamically), it's OK to call this method every + * time: there will just be a small performance overhead to check the internal cache. Note that + * caching is based on: + * + *

    + *
  • the query string exactly as you provided it: the driver does not perform any kind of + * trimming or sanitizing. + *
  • all other execution parameters: for example, preparing two statements with identical + * query strings but different {@linkplain SimpleStatement#getConsistencyLevel() consistency + * levels} will yield distinct prepared statements. + *
+ * + * @param statement the CQL query to execute (that can be any {@link SimpleStatement}). + * @return the prepared statement corresponding to {@code statement}. + * @throws SyntaxError if the syntax of the query to prepare is not correct. + */ + @NonNull + default PreparedStatement prepare(@NonNull SimpleStatement statement) { + return Objects.requireNonNull( + execute(new DefaultPrepareRequest(statement), PrepareRequest.SYNC), + "The CQL prepare processor should never return a null result"); + } + + /** + * Prepares a CQL statement synchronously (the calling thread blocks until the statement is + * prepared). + * + *
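A hedged sketch of the propagation rules listed above (consistency level and schema hypothetical):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class PrepareSketch {
  static BoundStatement prepareAndBind(CqlSession session) {
    SimpleStatement simple =
        SimpleStatement.newInstance("SELECT name FROM test.users WHERE id = ?")
            .setConsistencyLevel(DefaultConsistencyLevel.QUORUM);
    PreparedStatement prepared = session.prepare(simple); // cached on repeated calls
    BoundStatement bound = prepared.bind(1);
    // The consistency level set on the simple statement carried over:
    assert bound.getConsistencyLevel() == DefaultConsistencyLevel.QUORUM;
    return bound;
  }
}
```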

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more + * explanations). + * + * @param query the CQL string query to execute. + * @return the prepared statement corresponding to {@code query}. + * @throws SyntaxError if the syntax of the query to prepare is not correct. + */ + @NonNull + default PreparedStatement prepare(@NonNull String query) { + return Objects.requireNonNull( + execute(new DefaultPrepareRequest(query), PrepareRequest.SYNC), + "The CQL prepare processor should never return a null result"); + } + + /** + * Prepares a CQL statement synchronously (the calling thread blocks until the statement is + * prepared). + * + *

This variant is exposed in case you use an ad hoc {@link PrepareRequest} implementation to + * customize how attributes are propagated when you prepare a {@link SimpleStatement} (see {@link + * #prepare(SimpleStatement)} for more explanations). Otherwise, you should rarely have to deal + * with {@link PrepareRequest} directly. + * + *

The result of this method is cached (see {@link #prepare(SimpleStatement)} for more + * explanations). + * + * @param request the {@code PrepareRequest} to execute. + * @return the prepared statement corresponding to {@code request}. + * @throws SyntaxError if the syntax of the query to prepare is not correct. + */ + @NonNull + default PreparedStatement prepare(@NonNull PrepareRequest request) { + return Objects.requireNonNull( + execute(request, PrepareRequest.SYNC), + "The CQL prepare processor should never return a null result"); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java b/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java index c55e874cdc1..3043d94057f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/cql/TraceEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.net.InetAddress; +import java.net.InetSocketAddress; /** An event in a {@link QueryTrace}. */ public interface TraceEvent { @@ -28,10 +31,28 @@ public interface TraceEvent { /** The server-side timestamp of the event. */ long getTimestamp(); - /** The IP of the host having generated this event. */ + /** + * @deprecated returns the source IP, but {@link #getSourceAddress()} should be preferred, since + * C* 4.0 and above now returns the port as well. + */ @Nullable + @Deprecated InetAddress getSource(); + + /** + * The IP and port of the host having generated this event. Prior to C* 4.0 the port will be set + * to zero. + * + *

This method's default implementation returns {@link #getSource()} with the port set to 0. + * The only reason it exists is to preserve binary compatibility. Internally, the driver overrides + * it to set the correct port. + * + * @since 4.6.0 + */ + @Nullable + default InetSocketAddress getSourceAddress() { + return new InetSocketAddress(getSource(), 0); + } /** * The number of microseconds elapsed on the source when this event occurred since the moment when * the source started handling the query. diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java index f4cedb77c31..2ca2222424c 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleById.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,10 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.core.util.Loggers; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.List; /** * A data structure where the values are accessible via a CQL identifier. @@ -26,6 +31,25 @@ */ public interface AccessibleById extends AccessibleByIndex { + /** + * Returns all the indices where a given identifier appears. + * + * @throws IllegalArgumentException if the id is invalid. + * @apiNote the default implementation only exists for backward compatibility. It wraps the result + * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, + * as it will only return the first occurrence. Therefore it also logs a warning. + *

Implementors should always override this method (all built-in driver implementations + * do). + */ + @NonNull + default List allIndicesOf(@NonNull CqlIdentifier id) { + Loggers.ACCESSIBLE_BY_ID.warn( + "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " + + "workaround for backward compatibility, it only returns the first occurrence", + getClass().getName()); + return Collections.singletonList(firstIndexOf(id)); + } + /** * Returns the first index where a given identifier appears (depending on the implementation, * identifiers may appear multiple times). diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java index 509dd4866f9..3007ed1fb68 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByIndex.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java index ed7359b9c3e..74574a82f38 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/AccessibleByName.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,10 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.core.util.Loggers; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.List; /** * A data structure where the values are accessible via a name string. 
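To make the `allIndicesOf` contract concrete, here is a hypothetical helper showing what a proper override computes; `AllIndicesSketch` and its arguments are invented for the sketch, only `CqlIdentifier` comes from the driver:

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;
import java.util.ArrayList;
import java.util.List;

// Hypothetical: how an implementation with access to its column definitions
// could return every occurrence instead of the backward-compatible singleton.
final class AllIndicesSketch {
  static List<Integer> allIndicesOf(List<CqlIdentifier> columns, CqlIdentifier id) {
    List<Integer> indices = new ArrayList<>();
    for (int i = 0; i < columns.size(); i++) {
      if (columns.get(i).equals(id)) {
        indices.add(i);
      }
    }
    if (indices.isEmpty()) {
      throw new IllegalArgumentException(id + " is not a column in this structure");
    }
    return indices;
  }
}
```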
@@ -42,6 +47,25 @@ */ public interface AccessibleByName extends AccessibleByIndex { + /** + * Returns all the indices where a given identifier appears. + * + * @throws IllegalArgumentException if the name is invalid. + * @apiNote the default implementation only exists for backward compatibility. It wraps the result + * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it + * will only return the first occurrence. Therefore it also logs a warning. + *

Implementors should always override this method (all built-in driver implementations + * do). + */ + @NonNull + default List allIndicesOf(@NonNull String name) { + Loggers.ACCESSIBLE_BY_NAME.warn( + "{} should override allIndicesOf(String), the default implementation is a " + + "workaround for backward compatibility, it only returns the first occurrence", + getClass().getName()); + return Collections.singletonList(firstIndexOf(name)); + } + /** * Returns the first index where a given identifier appears (depending on the implementation, * identifiers may appear multiple times). diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java new file mode 100644 index 00000000000..d3dc68733e4 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/ByteUtils.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.data; + +import com.datastax.oss.protocol.internal.util.Bytes; +import java.nio.ByteBuffer; + +/** + * A set of static utility methods to work with byte buffers (associated with CQL type {@code + * blob}). + */ +public class ByteUtils { + + // Implementation note: this is just a gateway to the internal `Bytes` class in native-protocol. + // The difference is that this one is part of the public API. + + /** + * Converts a blob to its CQL hex string representation. + * + *

A CQL blob string representation consists of the hexadecimal representation of the blob + * bytes prefixed by "0x". + * + * @param bytes the blob/bytes to convert to a string. + * @return the CQL string representation of {@code bytes}. If {@code bytes} is {@code null}, this + * method returns {@code null}. + */ + public static String toHexString(ByteBuffer bytes) { + return Bytes.toHexString(bytes); + } + + /** + * Converts a blob to its CQL hex string representation. + * + *

A CQL blob string representation consists of the hexadecimal representation of the blob + * bytes prefixed by "0x". + * + * @param byteArray the blob/bytes array to convert to a string. + * @return the CQL string representation of {@code byteArray}. If {@code byteArray} is {@code null}, this + * method returns {@code null}. + */ + public static String toHexString(byte[] byteArray) { + return Bytes.toHexString(byteArray); + } + + /** + * Parses a hex string representing a CQL blob. + * + *

The input should be a valid representation of a CQL blob, i.e. it must start with "0x" + * followed by the hexadecimal representation of the blob bytes. + * + * @param str the CQL blob string representation to parse. + * @return the bytes corresponding to {@code str}. If {@code str} is {@code null}, this method + * returns {@code null}. + * @throws IllegalArgumentException if {@code str} is not a valid CQL blob string. + */ + public static ByteBuffer fromHexString(String str) { + return Bytes.fromHexString(str); + } + + /** + * Extracts the content of the provided {@code ByteBuffer} as a byte array. + * + *

This method works with any type of {@code ByteBuffer} (direct and non-direct ones), but when + * the buffer is backed by an array, it will try to avoid a copy when possible. As a consequence, + * changes to the returned byte array may or may not be reflected in the initial buffer. + * + * @param bytes the buffer whose contents to extract. + * @return a byte array with the contents of {@code bytes}. That array may be the array backing + * {@code bytes} if this can avoid a copy. + */ + public static byte[] getArray(ByteBuffer bytes) { + return Bytes.getArray(bytes); + } + + private ByteUtils() {} +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java index 1db7e1d8d4f..bfa9df20bbb 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlDuration.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.time.Duration; import java.time.Period; import java.time.temporal.ChronoUnit; @@ -28,6 +31,7 @@ import java.time.temporal.TemporalUnit; import java.time.temporal.UnsupportedTemporalTypeException; import java.util.List; +import java.util.Locale; import java.util.regex.Matcher; import java.util.regex.Pattern; import net.jcip.annotations.Immutable; @@ -41,7 +45,9 @@ * in time, regardless of the calendar).
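The new `ByteUtils` entry points above simply delegate to the internal `Bytes` helper; a round trip might look like this (illustrative only):

```java
import com.datastax.oss.driver.api.core.data.ByteUtils;
import java.nio.ByteBuffer;
import java.util.Arrays;

class ByteUtilsSketch {
  static void demo() {
    ByteBuffer blob = ByteUtils.fromHexString("0xcafebabe"); // the "0x" prefix is mandatory
    String hex = ByteUtils.toHexString(blob); // -> "0xcafebabe"
    byte[] raw = ByteUtils.getArray(blob); // may share the buffer's backing array
    System.out.println(hex + " " + Arrays.toString(raw));
  }
}
```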
*/ @Immutable -public final class CqlDuration implements TemporalAmount { +public final class CqlDuration implements TemporalAmount, Serializable { + + private static final long serialVersionUID = 1L; @VisibleForTesting static final long NANOS_PER_MICRO = 1000L; @VisibleForTesting static final long NANOS_PER_MILLI = 1000 * NANOS_PER_MICRO; @@ -74,14 +80,17 @@ public final class CqlDuration implements TemporalAmount { private static final ImmutableList TEMPORAL_UNITS = ImmutableList.of(ChronoUnit.MONTHS, ChronoUnit.DAYS, ChronoUnit.NANOS); + /** @serial */ private final int months; + /** @serial */ private final int days; + /** @serial */ private final long nanoseconds; private CqlDuration(int months, int days, long nanoseconds) { // Makes sure that all the values are negative if one of them is if ((months < 0 || days < 0 || nanoseconds < 0) - && ((months > 0 || days > 0 || nanoseconds > 0))) { + && (months > 0 || days > 0 || nanoseconds > 0)) { throw new IllegalArgumentException( String.format( "All values must be either negative or positive, got %d months, %d days, %d nanoseconds", @@ -115,7 +124,7 @@ public static CqlDuration newInstance(int months, int days, long nanoseconds) { *

  • multiple digits followed by a time unit like: 12h30m where the time unit can be:
    • {@code y}: years
-   • {@code m}: months
+   • {@code mo}: months
    • {@code w}: weeks
    • {@code d}: days
    • {@code h}: hours @@ -232,7 +241,7 @@ private static long groupAsLong(@NonNull Matcher matcher, int group) { } private static Builder add(@NonNull Builder builder, long number, @NonNull String symbol) { - String s = symbol.toLowerCase(); + String s = symbol.toLowerCase(Locale.ROOT); if (s.equals("y")) { return builder.addYears(number); } else if (s.equals("mo")) { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java new file mode 100644 index 00000000000..8089d551750 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/CqlVector.java @@ -0,0 +1,293 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.data; + +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.base.Predicates; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.IOException; +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; + +/** + * Representation of a vector as defined in CQL. + * + *
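To make the unit change above concrete, a few inputs accepted by `CqlDuration.from()` (a sketch; note that `mo`, not `m`, now selects months):

```java
import com.datastax.oss.driver.api.core.data.CqlDuration;

class DurationParsingSketch {
  static void demo() {
    CqlDuration d1 = CqlDuration.from("12h30m"); // hours + minutes
    CqlDuration d2 = CqlDuration.from("2mo10d"); // "mo" selects months, "m" means minutes
    CqlDuration d3 = CqlDuration.from("-1d12h"); // the sign applies to every component
    // Equivalent explicit construction: months, days, nanoseconds must share a sign.
    CqlDuration d4 = CqlDuration.newInstance(2, 10, 0L);
    System.out.println(d1 + " " + d2 + " " + d3 + " " + d4);
  }
}
```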

A CQL vector is a fixed-length array of non-null numeric values. These properties don't map + * cleanly to an existing class in the standard JDK Collections hierarchy, so we provide this value + * object instead. Like other value object collections returned by the driver, instances of this + * class are not immutable; think of these value objects as a representation of a vector stored in + * the database as an initial step in some additional computation. + * + *

      While we don't implement any Collection APIs we do implement Iterable. We also attempt to play + * nice with the Streams API in order to better facilitate integration with data pipelines. Finally, + * where possible we've tried to make the API of this class similar to the equivalent methods on + * {@link List}. + */ +public class CqlVector implements Iterable, Serializable { + + /** + * Create a new CqlVector containing the specified values. + * + * @param vals the collection of values to wrap. + * @return a CqlVector wrapping those values + */ + public static CqlVector newInstance(V... vals) { + + // Note that Array.asList() guarantees the return of an array which implements RandomAccess + return new CqlVector(Arrays.asList(vals)); + } + + /** + * Create a new CqlVector that "wraps" an existing ArrayList. Modifications to the passed + * ArrayList will also be reflected in the returned CqlVector. + * + * @param list the collection of values to wrap. + * @return a CqlVector wrapping those values + */ + public static CqlVector newInstance(List list) { + Preconditions.checkArgument(list != null, "Input list should not be null"); + return new CqlVector(list); + } + + /** + * Create a new CqlVector instance from the specified string representation. + * + * @param str a String representation of a CqlVector + * @param subtypeCodec + * @return a new CqlVector built from the String representation + */ + public static CqlVector from(@NonNull String str, @NonNull TypeCodec subtypeCodec) { + Preconditions.checkArgument(str != null, "Cannot create CqlVector from null string"); + Preconditions.checkArgument(!str.isEmpty(), "Cannot create CqlVector from empty string"); + if (str.equalsIgnoreCase("NULL")) return null; + + int idx = ParseUtils.skipSpaces(str, 0); + if (str.charAt(idx++) != '[') + throw new IllegalArgumentException( + String.format( + "Cannot parse vector value from \"%s\", at character %d expecting '[' but got '%c'", + str, idx, str.charAt(idx))); + + idx = ParseUtils.skipSpaces(str, idx); + + if (str.charAt(idx) == ']') { + return new CqlVector<>(new ArrayList<>()); + } + + List list = new ArrayList<>(); + while (idx < str.length()) { + int n; + try { + n = ParseUtils.skipCQLValue(str, idx); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + String.format( + "Cannot parse vector value from \"%s\", invalid CQL value at character %d", + str, idx), + e); + } + + list.add(subtypeCodec.parse(str.substring(idx, n))); + idx = n; + + idx = ParseUtils.skipSpaces(str, idx); + if (str.charAt(idx) == ']') return new CqlVector<>(list); + if (str.charAt(idx++) != ',') + throw new IllegalArgumentException( + String.format( + "Cannot parse vector value from \"%s\", at character %d expecting ',' but got '%c'", + str, idx, str.charAt(idx))); + + idx = ParseUtils.skipSpaces(str, idx); + } + throw new IllegalArgumentException( + String.format("Malformed vector value \"%s\", missing closing ']'", str)); + } + + private final List list; + + private CqlVector(@NonNull List list) { + + Preconditions.checkArgument( + Iterables.all(list, Predicates.notNull()), "CqlVectors cannot contain null values"); + this.list = list; + } + + /** + * Retrieve the value at the specified index. Modelled after {@link List#get(int)} + * + * @param idx the index to retrieve + * @return the value at the specified index + */ + public T get(int idx) { + return list.get(idx); + } + + /** + * Update the value at the specified index. 
Modelled after {@link List#set(int, Object)} + * + * @param idx the index to set + * @param val the new value for the specified index + * @return the old value for the specified index + */ + public T set(int idx, T val) { + return list.set(idx, val); + } + + /** + * Return the size of this vector. Modelled after {@link List#size()} + * + * @return the vector size + */ + public int size() { + return this.list.size(); + } + + /** + * Return a CqlVector consisting of the contents of a portion of this vector. Modelled after + * {@link List#subList(int, int)} + * + * @param from the index to start from (inclusive) + * @param to the index to end on (exclusive) + * @return a new CqlVector wrapping the sublist + */ + public CqlVector subVector(int from, int to) { + return new CqlVector(this.list.subList(from, to)); + } + + /** + * Return a boolean indicating whether the vector is empty. Modelled after {@link List#isEmpty()} + * + * @return true if the list is empty, false otherwise + */ + public boolean isEmpty() { + return this.list.isEmpty(); + } + + /** + * Create an {@link Iterator} for this vector + * + * @return the generated iterator + */ + @Override + public Iterator iterator() { + return this.list.iterator(); + } + + /** + * Create a {@link Stream} of the values in this vector + * + * @return the Stream instance + */ + public Stream stream() { + return this.list.stream(); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } else if (o instanceof CqlVector) { + CqlVector that = (CqlVector) o; + return this.list.equals(that.list); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(list); + } + + /** + * The string representation of the vector. Elements, like strings, may not be properly quoted. + * + * @return the string representation + */ + @Override + public String toString() { + return Iterables.toString(this.list); + } + + /** + * Serialization proxy for CqlVector. Allows serialization regardless of implementation of list + * field. + * + * @param inner type of CqlVector, assume Number is always Serializable. + */ + private static class SerializationProxy implements Serializable { + + private static final long serialVersionUID = 1; + + private transient List list; + + SerializationProxy(CqlVector vector) { + this.list = vector.list; + } + + // Reconstruct CqlVector's list of elements. + private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { + stream.defaultReadObject(); + + int size = stream.readInt(); + list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add((T) stream.readObject()); + } + } + + // Return deserialized proxy object as CqlVector. + private Object readResolve() throws ObjectStreamException { + return new CqlVector(list); + } + + // Write size of CqlVector followed by items in order. + private void writeObject(ObjectOutputStream stream) throws IOException { + stream.defaultWriteObject(); + + stream.writeInt(list.size()); + for (T item : list) { + stream.writeObject(item); + } + } + } + + /** @serialData The number of elements in the vector, followed by each element in-order. 
*/ + private Object writeReplace() { + return new SerializationProxy(this); + } + + private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) + throws InvalidObjectException { + // Should never be called since we serialized a proxy + throw new InvalidObjectException("Proxy required"); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java index 0db0a6b44e8..495b96e97c7 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/Data.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java index bf0ccfe1f2b..8393bc9f758 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableById.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -196,6 +198,15 @@ default boolean getBoolean(@NonNull CqlIdentifier id) { return getBoolean(firstIndexOf(id)); } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #getBoolean(CqlIdentifier)}. + */ + @Deprecated + default boolean getBool(@NonNull CqlIdentifier id) { + return getBoolean(id); + } + /** * Returns the value for the first occurrence of {@code id} as a Java primitive byte. * @@ -506,6 +517,25 @@ default CqlDuration getCqlDuration(@NonNull CqlIdentifier id) { return getCqlDuration(firstIndexOf(id)); } + /** + * Returns the value for the first occurrence of {@code id} as a vector. + * + *
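A string round trip through the new `CqlVector` value type (illustrative; `TypeCodecs.FLOAT` is the driver's built-in float codec):

```java
import com.datastax.oss.driver.api.core.data.CqlVector;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;

class VectorRoundTripSketch {
  static void demo() {
    CqlVector<Float> v = CqlVector.newInstance(0.1f, 0.2f, 0.3f);
    String s = v.toString(); // "[0.1, 0.2, 0.3]"
    CqlVector<Float> parsed = CqlVector.from(s, TypeCodecs.FLOAT);
    System.out.println(v.equals(parsed) + ", size=" + parsed.size());
  }
}
```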

      By default, this works with CQL type {@code vector}. + * + *

      If an identifier appears multiple times, this can only be used to access the first value. + * For the other ones, use positional getters. + * + *

      If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of + * this method that takes a string argument. + * + * @throws IllegalArgumentException if the id is invalid. + */ + @Nullable + default CqlVector getVector( + @NonNull CqlIdentifier id, @NonNull Class elementsClass) { + return getVector(firstIndexOf(id), elementsClass); + } + /** * Returns the value for the first occurrence of {@code id} as a token. * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java index 177fd654507..bb75bd9a2b4 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByIndex.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -175,6 +177,15 @@ default boolean getBoolean(int i) { } } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #getBoolean(int)}. + */ + @Deprecated + default boolean getBool(int i) { + return getBoolean(i); + } + /** * Returns the {@code i}th value as a Java primitive byte. * @@ -427,6 +438,18 @@ default CqlDuration getCqlDuration(int i) { return get(i, CqlDuration.class); } + /** + * Returns the {@code i}th value as a vector. + * + *

      By default, this works with CQL type {@code vector}. + * + * @throws IndexOutOfBoundsException if the index is invalid. + */ + @Nullable + default CqlVector getVector(int i, @NonNull Class elementsClass) { + return get(i, GenericType.vectorOf(elementsClass)); + } + /** * Returns the {@code i}th value as a token. * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java index c1aca1576c6..b0a4660033b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/GettableByName.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -196,6 +198,15 @@ default boolean getBoolean(@NonNull String name) { return getBoolean(firstIndexOf(name)); } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #getBoolean(String)}. + */ + @Deprecated + default boolean getBool(@NonNull String name) { + return getBoolean(name); + } + /** * Returns the value for the first occurrence of {@code name} as a Java primitive byte. * @@ -502,6 +513,25 @@ default CqlDuration getCqlDuration(@NonNull String name) { return getCqlDuration(firstIndexOf(name)); } + /** + * Returns the value for the first occurrence of {@code name} as a vector. + * + *
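Reading a vector column with the new getters (a sketch; the `items` table and its `embedding` column are hypothetical):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.data.CqlVector;

class ReadVectorSketch {
  // Hypothetical schema: items(id int PRIMARY KEY, embedding vector<float, 3>)
  static CqlVector<Float> read(CqlSession session) {
    Row row = session.execute("SELECT embedding FROM items WHERE id = 1").one();
    // Assumes the row exists; the named and positional variants are equivalent
    // here: row.getVector(0, Float.class) would work too.
    return row.getVector("embedding", Float.class);
  }
}
```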

      By default, this works with CQL type {@code vector}. + * + *

      If an identifier appears multiple times, this can only be used to access the first value. + * For the other ones, use positional getters. + * + *

      This method deals with case sensitivity in the way explained in the documentation of {@link + * AccessibleByName}. + * + * @throws IllegalArgumentException if the name is invalid. + */ + @Nullable + default CqlVector getVector( + @NonNull String name, @NonNull Class elementsClass) { + return getVector(firstIndexOf(name), elementsClass); + } + /** * Returns the value for the first occurrence of {@code name} as a token. * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java index de9a906ca49..0f5e3cd9daa 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableById.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,7 +43,7 @@ public interface SettableById> extends SettableByIndex, AccessibleById { /** - * Sets the raw binary representation of the value for the first occurrence of {@code id}. + * Sets the raw binary representation of the value for all occurrences of {@code id}. * *

      This is primarily for internal use; you'll likely want to use one of the typed setters * instead, to pass a higher-level Java representation. @@ -59,7 +61,12 @@ public interface SettableById> @NonNull @CheckReturnValue default SelfT setBytesUnsafe(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { - return setBytesUnsafe(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setBytesUnsafe(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } @NonNull @@ -69,7 +76,7 @@ default DataType getType(@NonNull CqlIdentifier id) { } /** - * Sets the value for the first occurrence of {@code id} to CQL {@code NULL}. + * Sets the value for all occurrences of {@code id} to CQL {@code NULL}. * *

      If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of * this method that takes a string argument. @@ -79,12 +86,16 @@ default DataType getType(@NonNull CqlIdentifier id) { @NonNull @CheckReturnValue default SelfT setToNull(@NonNull CqlIdentifier id) { - return setToNull(firstIndexOf(id)); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setToNull(i); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id}, using the given codec for the - * conversion. + * Sets the value for all occurrences of {@code id}, using the given codec for the conversion. * *

      This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use * the given codec instead. This can be useful if the codec would collide with a previously @@ -102,11 +113,16 @@ default SelfT setToNull(@NonNull CqlIdentifier id) { @CheckReturnValue default SelfT set( @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull TypeCodec codec) { - return set(firstIndexOf(id), v, codec); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).set(i, v, codec); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id}, converting it to the given Java type. + * Sets the value for all occurrences of {@code id}, converting it to the given Java type. * *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. * @@ -123,11 +139,16 @@ default SelfT set( @CheckReturnValue default SelfT set( @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull GenericType targetType) { - return set(firstIndexOf(id), v, targetType); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).set(i, v, targetType); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Returns the value for the first occurrence of {@code id}, converting it to the given Java type. + * Sets the value for all occurrences of {@code id}, converting it to the given Java type. * *

      The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. * @@ -143,11 +164,16 @@ default SelfT set( @CheckReturnValue default SelfT set( @NonNull CqlIdentifier id, @Nullable ValueT v, @NonNull Class targetClass) { - return set(firstIndexOf(id), v, targetClass); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).set(i, v, targetClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive boolean. + * Sets the value for all occurrences of {@code id} to the provided Java primitive boolean. * *

      By default, this works with CQL type {@code boolean}. * @@ -162,11 +188,27 @@ default SelfT set( @NonNull @CheckReturnValue default SelfT setBoolean(@NonNull CqlIdentifier id, boolean v) { - return setBoolean(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setBoolean(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive byte. + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setBoolean(CqlIdentifier, boolean)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT setBool(@NonNull CqlIdentifier id, boolean v) { + return setBoolean(id, v); + } + + /** + * Sets the value for all occurrences of {@code id} to the provided Java primitive byte. * *

      By default, this works with CQL type {@code tinyint}. * @@ -181,11 +223,16 @@ default SelfT setBoolean(@NonNull CqlIdentifier id, boolean v) { @NonNull @CheckReturnValue default SelfT setByte(@NonNull CqlIdentifier id, byte v) { - return setByte(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setByte(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive double. + * Sets the value for all occurrences of {@code id} to the provided Java primitive double. * *

      By default, this works with CQL type {@code double}. * @@ -200,11 +247,16 @@ default SelfT setByte(@NonNull CqlIdentifier id, byte v) { @NonNull @CheckReturnValue default SelfT setDouble(@NonNull CqlIdentifier id, double v) { - return setDouble(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setDouble(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive float. + * Sets the value for all occurrences of {@code id} to the provided Java primitive float. * *

      By default, this works with CQL type {@code float}. * @@ -219,11 +271,16 @@ default SelfT setDouble(@NonNull CqlIdentifier id, double v) { @NonNull @CheckReturnValue default SelfT setFloat(@NonNull CqlIdentifier id, float v) { - return setFloat(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setFloat(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive integer. + * Sets the value for all occurrences of {@code id} to the provided Java primitive integer. * *

      By default, this works with CQL type {@code int}. * @@ -238,11 +295,16 @@ default SelfT setFloat(@NonNull CqlIdentifier id, float v) { @NonNull @CheckReturnValue default SelfT setInt(@NonNull CqlIdentifier id, int v) { - return setInt(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setInt(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive long. + * Sets the value for all occurrences of {@code id} to the provided Java primitive long. * *

      By default, this works with CQL types {@code bigint} and {@code counter}. * @@ -257,11 +319,16 @@ default SelfT setInt(@NonNull CqlIdentifier id, int v) { @NonNull @CheckReturnValue default SelfT setLong(@NonNull CqlIdentifier id, long v) { - return setLong(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setLong(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java primitive short. + * Sets the value for all occurrences of {@code id} to the provided Java primitive short. * *

      By default, this works with CQL type {@code smallint}. * @@ -276,11 +343,16 @@ default SelfT setLong(@NonNull CqlIdentifier id, long v) { @NonNull @CheckReturnValue default SelfT setShort(@NonNull CqlIdentifier id, short v) { - return setShort(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setShort(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java instant. + * Sets the value for all occurrences of {@code id} to the provided Java instant. * *

      By default, this works with CQL type {@code timestamp}. * @@ -292,11 +364,16 @@ default SelfT setShort(@NonNull CqlIdentifier id, short v) { @NonNull @CheckReturnValue default SelfT setInstant(@NonNull CqlIdentifier id, @Nullable Instant v) { - return setInstant(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setInstant(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java local date. + * Sets the value for all occurrences of {@code id} to the provided Java local date. * *

      By default, this works with CQL type {@code date}. * @@ -308,11 +385,16 @@ default SelfT setInstant(@NonNull CqlIdentifier id, @Nullable Instant v) { @NonNull @CheckReturnValue default SelfT setLocalDate(@NonNull CqlIdentifier id, @Nullable LocalDate v) { - return setLocalDate(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setLocalDate(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java local time. + * Sets the value for all occurrences of {@code id} to the provided Java local time. * *

      By default, this works with CQL type {@code time}. * @@ -324,11 +406,16 @@ default SelfT setLocalDate(@NonNull CqlIdentifier id, @Nullable LocalDate v) { @NonNull @CheckReturnValue default SelfT setLocalTime(@NonNull CqlIdentifier id, @Nullable LocalTime v) { - return setLocalTime(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setLocalTime(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java byte buffer. + * Sets the value for all occurrences of {@code id} to the provided Java byte buffer. * *

      By default, this works with CQL type {@code blob}. * @@ -340,11 +427,16 @@ default SelfT setLocalTime(@NonNull CqlIdentifier id, @Nullable LocalTime v) { @NonNull @CheckReturnValue default SelfT setByteBuffer(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { - return setByteBuffer(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setByteBuffer(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java string. + * Sets the value for all occurrences of {@code id} to the provided Java string. * *

      By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. * @@ -356,11 +448,16 @@ default SelfT setByteBuffer(@NonNull CqlIdentifier id, @Nullable ByteBuffer v) { @NonNull @CheckReturnValue default SelfT setString(@NonNull CqlIdentifier id, @Nullable String v) { - return setString(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setString(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java big integer. + * Sets the value for all occurrences of {@code id} to the provided Java big integer. * *

      By default, this works with CQL type {@code varint}. * @@ -372,11 +469,16 @@ default SelfT setString(@NonNull CqlIdentifier id, @Nullable String v) { @NonNull @CheckReturnValue default SelfT setBigInteger(@NonNull CqlIdentifier id, @Nullable BigInteger v) { - return setBigInteger(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setBigInteger(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java big decimal. + * Sets the value for all occurrences of {@code id} to the provided Java big decimal. * *

      By default, this works with CQL type {@code decimal}. * @@ -388,11 +490,16 @@ default SelfT setBigInteger(@NonNull CqlIdentifier id, @Nullable BigInteger v) { @NonNull @CheckReturnValue default SelfT setBigDecimal(@NonNull CqlIdentifier id, @Nullable BigDecimal v) { - return setBigDecimal(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setBigDecimal(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java UUID. + * Sets the value for all occurrences of {@code id} to the provided Java UUID. * *

      By default, this works with CQL types {@code uuid} and {@code timeuuid}. * @@ -404,11 +511,16 @@ default SelfT setBigDecimal(@NonNull CqlIdentifier id, @Nullable BigDecimal v) { @NonNull @CheckReturnValue default SelfT setUuid(@NonNull CqlIdentifier id, @Nullable UUID v) { - return setUuid(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setUuid(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java IP address. + * Sets the value for all occurrences of {@code id} to the provided Java IP address. * *

      By default, this works with CQL type {@code inet}. * @@ -420,11 +532,16 @@ default SelfT setUuid(@NonNull CqlIdentifier id, @Nullable UUID v) { @NonNull @CheckReturnValue default SelfT setInetAddress(@NonNull CqlIdentifier id, @Nullable InetAddress v) { - return setInetAddress(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setInetAddress(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided duration. + * Sets the value for all occurrences of {@code id} to the provided duration. * *

      By default, this works with CQL type {@code duration}. * @@ -436,11 +553,40 @@ default SelfT setInetAddress(@NonNull CqlIdentifier id, @Nullable InetAddress v) @NonNull @CheckReturnValue default SelfT setCqlDuration(@NonNull CqlIdentifier id, @Nullable CqlDuration v) { - return setCqlDuration(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setCqlDuration(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; + } + + /** + * Sets the value for all occurrences of {@code id} to the provided {@code vector}. + * + *

      By default, this works with CQL type {@code vector}. + * + *

      If you want to avoid the overhead of building a {@code CqlIdentifier}, use the variant of + * this method that takes a string argument. + * + * @throws IllegalArgumentException if the id is invalid. + */ + @NonNull + @CheckReturnValue + default SelfT setVector( + @NonNull CqlIdentifier id, + @Nullable CqlVector v, + @NonNull Class elementsClass) { + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setVector(i, v, elementsClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided token. + * Sets the value for all occurrences of {@code id} to the provided token. * *

      This works with the CQL type matching the partitioner in use for this cluster: {@code * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and @@ -454,11 +600,16 @@ default SelfT setCqlDuration(@NonNull CqlIdentifier id, @Nullable CqlDuration v) @NonNull @CheckReturnValue default SelfT setToken(@NonNull CqlIdentifier id, @NonNull Token v) { - return setToken(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setToken(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java list. + * Sets the value for all occurrences of {@code id} to the provided Java list. * *

      By default, this works with CQL type {@code list}. * @@ -476,11 +627,16 @@ default SelfT setList( @NonNull CqlIdentifier id, @Nullable List v, @NonNull Class elementsClass) { - return setList(firstIndexOf(id), v, elementsClass); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setList(i, v, elementsClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java set. + * Sets the value for all occurrences of {@code id} to the provided Java set. * *

      By default, this works with CQL type {@code set}. * @@ -498,11 +654,16 @@ default SelfT setSet( @NonNull CqlIdentifier id, @Nullable Set v, @NonNull Class elementsClass) { - return setSet(firstIndexOf(id), v, elementsClass); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setSet(i, v, elementsClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided Java map. + * Sets the value for all occurrences of {@code id} to the provided Java map. * *

      By default, this works with CQL type {@code map}. * @@ -521,11 +682,16 @@ default SelfT setMap( @Nullable Map v, @NonNull Class keyClass, @NonNull Class valueClass) { - return setMap(firstIndexOf(id), v, keyClass, valueClass); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setMap(i, v, keyClass, valueClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided user defined type value. + * Sets the value for all occurrences of {@code id} to the provided user defined type value. * *

      By default, this works with CQL user-defined types. * @@ -537,11 +703,16 @@ default SelfT setMap( @NonNull @CheckReturnValue default SelfT setUdtValue(@NonNull CqlIdentifier id, @Nullable UdtValue v) { - return setUdtValue(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setUdtValue(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code id} to the provided tuple value. + * Sets the value for all occurrences of {@code id} to the provided tuple value. * *

      By default, this works with CQL tuples. * @@ -553,6 +724,11 @@ default SelfT setUdtValue(@NonNull CqlIdentifier id, @Nullable UdtValue v) { @NonNull @CheckReturnValue default SelfT setTupleValue(@NonNull CqlIdentifier id, @Nullable TupleValue v) { - return setTupleValue(firstIndexOf(id), v); + SelfT result = null; + for (Integer i : allIndicesOf(id)) { + result = (result == null ? this : result).setTupleValue(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java index 2ff700cc3fa..4ecdf647590 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByIndex.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -153,6 +155,17 @@ default SelfT setBoolean(int i, boolean v) { : set(i, v, codec); } + /** + * @deprecated this method only exists to ease the transition from driver 3, it is an alias for + * {@link #setBoolean(int, boolean)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT setBool(int i, boolean v) { + return setBoolean(i, v); + } + /** * Sets the {@code i}th value to the provided Java primitive byte. * @@ -403,6 +416,20 @@ default SelfT setCqlDuration(int i, @Nullable CqlDuration v) { return set(i, v, CqlDuration.class); } + /** + * Sets the {@code i}th value to the provided vector. + * + *
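The named setters above now write to every occurrence of an identifier, which matters when a prepared statement reuses a bind marker; a hypothetical sketch of the observable difference:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import java.time.Instant;

class SetAllOccurrencesSketch {
  // Hypothetical query: the marker :ts appears twice, so it occupies two bind
  // positions. With this change a single named call fills both of them.
  static BoundStatement bind(CqlSession session) {
    PreparedStatement ps =
        session.prepare("INSERT INTO events (id, created, updated) VALUES (:id, :ts, :ts)");
    return ps.bind()
        .setInt("id", 1)
        .setInstant("ts", Instant.now()); // previously only the first :ts was written
  }
}
```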

      By default, this works with CQL type {@code vector}. + * + * @throws IndexOutOfBoundsException if the index is invalid. + */ + @NonNull + @CheckReturnValue + default SelfT setVector( + int i, @Nullable CqlVector v, @NonNull Class elementsClass) { + return set(i, v, GenericType.vectorOf(elementsClass)); + } + /** * Sets the {@code i}th value to the provided token. * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java index 0ebd95b22cc..afe9ba59f64 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/SettableByName.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,7 +42,7 @@ public interface SettableByName> extends SettableByIndex, AccessibleByName { /** - * Sets the raw binary representation of the value for the first occurrence of {@code name}. + * Sets the raw binary representation of the value for all occurrences of {@code name}. * *
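And the write side of the new vector support, by position (hypothetical schema as in the read sketch earlier):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.data.CqlVector;

class WriteVectorSketch {
  // Hypothetical schema: items(id int PRIMARY KEY, embedding vector<float, 3>)
  static void write(CqlSession session) {
    PreparedStatement ps = session.prepare("INSERT INTO items (id, embedding) VALUES (?, ?)");
    BoundStatement bs =
        ps.bind()
            .setInt(0, 1)
            .setVector(1, CqlVector.newInstance(0.1f, 0.2f, 0.3f), Float.class);
    session.execute(bs);
  }
}
```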

      This is primarily for internal use; you'll likely want to use one of the typed setters * instead, to pass a higher-level Java representation. @@ -58,7 +60,12 @@ public interface SettableByName> @NonNull @CheckReturnValue default SelfT setBytesUnsafe(@NonNull String name, @Nullable ByteBuffer v) { - return setBytesUnsafe(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setBytesUnsafe(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } @NonNull @@ -68,7 +75,7 @@ default DataType getType(@NonNull String name) { } /** - * Sets the value for the first occurrence of {@code name} to CQL {@code NULL}. + * Sets the value for all occurrences of {@code name} to CQL {@code NULL}. * *

      This method deals with case sensitivity in the way explained in the documentation of {@link * AccessibleByName}. @@ -78,12 +85,16 @@ default DataType getType(@NonNull String name) { @NonNull @CheckReturnValue default SelfT setToNull(@NonNull String name) { - return setToNull(firstIndexOf(name)); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setToNull(i); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name}, using the given codec for the - * conversion. + * Sets the value for all occurrences of {@code name}, using the given codec for the conversion. * *

      This method completely bypasses the {@link #codecRegistry()}, and forces the driver to use * the given codec instead. This can be useful if the codec would collide with a previously @@ -101,11 +112,16 @@ default SelfT setToNull(@NonNull String name) { @CheckReturnValue default SelfT set( @NonNull String name, @Nullable ValueT v, @NonNull TypeCodec codec) { - return set(firstIndexOf(name), v, codec); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).set(i, v, codec); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name}, converting it to the given Java type. + * Sets the value for all occurrences of {@code name}, converting it to the given Java type. * *

The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. * @@ -122,12 +138,16 @@ default SelfT set( @CheckReturnValue default SelfT set( @NonNull String name, @Nullable ValueT v, @NonNull GenericType targetType) { - return set(firstIndexOf(name), v, targetType); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).set(i, v, targetType); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Returns the value for the first occurrence of {@code name}, converting it to the given Java - * type. + * Sets the value for all occurrences of {@code name}, converting it to the given Java type. * *

      The {@link #codecRegistry()} will be used to look up a codec to handle the conversion. * @@ -143,11 +163,16 @@ default SelfT set( @CheckReturnValue default SelfT set( @NonNull String name, @Nullable ValueT v, @NonNull Class targetClass) { - return set(firstIndexOf(name), v, targetClass); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).set(i, v, targetClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive boolean. + * Sets the value for all occurrences of {@code name} to the provided Java primitive boolean. * *

By default, this works with CQL type {@code boolean}. * @@ -162,11 +187,27 @@ default SelfT set( @NonNull @CheckReturnValue default SelfT setBoolean(@NonNull String name, boolean v) { - return setBoolean(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setBoolean(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; + } + + /** + * @deprecated this method only exists to ease the transition from driver 3; it is an alias + * for {@link #setBoolean(String, boolean)}. + */ + @Deprecated + @NonNull + @CheckReturnValue + default SelfT setBool(@NonNull String name, boolean v) { + return setBoolean(name, v); } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive byte. + * Sets the value for all occurrences of {@code name} to the provided Java primitive byte. * *
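The deprecated alias simply forwards to the new name, so the two calls below are interchangeable for code migrating from driver 3 (the marker name is hypothetical):

```java
import com.datastax.oss.driver.api.core.cql.BoundStatement;

public class SetBoolMigrationExample {
  static BoundStatement migrate(BoundStatement bs) {
    bs = bs.setBool("active", true); // driver-3 style alias, now deprecated
    return bs.setBoolean("active", true); // equivalent driver-4 spelling
  }
}
```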

      By default, this works with CQL type {@code tinyint}. * @@ -181,11 +222,16 @@ default SelfT setBoolean(@NonNull String name, boolean v) { @NonNull @CheckReturnValue default SelfT setByte(@NonNull String name, byte v) { - return setByte(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setByte(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive double. + * Sets the value for all occurrences of {@code name} to the provided Java primitive double. * *

      By default, this works with CQL type {@code double}. * @@ -200,11 +246,16 @@ default SelfT setByte(@NonNull String name, byte v) { @NonNull @CheckReturnValue default SelfT setDouble(@NonNull String name, double v) { - return setDouble(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setDouble(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive float. + * Sets the value for all occurrences of {@code name} to the provided Java primitive float. * *

      By default, this works with CQL type {@code float}. * @@ -219,11 +270,16 @@ default SelfT setDouble(@NonNull String name, double v) { @NonNull @CheckReturnValue default SelfT setFloat(@NonNull String name, float v) { - return setFloat(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setFloat(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive integer. + * Sets the value for all occurrences of {@code name} to the provided Java primitive integer. * *

      By default, this works with CQL type {@code int}. * @@ -238,11 +294,16 @@ default SelfT setFloat(@NonNull String name, float v) { @NonNull @CheckReturnValue default SelfT setInt(@NonNull String name, int v) { - return setInt(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setInt(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive long. + * Sets the value for all occurrences of {@code name} to the provided Java primitive long. * *

      By default, this works with CQL types {@code bigint} and {@code counter}. * @@ -257,11 +318,16 @@ default SelfT setInt(@NonNull String name, int v) { @NonNull @CheckReturnValue default SelfT setLong(@NonNull String name, long v) { - return setLong(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setLong(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java primitive short. + * Sets the value for all occurrences of {@code name} to the provided Java primitive short. * *

      By default, this works with CQL type {@code smallint}. * @@ -276,11 +342,16 @@ default SelfT setLong(@NonNull String name, long v) { @NonNull @CheckReturnValue default SelfT setShort(@NonNull String name, short v) { - return setShort(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setShort(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java instant. + * Sets the value for all occurrences of {@code name} to the provided Java instant. * *

      By default, this works with CQL type {@code timestamp}. * @@ -292,11 +363,16 @@ default SelfT setShort(@NonNull String name, short v) { @NonNull @CheckReturnValue default SelfT setInstant(@NonNull String name, @Nullable Instant v) { - return setInstant(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setInstant(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java local date. + * Sets the value for all occurrences of {@code name} to the provided Java local date. * *

      By default, this works with CQL type {@code date}. * @@ -308,11 +384,16 @@ default SelfT setInstant(@NonNull String name, @Nullable Instant v) { @NonNull @CheckReturnValue default SelfT setLocalDate(@NonNull String name, @Nullable LocalDate v) { - return setLocalDate(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setLocalDate(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java local time. + * Sets the value for all occurrences of {@code name} to the provided Java local time. * *

      By default, this works with CQL type {@code time}. * @@ -324,11 +405,16 @@ default SelfT setLocalDate(@NonNull String name, @Nullable LocalDate v) { @NonNull @CheckReturnValue default SelfT setLocalTime(@NonNull String name, @Nullable LocalTime v) { - return setLocalTime(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setLocalTime(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java byte buffer. + * Sets the value for all occurrences of {@code name} to the provided Java byte buffer. * *

      By default, this works with CQL type {@code blob}. * @@ -340,11 +426,16 @@ default SelfT setLocalTime(@NonNull String name, @Nullable LocalTime v) { @NonNull @CheckReturnValue default SelfT setByteBuffer(@NonNull String name, @Nullable ByteBuffer v) { - return setByteBuffer(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setByteBuffer(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java string. + * Sets the value for all occurrences of {@code name} to the provided Java string. * *

      By default, this works with CQL types {@code text}, {@code varchar} and {@code ascii}. * @@ -356,11 +447,16 @@ default SelfT setByteBuffer(@NonNull String name, @Nullable ByteBuffer v) { @NonNull @CheckReturnValue default SelfT setString(@NonNull String name, @Nullable String v) { - return setString(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setString(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java big integer. + * Sets the value for all occurrences of {@code name} to the provided Java big integer. * *

      By default, this works with CQL type {@code varint}. * @@ -372,11 +468,16 @@ default SelfT setString(@NonNull String name, @Nullable String v) { @NonNull @CheckReturnValue default SelfT setBigInteger(@NonNull String name, @Nullable BigInteger v) { - return setBigInteger(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setBigInteger(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java big decimal. + * Sets the value for all occurrences of {@code name} to the provided Java big decimal. * *

      By default, this works with CQL type {@code decimal}. * @@ -388,11 +489,16 @@ default SelfT setBigInteger(@NonNull String name, @Nullable BigInteger v) { @NonNull @CheckReturnValue default SelfT setBigDecimal(@NonNull String name, @Nullable BigDecimal v) { - return setBigDecimal(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setBigDecimal(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java UUID. + * Sets the value for all occurrences of {@code name} to the provided Java UUID. * *

      By default, this works with CQL types {@code uuid} and {@code timeuuid}. * @@ -404,11 +510,16 @@ default SelfT setBigDecimal(@NonNull String name, @Nullable BigDecimal v) { @NonNull @CheckReturnValue default SelfT setUuid(@NonNull String name, @Nullable UUID v) { - return setUuid(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setUuid(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java IP address. + * Sets the value for all occurrences of {@code name} to the provided Java IP address. * *

      By default, this works with CQL type {@code inet}. * @@ -420,11 +531,16 @@ default SelfT setUuid(@NonNull String name, @Nullable UUID v) { @NonNull @CheckReturnValue default SelfT setInetAddress(@NonNull String name, @Nullable InetAddress v) { - return setInetAddress(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setInetAddress(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided duration. + * Sets the value for all occurrences of {@code name} to the provided duration. * *

      By default, this works with CQL type {@code duration}. * @@ -436,11 +552,40 @@ default SelfT setInetAddress(@NonNull String name, @Nullable InetAddress v) { @NonNull @CheckReturnValue default SelfT setCqlDuration(@NonNull String name, @Nullable CqlDuration v) { - return setCqlDuration(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setCqlDuration(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; + } + + /** + * Sets the value for all occurrences of {@code name} to the provided vector. + * + *

      By default, this works with CQL type {@code vector}. + * + *

      This method deals with case sensitivity in the way explained in the documentation of {@link + * AccessibleByName}. + * + * @throws IllegalArgumentException if the name is invalid. + */ + @NonNull + @CheckReturnValue + default SelfT setVector( + @NonNull String name, + @Nullable CqlVector v, + @NonNull Class elementsClass) { + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setVector(i, v, elementsClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided token. + * Sets the value for all occurrences of {@code name} to the provided token. * *
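The new vector setters follow the same all-occurrences pattern. A short sketch, assuming a hypothetical table with a CQL `vector` column and the `CqlVector.newInstance` factory:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.data.CqlVector;

public class VectorBindExample {

  // Hypothetical schema: CREATE TABLE embeddings (id int PRIMARY KEY, v vector<float, 3>)
  static BoundStatement bind(CqlSession session) {
    CqlVector<Float> vector = CqlVector.newInstance(0.1f, 0.2f, 0.3f);
    return session
        .prepare("INSERT INTO embeddings (id, v) VALUES (1, :v)")
        .bind()
        .setVector("v", vector, Float.class);
  }
}
```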

      This works with the CQL type matching the partitioner in use for this cluster: {@code * bigint} for {@code Murmur3Partitioner}, {@code blob} for {@code ByteOrderedPartitioner}, and @@ -454,11 +599,16 @@ default SelfT setCqlDuration(@NonNull String name, @Nullable CqlDuration v) { @NonNull @CheckReturnValue default SelfT setToken(@NonNull String name, @NonNull Token v) { - return setToken(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setToken(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java list. + * Sets the value for all occurrences of {@code name} to the provided Java list. * *

      By default, this works with CQL type {@code list}. * @@ -474,11 +624,16 @@ default SelfT setToken(@NonNull String name, @NonNull Token v) { @CheckReturnValue default SelfT setList( @NonNull String name, @Nullable List v, @NonNull Class elementsClass) { - return setList(firstIndexOf(name), v, elementsClass); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setList(i, v, elementsClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java set. + * Sets the value for all occurrences of {@code name} to the provided Java set. * *

      By default, this works with CQL type {@code set}. * @@ -494,11 +649,16 @@ default SelfT setList( @CheckReturnValue default SelfT setSet( @NonNull String name, @Nullable Set v, @NonNull Class elementsClass) { - return setSet(firstIndexOf(name), v, elementsClass); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setSet(i, v, elementsClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided Java map. + * Sets the value for all occurrences of {@code name} to the provided Java map. * *

      By default, this works with CQL type {@code map}. * @@ -517,12 +677,16 @@ default SelfT setMap( @Nullable Map v, @NonNull Class keyClass, @NonNull Class valueClass) { - return setMap(firstIndexOf(name), v, keyClass, valueClass); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setMap(i, v, keyClass, valueClass); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided user defined type - * value. + * Sets the value for all occurrences of {@code name} to the provided user defined type value. * *

      By default, this works with CQL user-defined types. * @@ -534,11 +698,16 @@ default SelfT setMap( @NonNull @CheckReturnValue default SelfT setUdtValue(@NonNull String name, @Nullable UdtValue v) { - return setUdtValue(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setUdtValue(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } /** - * Sets the value for the first occurrence of {@code name} to the provided tuple value. + * Sets the value for all occurrences of {@code name} to the provided tuple value. * *

      By default, this works with CQL tuples. * @@ -550,6 +719,11 @@ default SelfT setUdtValue(@NonNull String name, @Nullable UdtValue v) { @NonNull @CheckReturnValue default SelfT setTupleValue(@NonNull String name, @Nullable TupleValue v) { - return setTupleValue(firstIndexOf(name), v); + SelfT result = null; + for (Integer i : allIndicesOf(name)) { + result = (result == null ? this : result).setTupleValue(i, v); + } + assert result != null; // allIndices throws if there are no results + return result; } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java index 5937bc0517c..0fde2d87e71 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/TupleValue.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,12 +28,50 @@ * *

      A tuple value is attached if and only if its type is attached (see {@link Detachable}). * - *

      The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. + *

      The default implementation returned by the driver is mutable and serializable. If you write + * your own implementation, serializability is not mandatory, but recommended for use with some + * 3rd-party tools like Apache Spark ™. */ public interface TupleValue extends GettableByIndex, SettableByIndex { @NonNull TupleType getType(); + + /** + * Returns a string representation of the contents of this tuple. + * + *

      This produces a CQL literal, for example: + * + *

      +   * (1,'test')
      +   * 
      + * + * Notes: + * + *
        + *
      • This method does not sanitize its output in any way. In particular, no effort is made to + * limit output size: all fields are included, and large strings or blobs will be appended + * as-is. + *
      • Be mindful of how you expose the result. For example, in high-security environments, it + * might be undesirable to leak data in application logs. + *
+ */ + @NonNull + default String getFormattedContents() { + return codecRegistry().codecFor(getType(), TupleValue.class).format(this); + } + + /** + * Returns an abstract representation of this object, which may not include the tuple's + * contents. + * + *

      The driver's built-in {@link TupleValue} implementation returns the default format of {@link + * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. + * + *

      Omitting the contents was a deliberate choice, because we feel it would make it too easy to + * accidentally leak data (e.g. in application logs). If you want the contents, use {@link + * #getFormattedContents()}. + */ + @Override + String toString(); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java b/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java index bfdebdfd7fa..7e8bc80793b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/data/UdtValue.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,13 +28,51 @@ * *
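The split between the two methods added above is easy to demonstrate with a detached tuple type; the first line of output varies by instance, the second is the CQL literal (a sketch, printing to stdout):

```java
import com.datastax.oss.driver.api.core.data.TupleValue;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.TupleType;

public class TupleFormattingExample {
  public static void main(String[] args) {
    TupleType type = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT);
    TupleValue value = type.newValue(1, "test");
    System.out.println(value); // class name and hash code only, e.g. DefaultTupleValue@1b2c3d4e
    System.out.println(value.getFormattedContents()); // (1,'test')
  }
}
```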

A UDT value is attached if and only if its type is attached (see {@link Detachable}). * - *

      The default implementation returned by the driver is immutable and serializable. If you write - * your own implementation, it should at least be thread-safe; serializability is not mandatory, but - * recommended for use with some 3rd-party tools like Apache Spark ™. + *

      The default implementation returned by the driver is mutable and serializable. If you write + * your own implementation, serializability is not mandatory, but recommended for use with some + * 3rd-party tools like Apache Spark ™. */ public interface UdtValue extends GettableById, GettableByName, SettableById, SettableByName { @NonNull UserDefinedType getType(); + + /** + * Returns a string representation of the contents of this UDT. + * + *

      This produces a CQL literal, for example: + * + *

      +   * {street:'42 Main Street',zip:12345}
      +   * 
      + * + * Notes: + * + *
        + *
      • This method does not sanitize its output in any way. In particular, no effort is made to + * limit output size: all fields are included, and large strings or blobs will be appended + * as-is. + *
      • Be mindful of how you expose the result. For example, in high-security environments, it + * might be undesirable to leak data in application logs. + *
+ */ + @NonNull + default String getFormattedContents() { + return codecRegistry().codecFor(getType(), UdtValue.class).format(this); + } + + /** + * Returns an abstract representation of this object, which may not include the UDT's + * contents. + * + *

      The driver's built-in {@link UdtValue} implementation returns the default format of {@link + * Object#toString()}: the class name, followed by the at-sign and the hash code of the object. + * + *

      Omitting the contents was a deliberate choice, because we feel it would make it too easy to + * accidentally leak data (e.g. in application logs). If you want the contents, use {@link + * #getFormattedContents()}. + */ + @Override + String toString(); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java b/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java index 930a72a35fd..d1897f66e16 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/detach/AttachmentPoint.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.datastax.oss.driver.api.core.ProtocolVersion; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; import edu.umd.cs.findbugs.annotations.NonNull; /** @see Detachable */ @@ -39,6 +42,12 @@ public CodecRegistry getCodecRegistry() { @NonNull ProtocolVersion getProtocolVersion(); + /** + * Note that the default registry implementation returned by the driver also implements {@link + * MutableCodecRegistry}, which allows you to register new codecs at runtime. You can safely cast + * the result of this method (as long as you didn't extend the driver context to plug a custom + * registry implementation). + */ @NonNull CodecRegistry getCodecRegistry(); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java b/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java index 73d1f804fac..0c92bb727ea 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/detach/Detachable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java index 425e11c0c5a..de0d9db4ebd 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,22 +21,41 @@ import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; import java.util.Map; +import java.util.Optional; import java.util.Queue; import java.util.UUID; /** Decides which Cassandra nodes to contact for each query. */ public interface LoadBalancingPolicy extends AutoCloseable { + /** + * Returns an optional {@link RequestTracker} to be registered with the session. Registering a + * request tracker allows load-balancing policies to track node latencies in order to pick the + * fastest ones. + * + *
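Stepping back to the `AttachmentPoint` hunk above: the documented cast is all it takes to register a codec at runtime. A sketch, where `myCodec` stands for any custom codec, assuming the driver's default context:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;

public class RegisterCodecExample {
  static void register(CqlSession session, TypeCodec<?> myCodec) {
    // Safe with the default driver context; a custom context may plug in a registry
    // that does not implement MutableCodecRegistry, in which case this cast fails.
    MutableCodecRegistry registry =
        (MutableCodecRegistry) session.getContext().getCodecRegistry();
    registry.register(myCodec);
  }
}
```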

      This method is invoked only once during session configuration, and before any other methods + * in this interface. Note that at this point, the driver hasn't connected to any node yet. + * + * @since 4.13.0 + */ + @NonNull + default Optional getRequestTracker() { + return Optional.empty(); + } + /** * Initializes this policy with the nodes discovered during driver initialization. * *
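A skeleton of how a policy might use this new hook; the tracker body is deliberately left empty, and a real policy would override the tracker's callbacks to feed per-node latency measurements back into its query plans (a sketch, with a hypothetical class name):

```java
import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy;
import com.datastax.oss.driver.api.core.tracker.RequestTracker;
import java.util.Optional;

public abstract class LatencyAwarePolicySketch implements LoadBalancingPolicy {

  private final RequestTracker tracker =
      new RequestTracker() {
        // Override the relevant RequestTracker callbacks here to record latencies.
        @Override
        public void close() {}
      };

  @Override
  public Optional<RequestTracker> getRequestTracker() {
    return Optional.of(tracker);
  }
}
```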

      This method is guaranteed to be called exactly once per instance, and before any other - * method in this class. At this point, the driver has successfully connected to one of the - * contact points, and performed a first refresh of topology information (by default, the contents - * of {@code system.peers}), to discover other nodes in the cluster. + * method in this interface except {@link #getRequestTracker()}. At this point, the driver has + * successfully connected to one of the contact points, and performed a first refresh of topology + * information (by default, the contents of {@code system.peers}), to discover other nodes in the + * cluster. * *
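As a concrete illustration of this lifecycle, here is a deliberately naive policy (a hypothetical class, not one of the driver's built-in implementations) that stores the reporter, marks every node LOCAL, and serves nodes in a fixed order:

```java
import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy;
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.session.Session;
import java.util.ArrayDeque;
import java.util.Map;
import java.util.Queue;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;

public class EveryNodeLocalPolicy implements LoadBalancingPolicy {

  private final CopyOnWriteArrayList<Node> liveNodes = new CopyOnWriteArrayList<>();
  private volatile DistanceReporter distanceReporter;

  @Override
  public void init(Map<UUID, Node> nodes, DistanceReporter distanceReporter) {
    // Keep the reporter in a field: nodes added later also need a distance.
    this.distanceReporter = distanceReporter;
    for (Node node : nodes.values()) {
      distanceReporter.setDistance(node, NodeDistance.LOCAL); // required for every node
      liveNodes.addIfAbsent(node);
    }
  }

  @Override
  public Queue<Node> newQueryPlan(Request request, Session session) {
    return new ArrayDeque<>(liveNodes); // same order every time: naive on purpose
  }

  @Override
  public void onAdd(Node node) {
    distanceReporter.setDistance(node, NodeDistance.LOCAL);
  }

  @Override
  public void onUp(Node node) {
    liveNodes.addIfAbsent(node);
  }

  @Override
  public void onDown(Node node) {
    liveNodes.remove(node);
  }

  @Override
  public void onRemove(Node node) {
    liveNodes.remove(node);
  }

  @Override
  public void close() {}
}
```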

This method must call {@link DistanceReporter#setDistance(Node, NodeDistance) * distanceReporter.setDistance} for each provided node (otherwise that node will stay at distance @@ -50,12 +71,18 @@ public interface LoadBalancingPolicy extends AutoCloseable { * @param nodes all the nodes that are known to exist in the cluster (regardless of their state) * at the time of invocation. * @param distanceReporter an object that will be used by the policy to signal distance changes. - * Implementations will typically store a this in a field, since new nodes may get {@link + * Implementations will typically store this in a field, since new nodes may get {@link * #onAdd(Node) added} later and will need to have their distance set (or the policy might * change distances dynamically over time). */ void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter); + /** Returns a map containing details that impact C* node connectivity. */ + @NonNull + default Map getStartupConfiguration() { + return Collections.emptyMap(); + } + /** * Returns the coordinators to use for a new query. * diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java index 0bff43d6a46..aaae7957d00 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistance.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java new file mode 100644 index 00000000000..9a5a7f5a894 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.loadbalancing; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * A pluggable {@link NodeDistance} evaluator. + * + *

      Node distance evaluators are recognized by all the driver built-in load balancing policies. + * They can be specified {@linkplain + * com.datastax.oss.driver.api.core.session.SessionBuilder#withNodeDistanceEvaluator(String, + * NodeDistanceEvaluator) programmatically} or through the configuration (with the {@code + * load-balancing-policy.evaluator.class} option). + * + * @see com.datastax.oss.driver.api.core.session.SessionBuilder#withNodeDistanceEvaluator(String, + * NodeDistanceEvaluator) + */ +@FunctionalInterface +public interface NodeDistanceEvaluator { + + /** + * Evaluates the distance to apply to the given node. + * + *

      This method will be invoked each time the {@link LoadBalancingPolicy} processes a topology + * or state change, and will be passed the node being inspected, and the local datacenter name (or + * null if none is defined). If it returns a non-null {@link NodeDistance}, the policy will + * suggest that distance for the node; if it returns null, the policy will assign a default + * distance instead, based on its internal algorithm for computing node distances. + * + * @param node The node to assign a new distance to. + * @param localDc The local datacenter name, if defined, or null otherwise. + * @return The {@link NodeDistance} to assign to the node, or null to let the policy decide. + */ + @Nullable + NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc); +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java index 18908d90c5e..530f2ad38ac 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/EndPoint.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java index a996d8a1eaf..21ad200abed 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Metadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -114,4 +116,16 @@ default Optional getKeyspace(@NonNull String keyspaceName) { */ @NonNull Optional getTokenMap(); + + /** + * The cluster name to which this session is connected. 
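A minimal evaluator under this contract; the datacenter comparison is only an illustrative assumption, and any criterion (rack, tags, a deny list) would work the same way:

```java
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator;
import com.datastax.oss.driver.api.core.metadata.Node;

public class LocalDcOnlyEvaluator implements NodeDistanceEvaluator {
  @Override
  public NodeDistance evaluateDistance(Node node, String localDc) {
    if (localDc != null && !localDc.equals(node.getDatacenter())) {
      return NodeDistance.IGNORED; // suggest ignoring nodes outside the local DC
    }
    return null; // defer to the policy's default algorithm
  }
}
```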
The returned {@code Optional} should contain the + * value reported by the server in {@code system.local.cluster_name}. + * + *

      Note that this method has a default implementation for backwards compatibility. It is + * expected that any implementing classes override this method. + */ + @NonNull + default Optional getClusterName() { + return Optional.empty(); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java index 2f5d11c4071..fbfc748dd52 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/Node.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +20,7 @@ import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.session.Session; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.net.InetSocketAddress; @@ -30,6 +33,11 @@ * *
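Reading the new metadata field is a one-liner once the session is built (a sketch):

```java
import com.datastax.oss.driver.api.core.CqlSession;

public class ClusterNameExample {
  static void printClusterName(CqlSession session) {
    session
        .getMetadata()
        .getClusterName()
        .ifPresent(name -> System.out.println("Connected to cluster: " + name));
  }
}
```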

This object is mutable; all of its properties may be updated at runtime to reflect the latest * state of the node. + + *

Note that the default implementation returned by the driver uses reference equality. A + * {@link Session} will always return the same instance for a given {@link #getHostId() host id}. + * However, instances coming from different sessions will not be equal, even if they refer to the + * same host id. */ public interface Node { @@ -178,9 +186,26 @@ public interface Node { * The host ID that is assigned to this node by Cassandra. This value can be used to uniquely * identify a node even when the underlying IP address changes. * - *

      This information is always present. + *

      This information is always present once the session has initialized. However, there is a + * narrow corner case where a driver client can observe a null value: if a {@link + * NodeStateListener} is registered, the very first {@code onUp} call will reference a node + * that has a null id (that node is the initial contact point, and the driver hasn't read host ids + * from {@code system.local} and {@code system.peers} yet). Beyond that point — including + * any other {@code onUp} call — the host id will always be present. + * + *

      +   * CqlSession session = CqlSession.builder()
      +   *     .withNodeStateListener(
      +   *         new NodeStateListenerBase() {
      +   *           @Override
      +   *           public void onUp(@NonNull Node node) {
      +   *             // node.getHostId() == null for the first invocation only
      +   *           }
      +   *         })
      +   *     .build();
      +   * 
      */ - @NonNull + @Nullable UUID getHostId(); /** diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java index c35ea0ccacb..2f2460886ef 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeState.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,25 +32,17 @@ public enum NodeState { */ UNKNOWN, /** - * A node is considered up in either of the following situations: - * - *
        - *
      • the driver has at least one active connection to the node. - *
      • the driver is not actively trying to connect to the node (because it's ignored by the - * {@link LoadBalancingPolicy}), but it has received a topology event indicating that the - * node is up. - *
      + * A node is considered up in either of the following situations: 1) the driver has at least one + * active connection to the node, or 2) the driver is not actively trying to connect to the node + * (because it's ignored by the {@link LoadBalancingPolicy}), but it has received a topology event + * indicating that the node is up. */ UP, /** - * A node is considered down in either of the following situations: - * - *
        - *
      • the driver has lost all connections to the node (and is currently trying to reconnect). - *
      • the driver is not actively trying to connect to the node (because it's ignored by the - * {@link LoadBalancingPolicy}), but it has received a topology event indicating that the - * node is down. - *
      + * A node is considered down in either of the following situations: 1) the driver has lost all + * connections to the node (and is currently trying to reconnect), or 2) the driver is not + * actively trying to connect to the node (because it's ignored by the {@link + * LoadBalancingPolicy}), but it has received a topology event indicating that the node is down. */ DOWN, /** diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java index f4e5677ce9e..bb52e9d1496 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListener.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +18,16 @@ package com.datastax.oss.driver.api.core.metadata; import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; import edu.umd.cs.findbugs.annotations.NonNull; /** * A listener that gets notified when nodes states change. * - *

      An implementation of this interface can be registered in the configuration, or with {@link - * SessionBuilder#withNodeStateListener(NodeStateListener)}. + *

      Implementations of this interface can be registered either via the configuration (see {@code + * reference.conf} in the manual or core driver JAR), or programmatically via {@link + * SessionBuilder#addNodeStateListener(NodeStateListener)}. * *

      Note that the methods defined by this interface will be executed by internal driver threads, * and are therefore expected to have short execution times. If you need to perform long @@ -32,6 +36,9 @@ * *

      If you implement this interface but don't need to implement all the methods, extend {@link * NodeStateListenerBase}. + * + *

      If your implementation of this interface requires access to a fully-initialized session, + * consider wrapping it in a {@link SafeInitNodeStateListener}. */ public interface NodeStateListener extends AutoCloseable { @@ -63,4 +70,42 @@ public interface NodeStateListener extends AutoCloseable { * absent from the new list. */ void onRemove(@NonNull Node node); + + /** + * Invoked when the session is ready to process user requests. + * + *

      This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future + * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, + * this method will not get called. + * + *

      Listener methods are invoked from different threads; if you store the session in a field, + * make it at least volatile to guarantee proper publication. + * + *

      Note that this method will not be the first one invoked on the listener; the driver emits + * node events before that, during the initialization of the session: + * + *

        + *
      • First the driver shuffles the contact points, and tries each one sequentially. For any + * contact point that can't be reached, {@link #onDown(Node)} is invoked; for the one that + * eventually succeeds, {@link #onUp(Node)} is invoked and that node becomes the control + * node (if none succeeds, the session initialization fails and the process stops here). + *
      • The control node's {@code system.peers} table is inspected to discover the remaining + * nodes in the cluster. For any node that wasn't already a contact point, {@link + * #onAdd(Node)} is invoked; for any contact point that doesn't have a corresponding entry + * in the table, {@link #onRemove(Node)} is invoked; + *
      • The load balancing policy computes the nodes' {@linkplain NodeDistance distances}, and, + * for each LOCAL or REMOTE node, the driver creates a connection pool. If at least one + * pooled connection can be established, {@link #onUp(Node)} is invoked; otherwise, {@link + * #onDown(Node)} is invoked (no additional event is emitted for the control node, it is + * considered up since we already have a connection to it). + *
      • Once all the pools are created, the session is fully initialized and this method is + * invoked. + *
      + * + * If you're not interested in those init events, or want to delay them until after the session is + * ready, take a look at {@link SafeInitNodeStateListener}. + * + *

      This method's default implementation is empty. + */ + default void onSessionReady(@NonNull Session session) {} } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java index 6420ee0b53f..0b747a00084 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java new file mode 100644 index 00000000000..c33f7616b5a --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.metadata; + +import com.datastax.oss.driver.api.core.session.Session; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiConsumer; +import net.jcip.annotations.GuardedBy; + +/** + * A node state listener wrapper that delays (or ignores) init events until after the session is + * ready. + * + *
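Before the wrapper below, a sketch of a listener that uses the new callback directly to defer work until the session is usable (a hypothetical class; note the volatile field, as recommended above):

```java
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase;
import com.datastax.oss.driver.api.core.session.Session;

public class SessionAwareListener extends NodeStateListenerBase {

  private volatile Session session; // written by a driver thread, read by others

  @Override
  public void onSessionReady(Session session) {
    this.session = session;
  }

  @Override
  public void onDown(Node node) {
    Session s = this.session;
    if (s != null) {
      // The session is ready: safe to use it here, e.g. to persist the event.
    }
  }
}
```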

By default, the driver invokes node state callbacks, such as {@link #onUp} and {@link #onAdd}, + * before the session is ready; see {@link NodeStateListener#onSessionReady(Session)} for a detailed + * explanation. This can make things complicated if your listener implementation needs the session + * to process those events. + * + *

      This class wraps another implementation to shield it from those details: + * + *

      + * NodeStateListener delegate = ... // your listener implementation
      + *
      + * SafeInitNodeStateListener wrapper =
      + *     new SafeInitNodeStateListener(delegate, true);
      + *
      + * CqlSession session = CqlSession.builder()
      + *     .withNodeStateListener(wrapper)
      + *     .build();
      + * 
+ *
+ * <p>With this setup, {@code delegate.onSessionReady} is guaranteed to be invoked first, before any
+ * other method. The second constructor argument indicates what to do with the event notifications
+ * received before that point:
+ *
+ * <ul>
+ *   <li>if {@code true}, they are recorded, and replayed to {@code delegate} immediately after
+ *       {@link #onSessionReady}. They are guaranteed to happen in the original order, and before
+ *       any post-initialization events.
+ *   <li>if {@code false}, they are discarded.
+ * </ul>
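For illustration, a delegate that actually needs the session might look like the following sketch; the class name and log output are invented for this example, and the volatile field accounts for listener methods being invoked from different driver threads:

```java
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase;
import com.datastax.oss.driver.api.core.session.Session;
import edu.umd.cs.findbugs.annotations.NonNull;

// Hypothetical delegate: it dereferences the session in onUp(), which is only
// safe behind SafeInitNodeStateListener, since onSessionReady is then
// guaranteed to run first.
public class LoggingNodeStateListener extends NodeStateListenerBase {

  private volatile Session session; // written by one driver thread, read by others

  @Override
  public void onSessionReady(@NonNull Session session) {
    this.session = session;
  }

  @Override
  public void onUp(@NonNull Node node) {
    // Safe under the wrapper: the session field is always set by now
    System.out.printf("[%s] node %s is up%n", session.getName(), node.getEndPoint());
  }
}
```

It would then be wrapped as `new SafeInitNodeStateListener(new LoggingNodeStateListener(), true)`, exactly as in the example above.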

      Usage in non-blocking applications: beware that this class is not lock-free. It is implemented + * with locks for internal coordination. + * + * @since 4.6.0 + */ +public class SafeInitNodeStateListener implements NodeStateListener { + + private final NodeStateListener delegate; + private final boolean replayInitEvents; + + // Write lock: recording init events or setting sessionReady + // Read lock: reading init events or checking sessionReady + private final ReadWriteLock lock = new ReentrantReadWriteLock(); + + @GuardedBy("lock") + private boolean sessionReady; + + @GuardedBy("lock") + private final List initEvents = new ArrayList<>(); + + /** + * Creates a new instance. + * + * @param delegate the wrapped listener, to which method invocations will be forwarded. + * @param replayInitEvents whether to record events during initialization and replay them to the + * child listener once it's created, or just ignore them. + */ + public SafeInitNodeStateListener(@NonNull NodeStateListener delegate, boolean replayInitEvents) { + this.delegate = Objects.requireNonNull(delegate); + this.replayInitEvents = replayInitEvents; + } + + @Override + public void onSessionReady(@NonNull Session session) { + lock.writeLock().lock(); + try { + if (!sessionReady) { + sessionReady = true; + delegate.onSessionReady(session); + if (replayInitEvents) { + for (InitEvent event : initEvents) { + event.invoke(delegate); + } + } + } + } finally { + lock.writeLock().unlock(); + } + } + + @Override + public void onAdd(@NonNull Node node) { + onEvent(node, InitEvent.Type.ADD); + } + + @Override + public void onUp(@NonNull Node node) { + onEvent(node, InitEvent.Type.UP); + } + + @Override + public void onDown(@NonNull Node node) { + onEvent(node, InitEvent.Type.DOWN); + } + + @Override + public void onRemove(@NonNull Node node) { + onEvent(node, InitEvent.Type.REMOVE); + } + + private void onEvent(Node node, InitEvent.Type eventType) { + + // Cheap case: the session is ready, just delegate + lock.readLock().lock(); + try { + if (sessionReady) { + eventType.listenerMethod.accept(delegate, node); + return; + } + } finally { + lock.readLock().unlock(); + } + + // Otherwise, we must acquire the write lock to record the event + if (replayInitEvents) { + lock.writeLock().lock(); + try { + // Must re-check because we completely released the lock for a short duration + if (sessionReady) { + eventType.listenerMethod.accept(delegate, node); + } else { + initEvents.add(new InitEvent(node, eventType)); + } + } finally { + lock.writeLock().unlock(); + } + } + } + + @Override + public void close() throws Exception { + delegate.close(); + } + + private static class InitEvent { + enum Type { + ADD(NodeStateListener::onAdd), + UP(NodeStateListener::onUp), + DOWN(NodeStateListener::onDown), + REMOVE(NodeStateListener::onRemove), + ; + + @SuppressWarnings("ImmutableEnumChecker") + final BiConsumer listenerMethod; + + Type(BiConsumer listenerMethod) { + this.listenerMethod = listenerMethod; + } + } + + final Node node; + final Type type; + + InitEvent(@NonNull Node node, @NonNull Type type) { + this.node = Objects.requireNonNull(node); + this.type = Objects.requireNonNull(type); + } + + void invoke(@NonNull NodeStateListener target) { + type.listenerMethod.accept(Objects.requireNonNull(target), node); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java index 319e3e82d8f..7746bf3382e 100644 --- 
a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/TokenMap.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java index acc06540225..35eec88eb45 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/AggregateMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java index ce50cc6ef40..97613e2d2f1 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ClusteringOrder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java index 236cfd9b385..fb91211e2fa 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ColumnMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java index 3512c9a5560..bf1bf97b19e 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/Describable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java index ff273c34f81..ed2d4d780de 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java index 14bb7947a60..8108b4b7afd 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/FunctionSignature.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +21,7 @@ import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.util.List; import java.util.Objects; import net.jcip.annotations.Immutable; @@ -30,7 +33,10 @@ * {@code sum(int, int)} are not equal. */ @Immutable -public class FunctionSignature { +public class FunctionSignature implements Serializable { + + private static final long serialVersionUID = 1; + @NonNull private final CqlIdentifier name; @NonNull private final List parameterTypes; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java index 941b67587ba..67ac4c06a2c 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexKind.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java index 773eba5cb8b..631a6584a27 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/IndexMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -73,7 +75,7 @@ default String describe(boolean pretty) { .append(getTable()) .append(String.format(" (%s)", getTarget())) .newLine() - .append(String.format("USING '%s'", getClassName())); + .append(String.format("USING '%s'", getClassName().get())); // Some options already appear in the CREATE statement, ignore them Map describedOptions = diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java index abda435ba02..e5080932b3c 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/KeyspaceMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.Map; +import java.util.Objects; import java.util.Optional; /** A keyspace in the schema metadata. */ @@ -245,4 +248,17 @@ default String describeWithChildren(boolean pretty) { return builder.build(); } + + default boolean shallowEquals(Object other) { + if (other == this) { + return true; + } else if (other instanceof KeyspaceMetadata) { + KeyspaceMetadata that = (KeyspaceMetadata) other; + return Objects.equals(this.getName(), that.getName()) + && this.isDurableWrites() == that.isDurableWrites() + && Objects.equals(this.getReplication(), that.getReplication()); + } else { + return false; + } + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java index 2cf5a803aa0..8b70ba04955 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/RelationMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java index 52a4208c813..ac7317574ed 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,7 @@ */ package com.datastax.oss.driver.api.core.metadata.schema; +import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.api.core.type.UserDefinedType; import edu.umd.cs.findbugs.annotations.NonNull; @@ -22,8 +25,9 @@ /** * Tracks schema changes. * - *

- * <p>An implementation of this interface can be registered in the configuration, or with {@link
- * SessionBuilder#withSchemaChangeListener(SchemaChangeListener)}.
+ *

+ * <p>Implementations of this interface can be registered either via the configuration (see {@code
+ * reference.conf} in the manual or core driver JAR), or programmatically via {@link
+ * SessionBuilder#addSchemaChangeListener(SchemaChangeListener)}.
 *
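For reference, the programmatic route looks like this (a minimal sketch; `MyListener` stands in for your implementation, and the configuration alternative is documented in `reference.conf`):

```java
CqlSession session = CqlSession.builder()
    .addSchemaChangeListener(new MyListener()) // MyListener implements SchemaChangeListener
    .build();
```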

      Note that the methods defined by this interface will be executed by internal driver threads, * and are therefore expected to have short execution times. If you need to perform long @@ -71,4 +75,20 @@ void onUserDefinedTypeUpdated( void onViewDropped(@NonNull ViewMetadata view); void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous); + + /** + * Invoked when the session is ready to process user requests. + * + *

+ * <p>This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future
+ * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails,
+ * this method will not be called.

+ * <p>Listener methods are invoked from different threads; if you store the session in a field,
+ * make it at least volatile to guarantee proper publication.

+ * <p>This method is guaranteed to be the first one invoked on this object.

      The default implementation is empty. + */ + default void onSessionReady(@NonNull Session session) {} } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java index 0d4adef2878..1cd449b39d8 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java index 425d08945c0..bcda226b45d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/TableMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.internal.core.metadata.schema.parsing.RelationParser; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.Map; +import java.util.Optional; /** A table in the schema metadata. */ public interface TableMetadata extends RelationMetadata { @@ -32,6 +35,17 @@ public interface TableMetadata extends RelationMetadata { @NonNull Map getIndexes(); + @NonNull + default Optional getIndex(@NonNull CqlIdentifier indexId) { + return Optional.ofNullable(getIndexes().get(indexId)); + } + + /** Shortcut for {@link #getIndex(CqlIdentifier) getIndex(CqlIdentifier.fromCql(indexName))}. 
*/ + @NonNull + default Optional getIndex(@NonNull String indexName) { + return getIndex(CqlIdentifier.fromCql(indexName)); + } + @NonNull @Override default String describe(boolean pretty) { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java index 6d6b599e355..e6b06cffb97 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/schema/ViewMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java index af41553b7b4..f39de8ec5b6 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/Token.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java index 740c54caebc..e384300c571 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metadata/token/TokenRange.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java index c42fa6ca0ba..0e9934c7034 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultNodeMetric.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java index b9b01cf3364..63027a23fe7 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/DefaultSessionMetric.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java index 254be630a44..58d531b3464 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/Metrics.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,7 +29,7 @@ * *

      This type exists mainly to avoid a hard dependency to Dropwizard Metrics (that is, the JAR can * be completely removed from the classpath if metrics are disabled). It also provides convenience - * methods to access individual metrics programatically. + * methods to access individual metrics programmatically. */ public interface Metrics { @@ -53,10 +55,10 @@ public interface Metrics { * *

      {@code
          * // Correct:
      -   * Gauge connectedNodes = getNodeMetric(node, DefaultSessionMetric.CONNECTED_NODES);
      +   * Gauge connectedNodes = getSessionMetric(DefaultSessionMetric.CONNECTED_NODES);
          *
          * // Wrong, will throw CCE:
      -   * Counter connectedNodes = getNodeMetric(node, DefaultSessionMetric.CONNECTED_NODES);
      +   * Counter connectedNodes = getSessionMetric(DefaultSessionMetric.CONNECTED_NODES);
          * }
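For context, here is a minimal sketch of that lookup from application code; it assumes metrics are enabled in the configuration (both calls return an empty Optional otherwise):

```java
import com.codahale.metrics.Gauge;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric;

static void printConnectedNodes(CqlSession session) {
  session
      .getMetrics() // empty if metrics are disabled
      .flatMap(m -> m.<Gauge<?>>getSessionMetric(DefaultSessionMetric.CONNECTED_NODES))
      .ifPresent(g -> System.out.println("Connected nodes: " + g.getValue()));
}
```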
      * * @param profileName the name of the execution profile, or {@code null} if the metric is not diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java index 6f7c3c8e7f6..b31c0ed8bcf 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/NodeMetric.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,8 +23,9 @@ /** * A node-level metric exposed through {@link Session#getMetrics()}. * - *

      All metrics exposed out of the box by the driver are instances of {@link DefaultNodeMetric} - * (this interface only exists to allow custom metrics in driver extensions). + *

      All metrics exposed out of the box by the driver are instances of {@link DefaultNodeMetric} or + * {@link com.datastax.dse.driver.api.core.metrics.DseNodeMetric DseNodeMetric} (this interface only + * exists to allow custom metrics in driver extensions). * * @see SessionMetric */ diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java index 4b591e14085..2a1ee599754 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/metrics/SessionMetric.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +24,8 @@ * A session-level metric exposed through {@link Session#getMetrics()}. * *

      All metrics exposed out of the box by the driver are instances of {@link DefaultSessionMetric} - * (this interface only exists to allow custom metrics in driver extensions). + * or {@link com.datastax.dse.driver.api.core.metrics.DseSessionMetric DseSessionMetric} (this + * interface only exists to allow custom metrics in driver extensions). * * @see NodeMetric */ diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java index 919bdab6bd5..597b333267b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java b/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java new file mode 100644 index 00000000000..3cb838f3171 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/paging/OffsetPager.java @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.paging; + +import com.datastax.oss.driver.api.core.AsyncPagingIterable; +import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; +import com.datastax.oss.driver.api.core.PagingIterable; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import net.jcip.annotations.ThreadSafe; + +/** + * A utility to emulate offset queries on the client side (this comes with important performance + * trade-offs, make sure you read and understand the whole javadocs before using this class). + * + *

+ * <p>Web UIs and services often provide paginated results with random access, for example: given a
+ * page size of 20 elements, fetch page 5. Cassandra does not support this natively (see <a
+ * href="https://issues.apache.org/jira/browse/CASSANDRA-6511">CASSANDRA-6511</a>), because such
+ * queries are inherently linear: the database would have to restart from the beginning every time,
+ * and skip unwanted rows until it reaches the desired offset.

+ * <p>However, random pagination is a real need for many applications, and linear performance can
+ * be a reasonable trade-off if the cardinality stays low. This class provides a way to emulate
+ * this behavior on the client side.

+ * <h3>Performance considerations</h3>

+ *
+ * <p>For each page that you want to retrieve:
+ *
+ * <ul>
+ *   <li>you need to re-execute the query, in order to start with a fresh result set;
+ *   <li>this class starts iterating from the beginning, and skips rows until it reaches the
+ *       desired offset.
+ * </ul>
+ *
+ * <pre>
      + * String query = "SELECT ...";
      + * OffsetPager pager = new OffsetPager(20);
      + *
      + * // Get page 2: start from a fresh result set, throw away rows 1-20, then return rows 21-40
      + * ResultSet rs = session.execute(query);
      + * OffsetPager.Page<Row> page2 = pager.getPage(rs, 2);
      + *
      + * // Get page 5: start from a fresh result set, throw away rows 1-80, then return rows 81-100
      + * rs = session.execute(query);
      + * OffsetPager.Page<Row> page5 = pager.getPage(rs, 5);
+ * </pre>
      + * + *
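The asynchronous overload further down in this file follows the same pattern; a minimal sketch, reusing `query` and `pager` from the example above:

```java
// Async variant: the same "fresh result set per page" rule applies
CompletionStage<OffsetPager.Page<Row>> page3Future =
    session.executeAsync(query).thenCompose(rs -> pager.getPage(rs, 3));
```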

+ * <h3>Establishing application-level guardrails</h3>

+ *
+ * <p>Linear performance should be fine for the values typically encountered in real-world
+ * applications: for example, if the page size is 25 and users never go past page 10, the worst case
+ * is only 250 rows, which is a very small result set. However, we strongly recommend that you
+ * implement hard limits in your application code: if the page number is exposed to the user (for
+ * example if it is passed as a URL parameter), make sure it is properly validated and enforce a
+ * maximum, so that an attacker can't inject a large value that could potentially fetch millions of
+ * rows.
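A sketch of such a guardrail; `MAX_PAGE` and the fallback behavior are assumptions for the example:

```java
private static final int MAX_PAGE = 10; // assumed application-level cap

static int sanitizePageNumber(String rawParam) {
  try {
    // Clamp the user-supplied value into [1, MAX_PAGE]
    return Math.min(Math.max(Integer.parseInt(rawParam), 1), MAX_PAGE);
  } catch (NumberFormatException e) {
    return 1; // fall back to the first page on malformed input
  }
}
```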

+ * <h3>Relation with protocol-level paging</h3>

      + * + * Protocol-level paging refers to the ability to split large response into multiple network chunks: + * see {@link Statement#setPageSize(int)} and {@code basic.request.page-size} in the configuration. + * It happens under the hood, and is completely transparent for offset paging: this class will work + * the same no matter how many network roundtrips were needed to fetch the result. You don't need to + * set the protocol page size and the logical page size to the same value. + */ +@ThreadSafe +public class OffsetPager { + + /** A page returned as the result of an offset query. */ + public interface Page { + + /** The elements in the page. */ + @NonNull + List getElements(); + + /** + * The page number (1 for the first page, 2 for the second page, etc). + * + *

      Note that it may be different than the number you passed to {@link + * #getPage(PagingIterable, int)}: if the result set was too short, this is the actual number of + * the last page. + */ + int getPageNumber(); + + /** Whether this is the last page in the result set. */ + boolean isLast(); + } + + private final int pageSize; + + /** + * Creates a new instance. + * + * @param pageSize the number of elements per page. Must be greater than or equal to 1. + */ + public OffsetPager(int pageSize) { + if (pageSize < 1) { + throw new IllegalArgumentException("Invalid pageSize, expected >=1, got " + pageSize); + } + this.pageSize = pageSize; + } + + /** + * Extracts a page from a synchronous result set, by skipping rows until we get to the requested + * offset. + * + * @param iterable the iterable to extract the results from: typically a {@link ResultSet}, or a + * {@link PagingIterable} returned by the mapper. + * @param targetPageNumber the page to return (1 for the first page, 2 for the second page, etc). + * Must be greater than or equal to 1. + * @return the requested page, or the last page if the requested page was past the end of the + * iterable. + * @throws IllegalArgumentException if the conditions on the arguments are not respected. + */ + @NonNull + public Page getPage( + @NonNull PagingIterable iterable, final int targetPageNumber) { + + throwIfIllegalArguments(iterable, targetPageNumber); + + // Holds the contents of the target page. We also need to record the current page as we go, + // because our iterable is forward-only and we can't predict when we'll hit the end. + List currentPageElements = new ArrayList<>(); + + int currentPageNumber = 1; + int currentPageSize = 0; + for (ElementT element : iterable) { + currentPageSize += 1; + + if (currentPageSize > pageSize) { + currentPageNumber += 1; + currentPageSize = 1; + currentPageElements.clear(); + } + + currentPageElements.add(element); + + if (currentPageNumber == targetPageNumber && currentPageSize == pageSize) { + // The target page has the full size and we've seen all of its elements + break; + } + } + + // Either we have the full target page, or we've reached the end of the result set. + boolean isLast = iterable.one() == null; + return new DefaultPage<>(currentPageElements, currentPageNumber, isLast); + } + + /** + * Extracts a page from an asynchronous result set, by skipping rows until we get to the requested + * offset. + * + * @param iterable the iterable to extract the results from. Typically an {@link + * AsyncPagingIterable}, or a {@link MappedAsyncPagingIterable} returned by the mapper. + * @param targetPageNumber the page to return (1 for the first page, 2 for the second page, etc). + * Must be greater than or equal to 1. + * @return a stage that will complete with the requested page, or the last page if the requested + * page was past the end of the iterable. + * @throws IllegalArgumentException if the conditions on the arguments are not respected. 
+ */ + @NonNull + public > + CompletionStage> getPage( + @NonNull IterableT iterable, final int targetPageNumber) { + + // Throw IllegalArgumentException directly instead of failing the stage, since it signals + // blatant programming errors + throwIfIllegalArguments(iterable, targetPageNumber); + + CompletableFuture> pageFuture = new CompletableFuture<>(); + getPage(iterable, targetPageNumber, 1, 0, new ArrayList<>(), pageFuture); + + return pageFuture; + } + + private void throwIfIllegalArguments(@NonNull Object iterable, int targetPageNumber) { + Objects.requireNonNull(iterable); + if (targetPageNumber < 1) { + throw new IllegalArgumentException( + "Invalid targetPageNumber, expected >=1, got " + targetPageNumber); + } + } + + /** + * Main method for the async iteration. + * + *

      See the synchronous version in {@link #getPage(PagingIterable, int)} for more explanations: + * this is identical, except that it is async and we need to handle protocol page transitions + * manually. + */ + private , ElementT> void getPage( + @NonNull IterableT iterable, + final int targetPageNumber, + int currentPageNumber, + int currentPageSize, + @NonNull List currentPageElements, + @NonNull CompletableFuture> pageFuture) { + + // Note: iterable.currentPage()/fetchNextPage() refer to protocol-level pages, do not confuse + // with logical pages handled by this class + Iterator currentFrame = iterable.currentPage().iterator(); + while (currentFrame.hasNext()) { + ElementT element = currentFrame.next(); + + currentPageSize += 1; + + if (currentPageSize > pageSize) { + currentPageNumber += 1; + currentPageSize = 1; + currentPageElements.clear(); + } + + currentPageElements.add(element); + + if (currentPageNumber == targetPageNumber && currentPageSize == pageSize) { + // Full-size target page. In this method it's simpler to finish directly here. + if (currentFrame.hasNext()) { + pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, false)); + } else if (!iterable.hasMorePages()) { + pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, true)); + } else { + // It's possible for the server to return an empty last frame, so we need to fetch it to + // know for sure whether there are more elements + int finalCurrentPageNumber = currentPageNumber; + iterable + .fetchNextPage() + .whenComplete( + (nextIterable, throwable) -> { + if (throwable != null) { + pageFuture.completeExceptionally(throwable); + } else { + boolean isLastPage = !nextIterable.currentPage().iterator().hasNext(); + pageFuture.complete( + new DefaultPage<>( + currentPageElements, finalCurrentPageNumber, isLastPage)); + } + }); + } + return; + } + } + + if (iterable.hasMorePages()) { + int finalCurrentPageNumber = currentPageNumber; + int finalCurrentPageSize = currentPageSize; + iterable + .fetchNextPage() + .whenComplete( + (nextIterable, throwable) -> { + if (throwable != null) { + pageFuture.completeExceptionally(throwable); + } else { + getPage( + nextIterable, + targetPageNumber, + finalCurrentPageNumber, + finalCurrentPageSize, + currentPageElements, + pageFuture); + } + }); + } else { + // Reached the end of the result set, finish with what we have so far + pageFuture.complete(new DefaultPage<>(currentPageElements, currentPageNumber, true)); + } + } + + private static class DefaultPage implements Page { + private final List elements; + private final int pageNumber; + private final boolean isLast; + + DefaultPage(@NonNull List elements, int pageNumber, boolean isLast) { + this.elements = ImmutableList.copyOf(elements); + this.pageNumber = pageNumber; + this.isLast = isLast; + } + + @NonNull + @Override + public List getElements() { + return elements; + } + + @Override + public int getPageNumber() { + return pageNumber; + } + + @Override + public boolean isLast() { + return isLast; + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java index 8859cdd6e4f..4b57b781822 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryDecision.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,11 @@ */ package com.datastax.oss.driver.api.core.retry; -/** A decision from the {@link RetryPolicy} on how to handle a retry. */ +/** + * A decision from the {@link RetryPolicy} on how to handle a retry. + * + * @see RetryVerdict#getRetryDecision() + */ public enum RetryDecision { /** Retry the operation on the same node. */ RETRY_SAME, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java index e36658c9d8e..e8546816e23 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -64,7 +66,10 @@ public interface RetryPolicy extends AutoCloseable { * {@link ReadTimeoutException#wasDataPresent()}. * @param retryCount how many times the retry policy has been invoked already for this request * (not counting the current invocation). + * @deprecated As of version 4.10, use {@link #onReadTimeoutVerdict(Request, ConsistencyLevel, + * int, int, boolean, int)} instead. */ + @Deprecated RetryDecision onReadTimeout( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -73,6 +78,34 @@ RetryDecision onReadTimeout( boolean dataPresent, int retryCount); + /** + * Whether to retry when the server replied with a {@code READ_TIMEOUT} error; this indicates a + * server-side timeout during a read query, i.e. some replicas did not reply to the + * coordinator in time. + * + * @param request the request that timed out. + * @param cl the requested consistency level. + * @param blockFor the minimum number of replica acknowledgements/responses that were required to + * fulfill the operation. 
+ * @param received the number of replica that had acknowledged/responded to the operation before + * it failed. + * @param dataPresent whether the actual data was amongst the received replica responses. See + * {@link ReadTimeoutException#wasDataPresent()}. + * @param retryCount how many times the retry policy has been invoked already for this request + * (not counting the current invocation). + */ + default RetryVerdict onReadTimeoutVerdict( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int blockFor, + int received, + boolean dataPresent, + int retryCount) { + RetryDecision decision = + onReadTimeout(request, cl, blockFor, received, dataPresent, retryCount); + return () -> decision; + } + /** * Whether to retry when the server replied with a {@code WRITE_TIMEOUT} error; this indicates a * server-side timeout during a write query, i.e. some replicas did not reply to the @@ -92,7 +125,10 @@ RetryDecision onReadTimeout( * it failed. * @param retryCount how many times the retry policy has been invoked already for this request * (not counting the current invocation). + * @deprecated As of version 4.10, use {@link #onWriteTimeoutVerdict(Request, ConsistencyLevel, + * WriteType, int, int, int)} instead. */ + @Deprecated RetryDecision onWriteTimeout( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -101,6 +137,37 @@ RetryDecision onWriteTimeout( int received, int retryCount); + /** + * Whether to retry when the server replied with a {@code WRITE_TIMEOUT} error; this indicates a + * server-side timeout during a write query, i.e. some replicas did not reply to the + * coordinator in time. + * + *
<p>Note that this method will only be invoked for {@linkplain Request#isIdempotent() idempotent} + * requests: when a write times out, it is impossible to determine with 100% certainty whether the + * mutation was applied or not, so the write is never safe to retry; the driver will rethrow the + * error directly, without invoking the retry policy. + * + * @param request the request that timed out. + * @param cl the requested consistency level. + * @param writeType the type of the write for which the timeout was raised. + * @param blockFor the minimum number of replica acknowledgements/responses that were required to + * fulfill the operation. + * @param received the number of replicas that had acknowledged/responded to the operation before + * it failed. + * @param retryCount how many times the retry policy has been invoked already for this request + * (not counting the current invocation). + */ + default RetryVerdict onWriteTimeoutVerdict( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + @NonNull WriteType writeType, + int blockFor, + int received, + int retryCount) { + RetryDecision decision = onWriteTimeout(request, cl, writeType, blockFor, received, retryCount); + return () -> decision; + } + /** * Whether to retry when the server replied with an {@code UNAVAILABLE} error; this indicates that * the coordinator determined that there were not enough replicas alive to perform a query with @@ -114,7 +181,10 @@ RetryDecision onWriteTimeout( * tried to execute the operation. * @param retryCount how many times the retry policy has been invoked already for this request * (not counting the current invocation). + * @deprecated As of version 4.10, use {@link #onUnavailableVerdict(Request, ConsistencyLevel, + * int, int, int)} instead. */ + @Deprecated RetryDecision onUnavailable( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -122,6 +192,30 @@ RetryDecision onUnavailable( int alive, int retryCount); + /** + * Whether to retry when the server replied with an {@code UNAVAILABLE} error; this indicates that + * the coordinator determined that there were not enough replicas alive to perform a query with + * the requested consistency level. + * + * @param request the request that failed. + * @param cl the requested consistency level. + * @param required the number of replica acknowledgements/responses required to perform the + * operation (with its required consistency level). + * @param alive the number of replicas that were known to be alive by the coordinator node when it + * tried to execute the operation. + * @param retryCount how many times the retry policy has been invoked already for this request + * (not counting the current invocation). + */ + default RetryVerdict onUnavailableVerdict( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int required, + int alive, + int retryCount) { + RetryDecision decision = onUnavailable(request, cl, required, alive, retryCount); + return () -> decision; + } + /** * Whether to retry when a request was aborted before we could get a response from the server. * @@ -139,10 +233,37 @@ RetryDecision onUnavailable( * @param error the error. * @param retryCount how many times the retry policy has been invoked already for this request * (not counting the current invocation). + * @deprecated As of version 4.10, use {@link #onRequestAbortedVerdict(Request, Throwable, int)} + * instead.
*/ + @Deprecated RetryDecision onRequestAborted( @NonNull Request request, @NonNull Throwable error, int retryCount); + /** + * Whether to retry when a request was aborted before we could get a response from the server. + * + *
<p>
      This can happen in two cases: if the connection was closed due to an external event (this + * will manifest as a {@link ClosedConnectionException}, or {@link HeartbeatException} for a + * heartbeat failure); or if there was an unexpected error while decoding the response (this can + * only be a driver bug). + * + *
<p>
      Note that this method will only be invoked for {@linkplain Request#isIdempotent() + * idempotent} requests: when execution was aborted before getting a response, it is impossible to + * determine with 100% certainty whether a mutation was applied or not, so a write is never safe + * to retry; the driver will rethrow the error directly, without invoking the retry policy. + * + * @param request the request that was aborted. + * @param error the error. + * @param retryCount how many times the retry policy has been invoked already for this request + * (not counting the current invocation). + */ + default RetryVerdict onRequestAbortedVerdict( + @NonNull Request request, @NonNull Throwable error, int retryCount) { + RetryDecision decision = onRequestAborted(request, error, retryCount); + return () -> decision; + } + /** * Whether to retry when the server replied with a recoverable error (other than {@code * READ_TIMEOUT}, {@code WRITE_TIMEOUT} or {@code UNAVAILABLE}). @@ -168,10 +289,45 @@ RetryDecision onRequestAborted( * @param error the error. * @param retryCount how many times the retry policy has been invoked already for this request * (not counting the current invocation). + * @deprecated As of version 4.10, use {@link #onErrorResponseVerdict(Request, + * CoordinatorException, int)} instead. */ + @Deprecated RetryDecision onErrorResponse( @NonNull Request request, @NonNull CoordinatorException error, int retryCount); + /** + * Whether to retry when the server replied with a recoverable error (other than {@code + * READ_TIMEOUT}, {@code WRITE_TIMEOUT} or {@code UNAVAILABLE}). + * + *
<p>
      This can happen for the following errors: {@link OverloadedException}, {@link ServerError}, + * {@link TruncateException}, {@link ReadFailureException}, {@link WriteFailureException}. + * + *
<p>
      The following errors are handled internally by the driver, and therefore will never + * be encountered in this method: + * + *
<ul> + *   <li>{@link BootstrappingException}: always retried on the next node; + *   <li>{@link QueryValidationException} (and its subclasses), {@link FunctionFailureException} + *       and {@link ProtocolError}: always rethrown. + * </ul> + * + *
<p>Note that this method will only be invoked for {@linkplain Request#isIdempotent() idempotent} + * requests: when the server replies with one of these errors, it is impossible to determine with + * 100% certainty whether a mutation was applied or not, so a write is never safe to retry; the + * driver will rethrow the error directly, without invoking the retry policy. + * + * @param request the request that failed. + * @param error the error. + * @param retryCount how many times the retry policy has been invoked already for this request + * (not counting the current invocation). + */ + default RetryVerdict onErrorResponseVerdict( + @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { + RetryDecision decision = onErrorResponse(request, error, retryCount); + return () -> decision; + } + /** Called when the cluster that this policy is associated with closes. */ @Override void close(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java new file mode 100644 index 00000000000..9abb54156db --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/retry/RetryVerdict.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.retry; + +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.internal.core.retry.DefaultRetryVerdict; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * The verdict returned by a {@link RetryPolicy} determining what to do when a request failed. A + * verdict contains a {@link RetryDecision} indicating if a retry should be attempted at all and + * where, and a method that allows the original request to be modified before the retry. + */ +@FunctionalInterface +public interface RetryVerdict { + + /** A retry verdict that retries the same request on the same node. */ + RetryVerdict RETRY_SAME = new DefaultRetryVerdict(RetryDecision.RETRY_SAME); + + /** A retry verdict that retries the same request on the next node in the query plan. */ + RetryVerdict RETRY_NEXT = new DefaultRetryVerdict(RetryDecision.RETRY_NEXT); + + /** A retry verdict that ignores the error, returning an empty result set to the caller. */ + RetryVerdict IGNORE = new DefaultRetryVerdict(RetryDecision.IGNORE); + + /** A retry verdict that rethrows the execution error to the calling code. */ + RetryVerdict RETHROW = new DefaultRetryVerdict(RetryDecision.RETHROW); + + /** @return The retry decision to apply. */ + @NonNull + RetryDecision getRetryDecision(); + + /** + * Returns the request to retry, based on the request that was just executed (and failed). + * + *
<p>The default retry policy always returns the request as is. Custom retry policies can use + * this method to customize the request to retry, for example, by changing its consistency level, + * query timestamp, custom payload, or even its execution profile. + * + * @param <RequestT> The actual type of the request. + * @param previous The request that was just executed (and failed). + * @return The request to retry. + */ + @NonNull + default <RequestT extends Request> RequestT getRetryRequest(@NonNull RequestT previous) { + return previous; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java index 7128c8c2d4f..2bf541c91de 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/AlreadyExistsException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java index bd96aff1518..a408e0384f5 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/BootstrappingException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
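(For illustration only, not part of this patch: a minimal sketch of a custom verdict built on the API above. The class name and the choice to downgrade statements to consistency ONE are assumptions for the sketch; blind downgrades can mask real consistency problems, so this is a pattern demo rather than a recommendation.)

    import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
    import com.datastax.oss.driver.api.core.cql.Statement;
    import com.datastax.oss.driver.api.core.retry.RetryDecision;
    import com.datastax.oss.driver.api.core.retry.RetryVerdict;
    import com.datastax.oss.driver.api.core.session.Request;
    import edu.umd.cs.findbugs.annotations.NonNull;

    /** Retries on the next node, downgrading statements to consistency ONE. */
    public class DowngradingVerdict implements RetryVerdict {

      @NonNull
      @Override
      public RetryDecision getRetryDecision() {
        return RetryDecision.RETRY_NEXT;
      }

      @NonNull
      @Override
      public <RequestT extends Request> RequestT getRetryRequest(@NonNull RequestT previous) {
        if (previous instanceof Statement) {
          @SuppressWarnings("unchecked") // setConsistencyLevel returns the statement's own type
          RequestT downgraded =
              (RequestT) ((Statement<?>) previous).setConsistencyLevel(DefaultConsistencyLevel.ONE);
          return downgraded;
        }
        return previous; // non-statement requests are retried unchanged
      }
    }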
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java new file mode 100644 index 00000000000..477bf7813c9 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CASWriteUnknownException.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.servererrors; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.DriverException; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.session.Request; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * The result of a CAS operation is in an unknown state. + * + *
<p>This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, + * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if + * the request should be retried. If all other tried nodes also fail, this exception will appear in + * the {@link AllNodesFailedException} thrown to the client. + */ +public class CASWriteUnknownException extends QueryConsistencyException { + + public CASWriteUnknownException( + @NonNull Node coordinator, + @NonNull ConsistencyLevel consistencyLevel, + int received, + int blockFor) { + this( + coordinator, + String.format( + "CAS operation result is unknown - proposal was not accepted by a quorum. (%d / %d)", + received, blockFor), + consistencyLevel, + received, + blockFor, + null, + false); + } + + private CASWriteUnknownException( + @NonNull Node coordinator, + @NonNull String message, + @NonNull ConsistencyLevel consistencyLevel, + int received, + int blockFor, + ExecutionInfo executionInfo, + boolean writableStackTrace) { + super( + coordinator, + message, + consistencyLevel, + received, + blockFor, + executionInfo, + writableStackTrace); + } + + @NonNull + @Override + public DriverException copy() { + return new CASWriteUnknownException( + getCoordinator(), + getMessage(), + getConsistencyLevel(), + getReceived(), + getBlockFor(), + getExecutionInfo(), + true); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java new file mode 100644 index 00000000000..3ce782653ab --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CDCWriteFailureException.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.servererrors; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.DriverException; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.session.Request; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * An attempt was made to write to a commitlog segment which doesn't support CDC mutations. + * + *
<p>This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, + * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the + * request should be retried. If all other tried nodes also fail, this exception will appear in the + * {@link AllNodesFailedException} thrown to the client. + */ +public class CDCWriteFailureException extends QueryExecutionException { + + public CDCWriteFailureException(@NonNull Node coordinator) { + super(coordinator, "Commitlog does not support CDC mutations", null, false); + } + + public CDCWriteFailureException(@NonNull Node coordinator, @NonNull String message) { + super(coordinator, message, null, false); + } + + private CDCWriteFailureException( + @NonNull Node coordinator, + @NonNull String message, + @Nullable ExecutionInfo executionInfo, + boolean writableStackTrace) { + super(coordinator, message, executionInfo, writableStackTrace); + } + + @NonNull + @Override + public DriverException copy() { + return new CDCWriteFailureException(getCoordinator(), getMessage(), getExecutionInfo(), true); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java index 975d7252747..8f6052850df 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/CoordinatorException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java index b7fe225ff61..a24097e6e5b 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/DefaultWriteType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
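(For illustration only, not part of this patch: a sketch of how a policy might special-case the new CAS-unknown error, extending the driver's built-in DefaultRetryPolicy. The class name and the single-retry threshold are arbitrary choices for the sketch; per the RetryPolicy contract, only idempotent requests ever reach onErrorResponseVerdict, so the extra attempt is safe.)

    import com.datastax.oss.driver.api.core.context.DriverContext;
    import com.datastax.oss.driver.api.core.retry.RetryVerdict;
    import com.datastax.oss.driver.api.core.servererrors.CASWriteUnknownException;
    import com.datastax.oss.driver.api.core.servererrors.CoordinatorException;
    import com.datastax.oss.driver.api.core.session.Request;
    import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy;
    import edu.umd.cs.findbugs.annotations.NonNull;

    /** Retries a CAS write whose outcome is unknown once, on the next node. */
    public class CasAwareRetryPolicy extends DefaultRetryPolicy {

      public CasAwareRetryPolicy(DriverContext context, String profileName) {
        super(context, profileName);
      }

      @Override
      public RetryVerdict onErrorResponseVerdict(
          @NonNull Request request, @NonNull CoordinatorException error, int retryCount) {
        // Only idempotent requests reach this method, so a second attempt is safe.
        if (error instanceof CASWriteUnknownException && retryCount == 0) {
          return RetryVerdict.RETRY_NEXT;
        }
        return super.onErrorResponseVerdict(request, error, retryCount);
      }
    }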
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,4 +59,7 @@ public enum DefaultWriteType implements WriteType { */ CDC, ; + // Note that, for the sake of convenience, we also expose shortcuts to these constants on the + // WriteType interface. If you add a new enum constant, remember to update the interface as + // well. } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java index 1a9b178d0fe..31993762319 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java index 900d9c281ed..405efa47299 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidConfigurationInQueryException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java index bbf50a5b5fb..468de8a1bd0 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/InvalidQueryException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java index 4b7b4bb6d9a..f56a7f30a7e 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/OverloadedException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,7 +29,7 @@ /** * Thrown when the coordinator reported itself as being overloaded. * - *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponse(Request, + *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the * request should be retried. If all other tried nodes also fail, this exception will appear in the * {@link AllNodesFailedException} thrown to the client. @@ -35,7 +37,11 @@ public class OverloadedException extends QueryExecutionException { public OverloadedException(@NonNull Node coordinator) { - super(coordinator, String.format("%s is bootstrapping", coordinator), null, false); + super(coordinator, String.format("%s is overloaded", coordinator), null, false); + } + + public OverloadedException(@NonNull Node coordinator, @NonNull String message) { + super(coordinator, String.format("%s is overloaded: %s", coordinator, message), null, false); } private OverloadedException( diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java index 813bfa6cae9..898a857954f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ProtocolError.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java index 67c4fcd9ca0..4a6f97f3342 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryConsistencyException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java index a23d9d9dca7..541a32d9fba 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryExecutionException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java index 448a6026892..9c8dfe537b6 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java index adecf20ccbe..94c4404f8d9 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +35,7 @@ *
<p>
      This happens when some of the replicas that were contacted by the coordinator replied with an * error. * - *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponse(Request, + *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the * request should be retried. If all other tried nodes also fail, this exception will appear in the * {@link AllNodesFailedException} thrown to the client. diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java index 1d199a695eb..4dddfedf49a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,10 +29,10 @@ /** * A server-side timeout during a read query. * - *
<p>
      This exception is processed by {@link RetryPolicy#onReadTimeout(Request, ConsistencyLevel, - * int, int, boolean, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. + *
<p>
      This exception is processed by {@link RetryPolicy#onReadTimeoutVerdict(Request, + * ConsistencyLevel, int, int, boolean, int)}, which will decide if it is rethrown directly to the + * client or if the request should be retried. If all other tried nodes also fail, this exception + * will appear in the {@link AllNodesFailedException} thrown to the client. */ public class ReadTimeoutException extends QueryConsistencyException { @@ -45,7 +47,8 @@ public ReadTimeoutException( this( coordinator, String.format( - "Cassandra timeout during read query at consistency %s (%s)", + "Cassandra timeout during read query at consistency %s (%s). " + + "In case this was generated during read repair, the consistency level is not representative of the actual consistency.", consistencyLevel, formatDetails(received, blockFor, dataPresent)), consistencyLevel, received, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java index 9afe5ea45b3..de300803421 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/ServerError.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,7 +31,7 @@ * *
<p>
      This should be considered as a server bug and reported as such. * - *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponse(Request, + *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the * request should be retried. If all other tried nodes also fail, this exception will appear in the * {@link AllNodesFailedException} thrown to the client. diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java index 32c5e930741..708068c0299 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/SyntaxError.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java index 12f265e135d..2091d166e98 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/TruncateException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,7 +29,7 @@ /** * An error during a truncation operation. * - *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponse(Request, + *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the * request should be retried. If all other tried nodes also fail, this exception will appear in the * {@link AllNodesFailedException} thrown to the client. diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java index e15ab02dc4b..7a6235422de 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnauthorizedException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java index 98e119791d8..b9e9848ce36 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/UnavailableException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,10 +30,10 @@ * Thrown when the coordinator knows there is not enough replicas alive to perform a query with the * requested consistency level. * - *
<p>
      This exception is processed by {@link RetryPolicy#onUnavailable(Request, ConsistencyLevel, - * int, int, int)}, which will decide if it is rethrown directly to the client or if the request - * should be retried. If all other tried nodes also fail, this exception will appear in the {@link - * AllNodesFailedException} thrown to the client. + *
<p>
      This exception is processed by {@link RetryPolicy#onUnavailableVerdict(Request, + * ConsistencyLevel, int, int, int)}, which will decide if it is rethrown directly to the client or + * if the request should be retried. If all other tried nodes also fail, this exception will appear + * in the {@link AllNodesFailedException} thrown to the client. */ public class UnavailableException extends QueryExecutionException { private final ConsistencyLevel consistencyLevel; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java index f2589ff1b65..ffbbd2aef6f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +35,7 @@ *
<p>
      This happens when some of the replicas that were contacted by the coordinator replied with an * error. * - *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponse(Request, + *
<p>
      This exception is processed by {@link RetryPolicy#onErrorResponseVerdict(Request, * CoordinatorException, int)}, which will decide if it is rethrown directly to the client or if the * request should be retried. If all other tried nodes also fail, this exception will appear in the * {@link AllNodesFailedException} thrown to the client. diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java index 600b5e36895..9913dbd0a91 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,10 +30,10 @@ /** * A server-side timeout during a write query. * - *
<p>
      This exception is processed by {@link RetryPolicy#onWriteTimeout(Request, ConsistencyLevel, - * WriteType, int, int, int)}, which will decide if it is rethrown directly to the client or if the - * request should be retried. If all other tried nodes also fail, this exception will appear in the - * {@link AllNodesFailedException} thrown to the client. + *
<p>
      This exception is processed by {@link RetryPolicy#onWriteTimeoutVerdict(Request, + * ConsistencyLevel, WriteType, int, int, int)}, which will decide if it is rethrown directly to the + * client or if the request should be retried. If all other tried nodes also fail, this exception + * will appear in the {@link AllNodesFailedException} thrown to the client. */ public class WriteTimeoutException extends QueryConsistencyException { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java index e34d90ad78b..05ad99e5ce4 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/servererrors/WriteType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,6 +31,15 @@ */ public interface WriteType { + WriteType SIMPLE = DefaultWriteType.SIMPLE; + WriteType BATCH = DefaultWriteType.BATCH; + WriteType UNLOGGED_BATCH = DefaultWriteType.UNLOGGED_BATCH; + WriteType COUNTER = DefaultWriteType.COUNTER; + WriteType BATCH_LOG = DefaultWriteType.BATCH_LOG; + WriteType CAS = DefaultWriteType.CAS; + WriteType VIEW = DefaultWriteType.VIEW; + WriteType CDC = DefaultWriteType.CDC; + /** The textual representation that the write type is encoded to in protocol frames. */ @NonNull String name(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java index aeda89d3f2f..5e10fb4d915 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/session/ProgrammaticArguments.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
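(For illustration only, not part of this patch: with the new interface-level constants just added above, a write type can be compared directly against WriteType.BATCH_LOG without referencing DefaultWriteType. A sketch of an override in a hypothetical policy extending the built-in DefaultRetryPolicy, mirroring the stock behavior of retrying only batch-log timeouts once:)

    import com.datastax.oss.driver.api.core.ConsistencyLevel;
    import com.datastax.oss.driver.api.core.context.DriverContext;
    import com.datastax.oss.driver.api.core.retry.RetryVerdict;
    import com.datastax.oss.driver.api.core.servererrors.WriteType;
    import com.datastax.oss.driver.api.core.session.Request;
    import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy;
    import edu.umd.cs.findbugs.annotations.NonNull;

    /** Retries batch-log write timeouts once; rethrows everything else. */
    public class BatchLogRetryPolicy extends DefaultRetryPolicy {

      public BatchLogRetryPolicy(DriverContext context, String profileName) {
        super(context, profileName);
      }

      @Override
      public RetryVerdict onWriteTimeoutVerdict(
          @NonNull Request request,
          @NonNull ConsistencyLevel cl,
          @NonNull WriteType writeType,
          int blockFor,
          int received,
          int retryCount) {
        // WriteType.BATCH_LOG is the same object as DefaultWriteType.BATCH_LOG,
        // so identity comparison is safe for the known write types.
        return (retryCount == 0 && writeType == WriteType.BATCH_LOG)
            ? RetryVerdict.RETRY_SAME
            : RetryVerdict.RETHROW;
      }
    }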
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +17,30 @@ */ package com.datastax.oss.driver.api.core.session; +import com.datastax.oss.driver.api.core.auth.AuthProvider; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeStateListener; import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; import com.datastax.oss.driver.api.core.tracker.RequestTracker; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.internal.core.loadbalancing.helper.NodeFilterToDistanceEvaluatorAdapter; +import com.datastax.oss.driver.internal.core.metadata.MultiplexingNodeStateListener; +import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener; +import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.net.InetSocketAddress; import java.util.List; import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.UUID; import java.util.function.Predicate; /** @@ -45,25 +60,56 @@ public static Builder builder() { private final NodeStateListener nodeStateListener; private final SchemaChangeListener schemaChangeListener; private final RequestTracker requestTracker; + private final RequestIdGenerator requestIdGenerator; private final Map<String, String> localDatacenters; private final Map<String, Predicate<Node>> nodeFilters; + private final Map<String, NodeDistanceEvaluator> nodeDistanceEvaluators; private final ClassLoader classLoader; + private final AuthProvider authProvider; + private final SslEngineFactory sslEngineFactory; + private final InetSocketAddress cloudProxyAddress; + private final UUID startupClientId; + private final String startupApplicationName; + private final String startupApplicationVersion; + private final MutableCodecRegistry codecRegistry; + private final Object metricRegistry; private ProgrammaticArguments( @NonNull List<TypeCodec<?>> typeCodecs, @Nullable NodeStateListener nodeStateListener, @Nullable SchemaChangeListener schemaChangeListener, @Nullable RequestTracker requestTracker, + @Nullable RequestIdGenerator requestIdGenerator, @NonNull Map<String, String> localDatacenters, @NonNull Map<String, Predicate<Node>> nodeFilters, - @Nullable ClassLoader classLoader) { + @NonNull Map<String, NodeDistanceEvaluator> nodeDistanceEvaluators, + @Nullable ClassLoader classLoader, + @Nullable AuthProvider authProvider, + @Nullable SslEngineFactory sslEngineFactory, + @Nullable InetSocketAddress cloudProxyAddress, + @Nullable UUID startupClientId, + @Nullable String startupApplicationName, + @Nullable String startupApplicationVersion, + @Nullable MutableCodecRegistry codecRegistry, + @Nullable Object metricRegistry) { + this.typeCodecs = typeCodecs; this.nodeStateListener = nodeStateListener; this.schemaChangeListener = schemaChangeListener; this.requestTracker =
requestTracker; + this.requestIdGenerator = requestIdGenerator; this.localDatacenters = localDatacenters; this.nodeFilters = nodeFilters; + this.nodeDistanceEvaluators = nodeDistanceEvaluators; this.classLoader = classLoader; + this.authProvider = authProvider; + this.sslEngineFactory = sslEngineFactory; + this.cloudProxyAddress = cloudProxyAddress; + this.startupClientId = startupClientId; + this.startupApplicationName = startupApplicationName; + this.startupApplicationVersion = startupApplicationVersion; + this.codecRegistry = codecRegistry; + this.metricRegistry = metricRegistry; } @NonNull @@ -86,31 +132,94 @@ public RequestTracker getRequestTracker() { return requestTracker; } + @Nullable + public RequestIdGenerator getRequestIdGenerator() { + return requestIdGenerator; + } + @NonNull public Map getLocalDatacenters() { return localDatacenters; } @NonNull + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") public Map> getNodeFilters() { return nodeFilters; } + @NonNull + public Map getNodeDistanceEvaluators() { + return nodeDistanceEvaluators; + } + @Nullable public ClassLoader getClassLoader() { return classLoader; } + @Nullable + public AuthProvider getAuthProvider() { + return authProvider; + } + + @Nullable + public SslEngineFactory getSslEngineFactory() { + return sslEngineFactory; + } + + @Nullable + public InetSocketAddress getCloudProxyAddress() { + return cloudProxyAddress; + } + + @Nullable + public UUID getStartupClientId() { + return startupClientId; + } + + @Nullable + public String getStartupApplicationName() { + return startupApplicationName; + } + + @Nullable + public String getStartupApplicationVersion() { + return startupApplicationVersion; + } + + @Nullable + public MutableCodecRegistry getCodecRegistry() { + return codecRegistry; + } + + @Nullable + public Object getMetricRegistry() { + return metricRegistry; + } + public static class Builder { - private ImmutableList.Builder> typeCodecsBuilder = ImmutableList.builder(); + private final ImmutableList.Builder> typeCodecsBuilder = ImmutableList.builder(); private NodeStateListener nodeStateListener; private SchemaChangeListener schemaChangeListener; private RequestTracker requestTracker; + private RequestIdGenerator requestIdGenerator; private ImmutableMap.Builder localDatacentersBuilder = ImmutableMap.builder(); - private ImmutableMap.Builder> nodeFiltersBuilder = + private final ImmutableMap.Builder> nodeFiltersBuilder = ImmutableMap.builder(); + private final ImmutableMap.Builder + nodeDistanceEvaluatorsBuilder = ImmutableMap.builder(); private ClassLoader classLoader; + private AuthProvider authProvider; + private SslEngineFactory sslEngineFactory; + private InetSocketAddress cloudProxyAddress; + private UUID startupClientId; + private String startupApplicationName; + private String startupApplicationVersion; + private MutableCodecRegistry codecRegistry; + private Object metricRegistry; @NonNull public Builder addTypeCodecs(@NonNull TypeCodec... 
typeCodecs) { @@ -124,18 +233,83 @@ public Builder withNodeStateListener(@Nullable NodeStateListener nodeStateListen return this; } + @NonNull + public Builder addNodeStateListener(@NonNull NodeStateListener nodeStateListener) { + Objects.requireNonNull(nodeStateListener, "nodeStateListener cannot be null"); + if (this.nodeStateListener == null) { + this.nodeStateListener = nodeStateListener; + } else { + NodeStateListener previousListener = this.nodeStateListener; + if (previousListener instanceof MultiplexingNodeStateListener) { + ((MultiplexingNodeStateListener) previousListener).register(nodeStateListener); + } else { + MultiplexingNodeStateListener multiplexingNodeStateListener = + new MultiplexingNodeStateListener(); + multiplexingNodeStateListener.register(previousListener); + multiplexingNodeStateListener.register(nodeStateListener); + this.nodeStateListener = multiplexingNodeStateListener; + } + } + return this; + } + @NonNull public Builder withSchemaChangeListener(@Nullable SchemaChangeListener schemaChangeListener) { this.schemaChangeListener = schemaChangeListener; return this; } + @NonNull + public Builder addSchemaChangeListener(@NonNull SchemaChangeListener schemaChangeListener) { + Objects.requireNonNull(schemaChangeListener, "schemaChangeListener cannot be null"); + if (this.schemaChangeListener == null) { + this.schemaChangeListener = schemaChangeListener; + } else { + SchemaChangeListener previousListener = this.schemaChangeListener; + if (previousListener instanceof MultiplexingSchemaChangeListener) { + ((MultiplexingSchemaChangeListener) previousListener).register(schemaChangeListener); + } else { + MultiplexingSchemaChangeListener multiplexingSchemaChangeListener = + new MultiplexingSchemaChangeListener(); + multiplexingSchemaChangeListener.register(previousListener); + multiplexingSchemaChangeListener.register(schemaChangeListener); + this.schemaChangeListener = multiplexingSchemaChangeListener; + } + } + return this; + } + @NonNull public Builder withRequestTracker(@Nullable RequestTracker requestTracker) { this.requestTracker = requestTracker; return this; } + @NonNull + public Builder addRequestTracker(@NonNull RequestTracker requestTracker) { + Objects.requireNonNull(requestTracker, "requestTracker cannot be null"); + if (this.requestTracker == null) { + this.requestTracker = requestTracker; + } else { + RequestTracker previousTracker = this.requestTracker; + if (previousTracker instanceof MultiplexingRequestTracker) { + ((MultiplexingRequestTracker) previousTracker).register(requestTracker); + } else { + MultiplexingRequestTracker multiplexingRequestTracker = new MultiplexingRequestTracker(); + multiplexingRequestTracker.register(previousTracker); + multiplexingRequestTracker.register(requestTracker); + this.requestTracker = multiplexingRequestTracker; + } + } + return this; + } + + @NonNull + public Builder withRequestIdGenerator(@Nullable RequestIdGenerator requestIdGenerator) { + this.requestIdGenerator = requestIdGenerator; + return this; + } + @NonNull public Builder withLocalDatacenter( @NonNull String profileName, @NonNull String localDatacenter) { @@ -143,6 +317,12 @@ public Builder withLocalDatacenter( return this; } + @NonNull + public Builder clearDatacenters() { + this.localDatacentersBuilder = ImmutableMap.builder(); + return this; + } + @NonNull public Builder withLocalDatacenters(Map localDatacenters) { for (Map.Entry entry : localDatacenters.entrySet()) { @@ -152,16 +332,42 @@ public Builder withLocalDatacenters(Map localDatacenters) { } 
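[Editor's aside, not part of the diff: a minimal sketch of how the programmatic arguments added in this and the following hunk might be assembled. The profile name, datacenter name, and evaluator logic are hypothetical; returning null from the evaluator means "no opinion", deferring to the load balancing policy.]

```java
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator;
import com.datastax.oss.driver.api.core.session.ProgrammaticArguments;

// Ignore nodes outside the local datacenter; null defers to the policy.
NodeDistanceEvaluator ignoreRemoteNodes =
    (node, localDc) ->
        localDc != null && !localDc.equals(node.getDatacenter()) ? NodeDistance.IGNORED : null;

ProgrammaticArguments args =
    ProgrammaticArguments.builder()
        .withLocalDatacenter("default", "dc1") // hypothetical profile/DC names
        .withNodeDistanceEvaluator("default", ignoreRemoteNodes)
        .build();
```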
@NonNull + public Builder withNodeDistanceEvaluator( + @NonNull String profileName, @NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { + this.nodeDistanceEvaluatorsBuilder.put(profileName, nodeDistanceEvaluator); + return this; + } + + @NonNull + public Builder withNodeDistanceEvaluators( + Map nodeDistanceReporters) { + for (Entry entry : nodeDistanceReporters.entrySet()) { + this.nodeDistanceEvaluatorsBuilder.put(entry.getKey(), entry.getValue()); + } + return this; + } + + /** + * @deprecated Use {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} instead. + */ + @NonNull + @Deprecated public Builder withNodeFilter( @NonNull String profileName, @NonNull Predicate nodeFilter) { this.nodeFiltersBuilder.put(profileName, nodeFilter); + this.nodeDistanceEvaluatorsBuilder.put( + profileName, new NodeFilterToDistanceEvaluatorAdapter(nodeFilter)); return this; } + /** @deprecated Use {@link #withNodeDistanceEvaluators(Map)} instead. */ @NonNull + @Deprecated public Builder withNodeFilters(Map> nodeFilters) { for (Map.Entry> entry : nodeFilters.entrySet()) { this.nodeFiltersBuilder.put(entry.getKey(), entry.getValue()); + this.nodeDistanceEvaluatorsBuilder.put( + entry.getKey(), new NodeFilterToDistanceEvaluatorAdapter(entry.getValue())); } return this; } @@ -172,6 +378,54 @@ public Builder withClassLoader(@Nullable ClassLoader classLoader) { return this; } + @NonNull + public Builder withCloudProxyAddress(@Nullable InetSocketAddress cloudAddress) { + this.cloudProxyAddress = cloudAddress; + return this; + } + + @NonNull + public Builder withAuthProvider(@Nullable AuthProvider authProvider) { + this.authProvider = authProvider; + return this; + } + + @NonNull + public Builder withSslEngineFactory(@Nullable SslEngineFactory sslEngineFactory) { + this.sslEngineFactory = sslEngineFactory; + return this; + } + + @NonNull + public Builder withStartupClientId(@Nullable UUID startupClientId) { + this.startupClientId = startupClientId; + return this; + } + + @NonNull + public Builder withStartupApplicationName(@Nullable String startupApplicationName) { + this.startupApplicationName = startupApplicationName; + return this; + } + + @NonNull + public Builder withStartupApplicationVersion(@Nullable String startupApplicationVersion) { + this.startupApplicationVersion = startupApplicationVersion; + return this; + } + + @NonNull + public Builder withCodecRegistry(@Nullable MutableCodecRegistry codecRegistry) { + this.codecRegistry = codecRegistry; + return this; + } + + @NonNull + public Builder withMetricRegistry(@Nullable Object metricRegistry) { + this.metricRegistry = metricRegistry; + return this; + } + @NonNull public ProgrammaticArguments build() { return new ProgrammaticArguments( @@ -179,9 +433,19 @@ public ProgrammaticArguments build() { nodeStateListener, schemaChangeListener, requestTracker, + requestIdGenerator, localDatacentersBuilder.build(), nodeFiltersBuilder.build(), - classLoader); + nodeDistanceEvaluatorsBuilder.build(), + classLoader, + authProvider, + sslEngineFactory, + cloudProxyAddress, + startupClientId, + startupApplicationName, + startupApplicationVersion, + codecRegistry, + metricRegistry); } } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java index b9645fc7e43..7d122276cbf 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/session/Request.java @@ -1,11 +1,13 
@@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -78,47 +80,53 @@ public interface Request { CqlIdentifier getKeyspace(); /** - * The keyspace to use for token-aware routing, if no {@link #getKeyspace() per-request keyspace} - * is defined, or {@code null} if this request does not use token-aware routing. + * The keyspace to use for token-aware routing. * - *

<p>See {@link #getRoutingKey()} for a detailed explanation of token-aware routing.
+ * <p>Note that if a {@linkplain #getKeyspace() per-request keyspace} is already defined for this
+ * request, it takes precedence over this method.
  *
- * <p>Note that this is the only way to define a routing keyspace for protocol v4 or lower.
+ * <p>See {@link #getRoutingKey()} for a detailed explanation of token-aware routing.
  */
 @Nullable
 CqlIdentifier getRoutingKeyspace();

 /**
- * The (encoded) partition key to use for token-aware routing, or {@code null} if this request
- * does not use token-aware routing.
+ * The partition key to use for token-aware routing.
  *
- * <p>When the driver picks a coordinator to execute a request, it prioritizes the replicas of the
- * partition that this query operates on, in order to avoid an extra network jump on the server
- * side. To find these replicas, it needs a keyspace (which is where the replication settings are
- * defined) and a key, that are computed the following way:
+ * <p>For each request, the driver tries to determine a routing keyspace and a routing key by
+ * calling the following methods:
  *
  * <ul>
- *   <li>if a per-request keyspace is specified with {@link #getKeyspace()}, it is used as the
- *       keyspace;
- *   <li>otherwise, if {@link #getRoutingKeyspace()} is specified, it is used as the keyspace;
- *   <li>otherwise, if {@link Session#getKeyspace()} is not {@code null}, it is used as the
- *       keyspace;
- *   <li>if a routing token is defined with {@link #getRoutingToken()}, it is used as the key;
- *   <li>otherwise, the result of this method is used as the key.
+ *   <li>routing keyspace:
+ *       <ol>
+ *         <li>the result of {@link #getKeyspace()}, if not null;
+ *         <li>otherwise, the result of {@link #getRoutingKeyspace()}, if not null;
+ *         <li>otherwise, the result of {@link Session#getKeyspace()}, if not empty;
+ *         <li>otherwise, null.
+ *       </ol>
+ *   <li>routing key:
+ *       <ol>
+ *         <li>the result of {@link #getRoutingToken()}, if not null;
+ *         <li>otherwise, the result of {@link #getRoutingKey()}, if not null;
+ *         <li>otherwise, null.
+ *       </ol>
  * </ul>
  *
- * If either keyspace or key is {@code null} at the end of this process, then token-aware routing
- * is disabled.
+ * This provides a hint of the partition that the request operates on. When the driver picks a
+ * coordinator for execution, it will prioritize the replicas that own that partition, in order to
+ * avoid an extra network jump on the server side.
+ *
+ * <p>Routing information is optional: if either keyspace or key is null, token-aware routing is
+ * disabled for this request.
  */
 @Nullable
 ByteBuffer getRoutingKey();

 /**
- * The token to use for token-aware routing, or {@code null} if this request does not use
- * token-aware routing.
+ * The token to use for token-aware routing.
  *
- * <p>This is the same information as {@link #getRoutingKey()}, but already hashed in a token. It
- * is probably more useful for analytics tools that "shard" a query on a set of token ranges.
+ * <p>This is an alternative to {@link #getRoutingKey()}. Both methods represent the same
+ * information; a request can provide one or the other.
  *
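[Editor's aside, not part of the diff: under the resolution order described above, a caller can supply both routing hints explicitly on a statement. The keyspace, query, and codec choice below are placeholders.]

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import java.util.UUID;

UUID userId = UUID.randomUUID(); // placeholder partition key value
SimpleStatement statement =
    SimpleStatement.newInstance("SELECT * FROM users WHERE id = ?", userId)
        .setRoutingKeyspace(CqlIdentifier.fromCql("inventory")) // placeholder keyspace
        // The routing key is the encoded partition key (a single uuid column here):
        .setRoutingKey(TypeCodecs.UUID.encode(userId, ProtocolVersion.DEFAULT));
```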
      See {@link #getRoutingKey()} for a detailed explanation of token-aware routing. */ diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java index 65c49988f0c..e047bf2fe09 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/session/Session.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,15 +40,13 @@ /** * A nexus to send requests to a Cassandra cluster. * - *

      This is a high-level abstraction capable of handling arbitrary request and result types. For - * CQL statements, {@link CqlSession} provides convenience methods with more familiar signatures (by - * default, all the instances returned by the driver also implement {@link CqlSession}). + *

      This is a high-level abstraction capable of handling arbitrary request and result types. The + * driver's built-in {@link CqlSession} is a more convenient subtype for most client applications. * *

      The driver's request execution logic is pluggable (see {@code RequestProcessor} in the - * internal API). This is intended for future extensions, for example a reactive API for CQL - * statements, or graph requests in the Datastax Enterprise driver. Hence the generic {@link - * #execute(Request, GenericType)} method in this interface, that makes no assumptions about the - * request or result type. + * internal API) to allow custom extensions. Hence the generic {@link #execute(Request, + * GenericType)} method in this interface, that makes no assumptions about the request or result + * type. * * @see CqlSession#builder() */ @@ -64,9 +64,15 @@ public interface Session extends AsyncAutoCloseable { Session.class.getResource("/com/datastax/oss/driver/Driver.properties")); /** - * The unique name identifying this client. + * The unique name identifying this session instance. This is used as a prefix for log messages + * and metrics. + * + *

      This gets populated from the option {@code basic.session-name} in the configuration. If that + * option is absent, the driver will generate an identifier composed of the letter 's' followed by + * an incrementing counter. * - * @see DefaultDriverOption#SESSION_NAME + *

Note that this is purely a client-side identifier; in particular, it has no relation to + * {@code system.local.cluster_name} on the server. */ @NonNull String getName(); @@ -193,8 +199,15 @@ default boolean checkSchemaAgreement() { Optional<CqlIdentifier> getKeyspace(); /** - * Returns a gateway to the driver's metrics, or {@link Optional#empty()} if all metrics are - * disabled. + * Returns a gateway to the driver's DropWizard metrics, or {@link Optional#empty()} if all + * metrics are disabled, or if the driver has been configured to use MicroProfile or Micrometer + * instead of DropWizard (see {@code advanced.metrics.factory.class} in the configuration). + * + *

      {@link Metrics} was originally intended to allow programmatic access to the metrics, but it + * has a hard dependency to the DropWizard API, which makes it unsuitable for alternative metric + * frameworks. A workaround is to inject your own metric registry with {@link + * SessionBuilder#withMetricRegistry(Object)} when you build the session. You can then use the + * framework's proprietary APIs to retrieve the metrics from the registry. */ @NonNull Optional getMetrics(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java index 587f8c4e225..25500119047 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/session/SessionBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,57 +19,88 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.auth.AuthProvider; +import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; +import com.datastax.oss.driver.api.core.auth.ProgrammaticPlainTextAuthProvider; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeStateListener; import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory; +import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; import com.datastax.oss.driver.api.core.tracker.RequestTracker; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.api.core.uuid.Uuids; import com.datastax.oss.driver.internal.core.ContactPoints; +import com.datastax.oss.driver.internal.core.config.cloud.CloudConfig; +import com.datastax.oss.driver.internal.core.config.cloud.CloudConfigFactory; import 
com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.internal.core.tracker.W3CContextRequestIdGenerator; import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.InputStream; import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; import java.util.concurrent.CompletionStage; import java.util.function.Predicate; -import java.util.function.Supplier; +import javax.net.ssl.SSLContext; import net.jcip.annotations.NotThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Base implementation to build session instances. * *

      You only need to deal with this directly if you use custom driver extensions. For the default * session implementation, see {@link CqlSession#builder()}. + * + *

      This class is mutable and not thread-safe. */ @NotThreadSafe public abstract class SessionBuilder { + public static final String ASTRA_PAYLOAD_KEY = "traceparent"; + + private static final Logger LOG = LoggerFactory.getLogger(SessionBuilder.class); + @SuppressWarnings("unchecked") protected final SelfT self = (SelfT) this; protected DriverConfigLoader configLoader; protected Set programmaticContactPoints = new HashSet<>(); protected CqlIdentifier keyspace; + protected Callable cloudConfigInputStream; protected ProgrammaticArguments.Builder programmaticArgumentsBuilder = ProgrammaticArguments.builder(); + private boolean programmaticSslFactory = false; + private boolean programmaticLocalDatacenter = false; /** * Sets the configuration loader to use. @@ -101,10 +134,20 @@ public SelfT withConfigLoader(@Nullable DriverConfigLoader configLoader) { } @NonNull + @Deprecated protected DriverConfigLoader defaultConfigLoader() { return new DefaultDriverConfigLoader(); } + @NonNull + protected DriverConfigLoader defaultConfigLoader(@Nullable ClassLoader classLoader) { + if (classLoader == null) { + return new DefaultDriverConfigLoader(); + } else { + return new DefaultDriverConfigLoader(classLoader); + } + } + /** * Adds contact points to use for the initial connection to the cluster. * @@ -183,8 +226,11 @@ public SelfT addTypeCodecs(@NonNull TypeCodec... typeCodecs) { /** * Registers a node state listener to use with the session. * - *

      If the listener is specified programmatically with this method, it overrides the - * configuration (that is, the {@code metadata.node-state-listener.class} option will be ignored). + *

      Listeners can be registered in two ways: either programmatically with this method, or via + * the configuration using the {@code advanced.metadata.node-state-listener.classes} option. + * + *

      This method unregisters any previously-registered listener. If you intend to register more + * than one listener, use {@link #addNodeStateListener(NodeStateListener)} instead. */ @NonNull public SelfT withNodeStateListener(@Nullable NodeStateListener nodeStateListener) { @@ -192,12 +238,32 @@ public SelfT withNodeStateListener(@Nullable NodeStateListener nodeStateListener return self; } + /** + * Registers a node state listener to use with the session, without removing previously-registered + * listeners. + * + *

      Listeners can be registered in two ways: either programmatically with this method, or via + * the configuration using the {@code advanced.metadata.node-state-listener.classes} option. + * + *

      Unlike {@link #withNodeStateListener(NodeStateListener)}, this method adds the new listener + * to the list of already-registered listeners, thus allowing applications to register multiple + * listeners. When multiple listeners are registered, they are notified in sequence every time a + * new listener event is triggered. + */ + @NonNull + public SelfT addNodeStateListener(@NonNull NodeStateListener nodeStateListener) { + programmaticArgumentsBuilder.addNodeStateListener(nodeStateListener); + return self; + } + /** * Registers a schema change listener to use with the session. * - *
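[Editor's aside, not part of the diff: a hedged sketch of the accumulating behavior just described. It assumes {@code NodeStateListenerBase}, the empty-method convenience base in the same package; both listeners are notified, in registration order.]

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase;

CqlSession session =
    CqlSession.builder()
        .addNodeStateListener(
            new NodeStateListenerBase() {
              @Override
              public void onUp(Node node) {
                System.out.println("up: " + node.getEndPoint());
              }
            })
        .addNodeStateListener(
            new NodeStateListenerBase() {
              @Override
              public void onDown(Node node) {
                System.out.println("down: " + node.getEndPoint());
              }
            })
        .build();
```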

      If the listener is specified programmatically with this method, it overrides the - * configuration (that is, the {@code metadata.schema-change-listener.class} option will be - * ignored). + *

      Listeners can be registered in two ways: either programmatically with this method, or via + * the configuration using the {@code advanced.metadata.schema-change-listener.classes} option. + * + *

      This method unregisters any previously-registered listener. If you intend to register more + * than one listener, use {@link #addSchemaChangeListener(SchemaChangeListener)} instead. */ @NonNull public SelfT withSchemaChangeListener(@Nullable SchemaChangeListener schemaChangeListener) { @@ -206,10 +272,31 @@ public SelfT withSchemaChangeListener(@Nullable SchemaChangeListener schemaChang } /** - * Register a request tracker to use with the session. + * Registers a schema change listener to use with the session, without removing + * previously-registered listeners. + * + *

      Listeners can be registered in two ways: either programmatically with this method, or via + * the configuration using the {@code advanced.metadata.schema-change-listener.classes} option. * - *

      If the tracker is specified programmatically with this method, it overrides the - * configuration (that is, the {@code request.tracker.class} option will be ignored). + *

      Unlike {@link #withSchemaChangeListener(SchemaChangeListener)}, this method adds the new + * listener to the list of already-registered listeners, thus allowing applications to register + * multiple listeners. When multiple listeners are registered, they are notified in sequence every + * time a new listener event is triggered. + */ + @NonNull + public SelfT addSchemaChangeListener(@NonNull SchemaChangeListener schemaChangeListener) { + programmaticArgumentsBuilder.addSchemaChangeListener(schemaChangeListener); + return self; + } + + /** + * Registers a request tracker to use with the session. + * + *

      Trackers can be registered in two ways: either programmatically with this method, or via the + * configuration using the {@code advanced.request-tracker.classes} option. + * + *

      This method unregisters any previously-registered tracker. If you intend to register more + * than one tracker, use {@link #addRequestTracker(RequestTracker)} instead. */ @NonNull public SelfT withRequestTracker(@Nullable RequestTracker requestTracker) { @@ -217,6 +304,149 @@ public SelfT withRequestTracker(@Nullable RequestTracker requestTracker) { return self; } + /** + * Registers a request tracker to use with the session, without removing previously-registered + * trackers. + * + *

      Trackers can be registered in two ways: either programmatically with this method, or via the + * configuration using the {@code advanced.request-tracker.classes} option. + * + *

Unlike {@link #withRequestTracker(RequestTracker)}, this method adds the new tracker to the + * list of already-registered trackers, thus allowing applications to register multiple trackers. + * When multiple trackers are registered, they are notified in sequence every time a new tracker + * event is triggered. + */ + @NonNull + public SelfT addRequestTracker(@NonNull RequestTracker requestTracker) { + programmaticArgumentsBuilder.addRequestTracker(requestTracker); + return self; + } + + /** + * Registers a request ID generator. The driver will use the generated ID in the logs and + * optionally add it to the custom payload so that users can correlate logs about the same request + * from the Cassandra side. + */ + @NonNull + public SelfT withRequestIdGenerator(@NonNull RequestIdGenerator requestIdGenerator) { + this.programmaticArgumentsBuilder.withRequestIdGenerator(requestIdGenerator); + return self; + } + + /** + * Registers an authentication provider to use with the session. + * + *

      If the provider is specified programmatically with this method, it overrides the + * configuration (that is, the {@code advanced.auth-provider.class} option will be ignored). + */ + @NonNull + public SelfT withAuthProvider(@Nullable AuthProvider authProvider) { + this.programmaticArgumentsBuilder.withAuthProvider(authProvider); + return self; + } + + /** + * Configures the session to use plaintext authentication with the given username and password. + * + *

This method calls {@link #withAuthProvider(AuthProvider)} to register a special provider + * implementation. Therefore calling it overrides the configuration (that is, the {@code + * advanced.auth-provider.class} option will be ignored). + *

      Note that this approach holds the credentials in clear text in memory, which makes them + * vulnerable to an attacker who is able to perform memory dumps. If this is not acceptable for + * you, consider writing your own {@link AuthProvider} implementation ({@link + * PlainTextAuthProviderBase} is a good starting point), and providing it either with {@link + * #withAuthProvider(AuthProvider)} or via the configuration ({@code + * advanced.auth-provider.class}). + */ + @NonNull + public SelfT withAuthCredentials(@NonNull String username, @NonNull String password) { + return withAuthProvider(new ProgrammaticPlainTextAuthProvider(username, password)); + } + + /** + * Configures the session to use DSE plaintext authentication with the given username and + * password, and perform proxy authentication with the given authorization id. + * + *
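[Editor's aside, not part of the diff: a minimal usage sketch with placeholder credentials, equivalent to configuring the plain-text provider in the config file.]

```java
import com.datastax.oss.driver.api.core.CqlSession;

CqlSession session =
    CqlSession.builder()
        .withAuthCredentials("app_user", "app_password") // placeholder credentials
        .build();
```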

      This feature is only available in DataStax Enterprise. If connecting to Apache Cassandra, + * the authorization id will be ignored; it is recommended to use {@link + * #withAuthCredentials(String, String)} instead. + * + *

This method calls {@link #withAuthProvider(AuthProvider)} to register a special provider + * implementation. Therefore calling it overrides the configuration (that is, the {@code + * advanced.auth-provider.class} option will be ignored). + *

Note that this approach holds the credentials in clear text in memory, which makes them + * vulnerable to an attacker who is able to perform memory dumps. If this is not acceptable for + * you, consider writing your own {@link AuthProvider} implementation (the internal class {@code + * PlainTextAuthProviderBase} is a good starting point), and providing it either with {@link + * #withAuthProvider(AuthProvider)} or via the configuration ({@code + * advanced.auth-provider.class}). + */ + @NonNull + public SelfT withAuthCredentials( + @NonNull String username, @NonNull String password, @NonNull String authorizationId) { + return withAuthProvider( + new ProgrammaticPlainTextAuthProvider(username, password, authorizationId)); + } + + /** + * @deprecated this method only exists to ease the transition from driver 3; it is an alias for + * {@link #withAuthCredentials(String, String)}. + */ + @Deprecated + @NonNull + public SelfT withCredentials(@NonNull String username, @NonNull String password) { + return withAuthCredentials(username, password); + } + + /** + * @deprecated this method only exists to ease the transition from driver 3; it is an alias for + * {@link #withAuthCredentials(String, String, String)}. + */ + @Deprecated + @NonNull + public SelfT withCredentials( + @NonNull String username, @NonNull String password, @NonNull String authorizationId) { + return withAuthCredentials(username, password, authorizationId); + } + + /** + * Registers an SSL engine factory for the session. + * + *

      If the factory is provided programmatically with this method, it overrides the configuration + * (that is, the {@code advanced.ssl-engine-factory} option will be ignored). + * + * @see ProgrammaticSslEngineFactory + */ + @NonNull + public SelfT withSslEngineFactory(@Nullable SslEngineFactory sslEngineFactory) { + this.programmaticSslFactory = true; + this.programmaticArgumentsBuilder.withSslEngineFactory(sslEngineFactory); + return self; + } + + /** + * Configures the session to use SSL with the given context. + * + *

      This is a convenience method for clients that already have an {@link SSLContext} instance. + * It wraps its argument into a {@link ProgrammaticSslEngineFactory}, and passes it to {@link + * #withSslEngineFactory(SslEngineFactory)}. + * + *

      If you use this method, there is no way to customize cipher suites, or turn on host name + * validation. If you need finer control, use {@link #withSslEngineFactory(SslEngineFactory)} + * directly and pass either your own implementation of {@link SslEngineFactory}, or a {@link + * ProgrammaticSslEngineFactory} created with custom cipher suites and/or host name validation. + * + *
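[Editor's aside, not part of the diff: a sketch of the convenience method being documented here. SSLContext.getDefault() throws NoSuchAlgorithmException, assumed to be declared or handled by the enclosing method.]

```java
import com.datastax.oss.driver.api.core.CqlSession;
import javax.net.ssl.SSLContext;

// Uses the JVM's default SSL context; no custom cipher suites or hostname validation.
CqlSession session =
    CqlSession.builder()
        .withSslContext(SSLContext.getDefault())
        .build();
```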

      Also, note that SSL engines will be created with advisory peer information ({@link + * SSLContext#createSSLEngine(String, int)}) whenever possible. + */ + @NonNull + public SelfT withSslContext(@Nullable SSLContext sslContext) { + return withSslEngineFactory( + sslContext == null ? null : new ProgrammaticSslEngineFactory(sslContext)); + } + /** * Specifies the datacenter that is considered "local" by the load balancing policy. * @@ -229,6 +459,7 @@ public SelfT withRequestTracker(@Nullable RequestTracker requestTracker) { * if you use a third-party implementation, refer to their documentation. */ public SelfT withLocalDatacenter(@NonNull String profileName, @NonNull String localDatacenter) { + this.programmaticLocalDatacenter = true; this.programmaticArgumentsBuilder.withLocalDatacenter(profileName, localDatacenter); return self; } @@ -238,6 +469,36 @@ public SelfT withLocalDatacenter(@NonNull String localDatacenter) { return withLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME, localDatacenter); } + /** + * Adds a custom {@link NodeDistanceEvaluator} for a particular execution profile. This assumes + * that you're also using a dedicated load balancing policy for that profile. + * + *

Node distance evaluators are honored by all of the driver's built-in load balancing policies. If + * you use a custom policy implementation, however, you'll need to explicitly invoke the evaluator + * whenever appropriate. + *

      If an evaluator is specified programmatically with this method, it overrides the + * configuration (that is, the {@code load-balancing-policy.evaluator.class} option will be + * ignored). + * + * @see #withNodeDistanceEvaluator(NodeDistanceEvaluator) + */ + @NonNull + public SelfT withNodeDistanceEvaluator( + @NonNull String profileName, @NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { + this.programmaticArgumentsBuilder.withNodeDistanceEvaluator(profileName, nodeDistanceEvaluator); + return self; + } + + /** + * Alias to {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} for the default + * profile. + */ + @NonNull + public SelfT withNodeDistanceEvaluator(@NonNull NodeDistanceEvaluator nodeDistanceEvaluator) { + return withNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME, nodeDistanceEvaluator); + } + /** * Adds a custom filter to include/exclude nodes for a particular execution profile. This assumes * that you're also using a dedicated load balancing policy for that profile. @@ -247,21 +508,60 @@ public SelfT withLocalDatacenter(@NonNull String localDatacenter) { * policy will suggest distance IGNORED (meaning the driver won't ever connect to it if all * policies agree), and never included in any query plan. * - *

      Note that this behavior is implemented in the default load balancing policy. If you use a - * custom policy implementation, you'll need to explicitly invoke the filter. + *

Note that this behavior is implemented in the driver's built-in load balancing policies. If + * you use a custom policy implementation, you'll need to explicitly invoke the filter. * *

      If the filter is specified programmatically with this method, it overrides the configuration * (that is, the {@code load-balancing-policy.filter.class} option will be ignored). * + *

      This method has been deprecated in favor of {@link + * #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)}. If you were using node + * filters, you can easily replace your filters with the following implementation of {@link + * NodeDistanceEvaluator}: + * + *

+ * <pre>{@code
      +   * public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator {
      +   *
+   *   private final Predicate<Node> nodeFilter;
      +   *
+   *   public NodeFilterToDistanceEvaluatorAdapter(Predicate<Node> nodeFilter) {
      +   *     this.nodeFilter = nodeFilter;
      +   *   }
      +   *
      +   *   public NodeDistance evaluateDistance(Node node, String localDc) {
      +   *     return nodeFilter.test(node) ? null : NodeDistance.IGNORED;
      +   *   }
      +   * }
+ * }</pre>
+ *
+ * The same can be achieved using a lambda + closure:
+ *
+ * <pre>{@code
+   * Predicate<Node> nodeFilter = ...
      +   * NodeDistanceEvaluator evaluator =
      +   *   (node, localDc) -> nodeFilter.test(node) ? null : NodeDistance.IGNORED;
+ * }</pre>
      + * * @see #withNodeFilter(Predicate) + * @deprecated Use {@link #withNodeDistanceEvaluator(String, NodeDistanceEvaluator)} instead. */ + @Deprecated @NonNull public SelfT withNodeFilter(@NonNull String profileName, @NonNull Predicate nodeFilter) { this.programmaticArgumentsBuilder.withNodeFilter(profileName, nodeFilter); return self; } - /** Alias to {@link #withNodeFilter(String, Predicate)} for the default profile. */ + /** + * Alias to {@link #withNodeFilter(String, Predicate)} for the default profile. + * + *

      This method has been deprecated in favor of {@link + * #withNodeDistanceEvaluator(NodeDistanceEvaluator)}. See the javadocs of {@link + * #withNodeFilter(String, Predicate)} to understand how to migrate your legacy node filters. + * + * @deprecated Use {@link #withNodeDistanceEvaluator(NodeDistanceEvaluator)} instead. + */ + @Deprecated @NonNull public SelfT withNodeFilter(@NonNull Predicate nodeFilter) { return withNodeFilter(DriverExecutionProfile.DEFAULT_NAME, nodeFilter); @@ -291,11 +591,49 @@ public SelfT withKeyspace(@Nullable String keyspaceName) { /** * The {@link ClassLoader} to use to reflectively load class names defined in configuration. * - *

      This is typically only needed when using OSGi or other in environments where there are - * complex class loading requirements. + *

      Unless you define a custom {@link #configLoader}, this class loader will also be used to + * locate application-specific configuration resources. + * + *

      If you do not provide any custom class loader, the driver will attempt to use the following + * ones: + * + *

+ * <ol>
+ *   <li>When reflectively loading class names defined in configuration: same class loader that
+ *       loaded the core driver classes.
+ *   <li>When locating application-specific configuration resources: the current thread's
+ *       {@linkplain Thread#getContextClassLoader() context class loader}.
+ * </ol>
+ *
+ * This is generally the right thing to do.
  *
- * <p>

      If null, the driver attempts to use {@link Thread#getContextClassLoader()} of the current - * thread or the same {@link ClassLoader} that loaded the core driver classes. + *

      Defining a different class loader is typically only needed in web or OSGi environments where + * there are complex class loading requirements. + * + *

      For example, if the driver jar is loaded by the web server's system class loader (that is, + * the driver jar was placed in the "/lib" folder of the web server), but the application tries to + * load a custom load balancing policy declared in the web app's "WEB-INF/lib" folder, the system + * class loader will not be able to load such class. Instead, you must use the web app's class + * loader, that you can obtain by calling {@link Thread#getContextClassLoader()}: + * + *

+ * <pre>{@code
      +   * CqlSession.builder()
      +   *   .addContactEndPoint(...)
      +   *   .withClassLoader(Thread.currentThread().getContextClassLoader())
      +   *   .build();
+ * }</pre>
      + * + * Indeed, in most web environments, {@code Thread.currentThread().getContextClassLoader()} will + * return the web app's class loader, which is a child of the web server's system class loader. + * This class loader is thus capable of loading both the implemented interface and the + * implementing class, in spite of them being declared in different places. + * + *

      For OSGi deployments, it is usually not necessary to use this method. Even if the + * implemented interface and the implementing class are located in different bundles, the right + * class loader to use should be the default one (the driver bundle's class loader). In + * particular, it is not advised to rely on {@code Thread.currentThread().getContextClassLoader()} + * in OSGi environments, so you should never pass that class loader to this method. See Using + * a custom ClassLoader in our OSGi online docs for more information. */ @NonNull public SelfT withClassLoader(@Nullable ClassLoader classLoader) { @@ -303,9 +641,192 @@ public SelfT withClassLoader(@Nullable ClassLoader classLoader) { return self; } + /** + * Configures this SessionBuilder for Cloud deployments by retrieving connection information from + * the provided {@link Path}. + * + *

To connect to a Cloud database, you must first download the secure database bundle from the + * DataStax Astra console that contains the connection information, then instruct the driver to + * read its contents using either this method or one of its variants. + *

      For more information, please refer to the DataStax Astra documentation. + * + * @param cloudConfigPath Path to the secure connect bundle zip file. + * @see #withCloudSecureConnectBundle(URL) + * @see #withCloudSecureConnectBundle(InputStream) + */ + @NonNull + public SelfT withCloudSecureConnectBundle(@NonNull Path cloudConfigPath) { + try { + URL cloudConfigUrl = cloudConfigPath.toAbsolutePath().normalize().toUri().toURL(); + this.cloudConfigInputStream = cloudConfigUrl::openStream; + } catch (MalformedURLException e) { + throw new IllegalArgumentException("Incorrect format of cloudConfigPath", e); + } + return self; + } + + /** + * Registers a CodecRegistry to use for the session. + * + *
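[Editor's aside, not part of the diff: the typical connection sketch built on this method. The bundle path and credentials are placeholders.]

```java
import com.datastax.oss.driver.api.core.CqlSession;
import java.nio.file.Paths;

CqlSession session =
    CqlSession.builder()
        .withCloudSecureConnectBundle(Paths.get("/path/to/secure-connect-db.zip"))
        .withAuthCredentials("clientId", "clientSecret") // placeholder credentials
        .build();
```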

      When both this and {@link #addTypeCodecs(TypeCodec[])} are called, the added type codecs + * will be registered on the provided CodecRegistry. + */ + @NonNull + public SelfT withCodecRegistry(@Nullable MutableCodecRegistry codecRegistry) { + this.programmaticArgumentsBuilder.withCodecRegistry(codecRegistry); + return self; + } + + /** + * Configures this SessionBuilder for Cloud deployments by retrieving connection information from + * the provided {@link URL}. + * + *
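[Editor's aside, not part of the diff: a sketch of providing your own registry. {@code DefaultCodecRegistry} lives in the internal package; its constructor argument is only a log prefix.]

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;
import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry;

MutableCodecRegistry registry = new DefaultCodecRegistry("my-app");
CqlSession session =
    CqlSession.builder()
        .withCodecRegistry(registry)
        .build();
// Codecs passed to addTypeCodecs(...) would also end up registered on this instance.
```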

To connect to a Cloud database, you must first download the secure database bundle from the + * DataStax Astra console that contains the connection information, then instruct the driver to + * read its contents using either this method or one of its variants. + *

      For more information, please refer to the DataStax Astra documentation. + * + * @param cloudConfigUrl URL to the secure connect bundle zip file. + * @see #withCloudSecureConnectBundle(Path) + * @see #withCloudSecureConnectBundle(InputStream) + */ + @NonNull + public SelfT withCloudSecureConnectBundle(@NonNull URL cloudConfigUrl) { + this.cloudConfigInputStream = cloudConfigUrl::openStream; + return self; + } + + /** + * Configures this SessionBuilder for Cloud deployments by retrieving connection information from + * the provided {@link InputStream}. + * + *

To connect to a Cloud database, you must first download the secure database bundle from the + * DataStax Astra console that contains the connection information, then instruct the driver to + * read its contents using either this method or one of its variants. + *

      For more information, please refer to the DataStax Astra documentation. + * + *

Note that the provided stream will be consumed and closed when either {@link + * #build()} or {@link #buildAsync()} is called; attempting to reuse it afterwards will result in + * an error being thrown. + * + * @param cloudConfigInputStream A stream containing the secure connect bundle zip file. + * @see #withCloudSecureConnectBundle(Path) + * @see #withCloudSecureConnectBundle(URL) + */ + @NonNull + public SelfT withCloudSecureConnectBundle(@NonNull InputStream cloudConfigInputStream) { + this.cloudConfigInputStream = () -> cloudConfigInputStream; + return self; + } + + /** + * Configures this SessionBuilder to use the provided Cloud proxy endpoint. + * + *

      Normally, this method should not be called directly; the normal and easiest way to configure + * the driver for Cloud deployments is through a {@linkplain #withCloudSecureConnectBundle(URL) + * secure connect bundle}. + * + *

      Setting this option to any non-null address will make the driver use a special topology + * monitor tailored for Cloud deployments. This topology monitor assumes that the target cluster + * should be contacted through the proxy specified here, using SNI routing. + * + *

      For more information, please refer to the DataStax Astra documentation. + * + * @param cloudProxyAddress The address of the Cloud proxy to use. + * @see Server Name Indication + */ + @NonNull + public SelfT withCloudProxyAddress(@Nullable InetSocketAddress cloudProxyAddress) { + this.programmaticArgumentsBuilder.withCloudProxyAddress(cloudProxyAddress); + return self; + } + + /** + * A unique identifier for the created session. + * + *

      It will be sent in the {@code STARTUP} protocol message, under the key {@code CLIENT_ID}, + * for each new connection established by the driver. Currently, this information is used by + * Insights monitoring (if the target cluster does not support Insights, the entry will be ignored + * by the server). + * + *

      If you don't call this method, the driver will generate an identifier with {@link + * Uuids#random()}. + */ + @NonNull + public SelfT withClientId(@Nullable UUID clientId) { + this.programmaticArgumentsBuilder.withStartupClientId(clientId); + return self; + } + + /** + * The name of the application using the created session. + * + *

      It will be sent in the {@code STARTUP} protocol message, under the key {@code + * APPLICATION_NAME}, for each new connection established by the driver. Currently, this + * information is used by Insights monitoring (if the target cluster does not support Insights, + * the entry will be ignored by the server). + * + *

      This can also be defined in the driver configuration with the option {@code + * basic.application.name}; if you specify both, this method takes precedence and the + * configuration option will be ignored. If neither is specified, the entry is not included in the + * message. + */ + @NonNull + public SelfT withApplicationName(@Nullable String applicationName) { + this.programmaticArgumentsBuilder.withStartupApplicationName(applicationName); + return self; + } + + /** + * The version of the application using the created session. + * + *
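[Editor's aside, not part of the diff: both startup entries in one sketch; the values are placeholders.]

```java
import com.datastax.oss.driver.api.core.CqlSession;

CqlSession session =
    CqlSession.builder()
        .withApplicationName("inventory-service")
        .withApplicationVersion("1.4.2")
        .build();
```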

      It will be sent in the {@code STARTUP} protocol message, under the key {@code + * APPLICATION_VERSION}, for each new connection established by the driver. Currently, this + * information is used by Insights monitoring (if the target cluster does not support Insights, + * the entry will be ignored by the server). + * + *

      This can also be defined in the driver configuration with the option {@code + * basic.application.version}; if you specify both, this method takes precedence and the + * configuration option will be ignored. If neither is specified, the entry is not included in the + * message. + */ + @NonNull + public SelfT withApplicationVersion(@Nullable String applicationVersion) { + this.programmaticArgumentsBuilder.withStartupApplicationVersion(applicationVersion); + return self; + } + + /** + * The metric registry object for storing driver metrics. + * + *

      The argument should be an instance of the base registry type for the metrics framework you + * are using (see {@code advanced.metrics.factory.class} in the configuration): + * + *

+ * <ul>
+ *   <li>Dropwizard (the default): {@code com.codahale.metrics.MetricRegistry}
+ *   <li>Micrometer: {@code io.micrometer.core.instrument.MeterRegistry}
+ *   <li>MicroProfile: {@code org.eclipse.microprofile.metrics.MetricRegistry}
+ * </ul>
+ *
+ * Only MicroProfile requires an external instance of its registry to be provided. For
+ * Micrometer, if no Registry object is provided, Micrometer's {@code globalRegistry} will be
+ * used. For Dropwizard, if no Registry object is provided, an instance of {@code MetricRegistry}
+ * will be created and used.
+ */
+ @NonNull
+ public SelfT withMetricRegistry(@Nullable Object metricRegistry) {
+   this.programmaticArgumentsBuilder.withMetricRegistry(metricRegistry);
+   return self;
+ }

 /**
  * Creates the session with the options set by this builder.
  *
+ *
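[Editor's aside, not part of the diff: a sketch of the default (Dropwizard) case, where supplying an explicit registry lets the application read the driver's metrics back out.]

```java
import com.codahale.metrics.MetricRegistry;
import com.datastax.oss.driver.api.core.CqlSession;

MetricRegistry registry = new MetricRegistry();
CqlSession session =
    CqlSession.builder()
        .withMetricRegistry(registry) // Dropwizard is the default metrics framework
        .build();
// Driver metrics are now visible through the application's own registry.
```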

      The session initialization will happen asynchronously in a driver internal thread pool. + * * @return a completion stage that completes with the session when it is fully initialized. */ @NonNull @@ -316,10 +837,13 @@ public CompletionStage buildAsync() { CompletableFutures.propagateCancellation(wrapStage, buildStage); return wrapStage; } - /** * Convenience method to call {@link #buildAsync()} and block on the result. * + *

      Usage in non-blocking applications: beware that session initialization is a costly + * operation. It should only be triggered from a thread that is allowed to block. If that is not + * the case, consider using {@link #buildAsync()} instead. + * *

      This must not be called on a driver thread. */ @NonNull @@ -333,11 +857,62 @@ public SessionT build() { @NonNull protected final CompletionStage buildDefaultSessionAsync() { try { - DriverConfigLoader configLoader = buildIfNull(this.configLoader, this::defaultConfigLoader); + + ProgrammaticArguments programmaticArguments = programmaticArgumentsBuilder.build(); + + DriverConfigLoader configLoader = + this.configLoader != null + ? this.configLoader + : defaultConfigLoader(programmaticArguments.getClassLoader()); DriverExecutionProfile defaultConfig = configLoader.getInitialConfig().getDefaultProfile(); + if (cloudConfigInputStream == null) { + String configUrlString = + defaultConfig.getString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, null); + if (configUrlString != null) { + cloudConfigInputStream = () -> getURL(configUrlString).openStream(); + } + } List configContactPoints = defaultConfig.getStringList(DefaultDriverOption.CONTACT_POINTS, Collections.emptyList()); + if (cloudConfigInputStream != null) { + // override request id generator, unless user has already set it + if (programmaticArguments.getRequestIdGenerator() == null) { + programmaticArgumentsBuilder.withRequestIdGenerator( + new W3CContextRequestIdGenerator(ASTRA_PAYLOAD_KEY)); + LOG.debug( + "A secure connect bundle is provided, using W3CContextRequestIdGenerator as request ID generator."); + } + if (!programmaticContactPoints.isEmpty() || !configContactPoints.isEmpty()) { + LOG.info( + "Both a secure connect bundle and contact points were provided. These are mutually exclusive. The contact points from the secure bundle will have priority."); + // clear the contact points provided in the setting file and via addContactPoints + configContactPoints = Collections.emptyList(); + programmaticContactPoints = new HashSet<>(); + } + + if (programmaticSslFactory + || defaultConfig.isDefined(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS)) { + LOG.info( + "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. The SSL options from the secure bundle will have priority."); + } + CloudConfig cloudConfig = + new CloudConfigFactory().createCloudConfig(cloudConfigInputStream.call()); + addContactEndPoints(cloudConfig.getEndPoints()); + + boolean localDataCenterDefined = + anyProfileHasDatacenterDefined(configLoader.getInitialConfig()); + if (programmaticLocalDatacenter || localDataCenterDefined) { + LOG.info( + "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. 
The local datacenter from the secure bundle will have priority."); + programmaticArgumentsBuilder.clearDatacenters(); + } + withLocalDatacenter(cloudConfig.getLocalDatacenter()); + withSslEngineFactory(cloudConfig.getSslEngineFactory()); + withCloudProxyAddress(cloudConfig.getProxyAddress()); + programmaticArguments = programmaticArgumentsBuilder.build(); + } + boolean resolveAddresses = defaultConfig.getBoolean(DefaultDriverOption.RESOLVE_CONTACT_POINTS, true); @@ -350,7 +925,7 @@ protected final CompletionStage buildDefaultSessionAsync() { } return DefaultSession.init( - (InternalDriverContext) buildContext(configLoader, programmaticArgumentsBuilder.build()), + (InternalDriverContext) buildContext(configLoader, programmaticArguments), contactPoints, keyspace); @@ -361,6 +936,36 @@ protected final CompletionStage buildDefaultSessionAsync() { } } + private boolean anyProfileHasDatacenterDefined(DriverConfig driverConfig) { + for (DriverExecutionProfile driverExecutionProfile : driverConfig.getProfiles().values()) { + if (driverExecutionProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { + return true; + } + } + return false; + } + + /** + * Returns URL based on the configUrl setting. If the configUrl has no protocol provided, the + * method will fallback to file:// protocol and return URL that has file protocol specified. + * + * @param configUrl url to config secure bundle + * @return URL with file protocol if there was not explicit protocol provided in the configUrl + * setting + */ + private URL getURL(String configUrl) throws MalformedURLException { + try { + return new URL(configUrl); + } catch (MalformedURLException e1) { + try { + return Paths.get(configUrl).toAbsolutePath().normalize().toUri().toURL(); + } catch (MalformedURLException e2) { + e2.addSuppressed(e1); + throw e2; + } + } + } + /** * This must return an instance of {@code InternalDriverContext} (it's not expressed * directly in the signature to avoid leaking that type through the protected API). @@ -405,8 +1010,4 @@ protected DriverContext buildContext( ClassLoader classLoader) { return null; } - - private static T buildIfNull(T value, Supplier builder) { - return (value == null) ? builder.get() : value; - } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java index 21ae3b5e396..73d347d533e 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +20,14 @@ import edu.umd.cs.findbugs.annotations.NonNull; import java.io.Closeable; -/** Limits the number of concurrent requests executed by the driver. */ +/** + * Limits the number of concurrent requests executed by the driver. + * + *

Usage in non-blocking applications: beware that some implementations of this interface use + * locks for internal coordination, and do not qualify as lock-free. If your application enforces + * strict lock-freedom, then you should use the {@code PassThroughRequestThrottler}; other built-in + * implementations, such as the {@code ConcurrencyLimitingRequestThrottler}, rely on locks. + */ public interface RequestThrottler extends Closeable { /** @@ -47,4 +56,12 @@ public interface RequestThrottler extends Closeable { * perform time-based eviction on pending requests. */ void signalTimeout(@NonNull Throttled request); + + /** + * Signals that a request has been cancelled. This indicates to the throttler that another request + * might be started. + */ + default void signalCancel(@NonNull Throttled request) { + // no-op for backward compatibility purposes + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java index f64ec481743..6fd562804da 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/session/throttling/Throttled.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java index 149a2f3c285..163204ba62d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
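For illustration, a minimal throttler that honors the new `signalCancel` callback might look like the sketch below. The class name is hypothetical and not part of this patch; it admits every request immediately, like the built-in pass-through implementation, and merely keeps an in-flight count.

```java
import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler;
import com.datastax.oss.driver.api.core.session.throttling.Throttled;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.concurrent.atomic.AtomicInteger;

public class CountingThrottler implements RequestThrottler {

  private final AtomicInteger inFlight = new AtomicInteger();

  @Override
  public void register(@NonNull Throttled request) {
    inFlight.incrementAndGet();
    request.onThrottleReady(false); // admit immediately, without delay
  }

  @Override
  public void signalSuccess(@NonNull Throttled request) {
    inFlight.decrementAndGet();
  }

  @Override
  public void signalError(@NonNull Throttled request, @NonNull Throwable error) {
    inFlight.decrementAndGet();
  }

  @Override
  public void signalTimeout(@NonNull Throttled request) {
    inFlight.decrementAndGet();
  }

  // New callback introduced by this patch: without it, cancelled requests would leak a slot.
  @Override
  public void signalCancel(@NonNull Throttled request) {
    inFlight.decrementAndGet();
  }

  @Override
  public void close() {}
}
```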
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java new file mode 100644 index 00000000000..d65eaa864aa --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.ssl; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; + +/** + * An SSL engine factory that allows you to configure the driver programmatically, by passing your + * own {@link SSLContext}. + * + *

      Note that this class will create SSL engines with advisory peer information ({@link + * SSLContext#createSSLEngine(String, int)}) whenever possible. + * + *

      If those defaults do not work for you, it should be pretty straightforward to write your own + * implementation by extending or duplicating this class. + * + * @see SessionBuilder#withSslEngineFactory(SslEngineFactory) + * @see SessionBuilder#withSslContext(SSLContext) + */ +public class ProgrammaticSslEngineFactory implements SslEngineFactory { + + protected final SSLContext sslContext; + protected final String[] cipherSuites; + protected final boolean requireHostnameValidation; + protected final boolean allowDnsReverseLookupSan; + + /** + * Creates an instance with the given {@link SSLContext}, default cipher suites and no host name + * validation. + * + * @param sslContext the {@link SSLContext} to use. + */ + public ProgrammaticSslEngineFactory(@NonNull SSLContext sslContext) { + this(sslContext, null); + } + + /** + * Creates an instance with the given {@link SSLContext} and cipher suites, and no host name + * validation. + * + * @param sslContext the {@link SSLContext} to use. + * @param cipherSuites the cipher suites to use, or null to use the default ones. + */ + public ProgrammaticSslEngineFactory( + @NonNull SSLContext sslContext, @Nullable String[] cipherSuites) { + this(sslContext, cipherSuites, false); + } + + /** + * Creates an instance with the given {@link SSLContext}, cipher suites and host name validation. + * + * @param sslContext the {@link SSLContext} to use. + * @param cipherSuites the cipher suites to use, or null to use the default ones. + * @param requireHostnameValidation whether to enable host name validation. If enabled, host name + * validation will be done using HTTPS algorithm. + */ + public ProgrammaticSslEngineFactory( + @NonNull SSLContext sslContext, + @Nullable String[] cipherSuites, + boolean requireHostnameValidation) { + this(sslContext, cipherSuites, requireHostnameValidation, true); + } + + /** + * Creates an instance with the given {@link SSLContext}, cipher suites and host name validation. + * + * @param sslContext the {@link SSLContext} to use. + * @param cipherSuites the cipher suites to use, or null to use the default ones. + * @param requireHostnameValidation whether to enable host name validation. If enabled, host name + * validation will be done using HTTPS algorithm. + * @param allowDnsReverseLookupSan whether to allow raw server IPs to be DNS reverse-resolved to + * choose the appropriate Subject Alternative Name. + */ + public ProgrammaticSslEngineFactory( + @NonNull SSLContext sslContext, + @Nullable String[] cipherSuites, + boolean requireHostnameValidation, + boolean allowDnsReverseLookupSan) { + this.sslContext = sslContext; + this.cipherSuites = cipherSuites; + this.requireHostnameValidation = requireHostnameValidation; + this.allowDnsReverseLookupSan = allowDnsReverseLookupSan; + } + + @NonNull + @Override + public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { + SSLEngine engine; + SocketAddress remoteAddress = remoteEndpoint.resolve(); + if (remoteAddress instanceof InetSocketAddress) { + InetSocketAddress socketAddress = (InetSocketAddress) remoteAddress; + engine = + sslContext.createSSLEngine( + allowDnsReverseLookupSan + ? 
socketAddress.getHostName() + : socketAddress.getHostString(), + socketAddress.getPort()); + } else { + engine = sslContext.createSSLEngine(); + } + engine.setUseClientMode(true); + if (cipherSuites != null) { + engine.setEnabledCipherSuites(cipherSuites); + } + if (requireHostnameValidation) { + SSLParameters parameters = engine.getSSLParameters(); + parameters.setEndpointIdentificationAlgorithm("HTTPS"); + engine.setSSLParameters(parameters); + } + return engine; + } + + @Override + public void close() { + // nothing to do + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java index a001c696fe0..db4f18a97b9 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/SslEngineFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java index 343a8ab0360..a0cb3e73397 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/ssl/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java index cc2fd76016f..b1139dd9f4d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/time/TimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
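A sketch of how the new factory might be wired in, assuming the JVM's default SSLContext already holds the right trust material; the query and try-with-resources are illustrative only.

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory;
import javax.net.ssl.SSLContext;

public class SslExample {
  public static void main(String[] args) throws Exception {
    SSLContext sslContext = SSLContext.getDefault();
    try (CqlSession session =
        CqlSession.builder()
            .withSslEngineFactory(
                new ProgrammaticSslEngineFactory(
                    sslContext, null /* default cipher suites */, true /* hostname validation */))
            .build()) {
      session.execute("SELECT release_version FROM system.local");
    }
  }
}
```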
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.oss.driver.api.core.time; +import com.datastax.oss.driver.api.core.cql.Statement; + /** * Generates client-side, microsecond-precision query timestamps. * @@ -31,8 +35,9 @@ public interface TimestampGenerator extends AutoCloseable { * returned value if the clock tick hasn't changed, and possibly drifting in the future. See the * built-in driver implementations for more details. * - * @return the next timestamp, or {@link Long#MIN_VALUE} to indicate that the driver should not - * send one with the query (and let Cassandra generate a server-side timestamp). + * @return the next timestamp, or {@link Statement#NO_DEFAULT_TIMESTAMP} to indicate that the + * driver should not send one with the query (and let Cassandra generate a server-side + * timestamp). */ long next(); } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java new file mode 100644 index 00000000000..21db3793b01 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestIdGenerator.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.tracker; + +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.session.Request; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Interface responsible for generating request IDs. + * + *
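To illustrate the updated `TimestampGenerator` contract above, here is a sketch of a generator that always defers to server-side timestamps. The driver ships an equivalent built-in implementation; this standalone version exists only to show the new {@code Statement#NO_DEFAULT_TIMESTAMP} return value in context.

```java
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.time.TimestampGenerator;

public class DeferToServerTimestampGenerator implements TimestampGenerator {

  @Override
  public long next() {
    // Tells the driver not to send a client-side timestamp with the query,
    // letting Cassandra generate one server-side.
    return Statement.NO_DEFAULT_TIMESTAMP;
  }

  @Override
  public void close() {}
}
```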

Note that all request IDs have a parent/child relationship. A "session request ID" can loosely + * be thought of as encompassing a sequence of a request + any attendant retries, speculative + * executions etc. Its scope is identical to that of a {@link + * com.datastax.oss.driver.internal.core.cql.CqlRequestHandler}. A "node request ID" represents a + * single request within this larger scope. Note that a request corresponding to a request ID may be + * retried; in that case the retry count will be appended to the corresponding identifier in the + * logs. + */ +public interface RequestIdGenerator { + + String DEFAULT_PAYLOAD_KEY = "request-id"; + + /** + * Generates a unique identifier for the session request. This will be the identifier for the + * entire `session.execute()` call. This identifier will be added to logs, and propagated to + * request trackers. + * + * @return a unique identifier for the session request + */ + String getSessionRequestId(); + + /** + * Generates a unique identifier for the node request. This will be the identifier for the CQL + * request against a particular node. There can be one or more node requests for a single session + * request, due to retries or speculative executions. This identifier will be added to logs, and + * propagated to request trackers. + * + * @param statement the statement to be executed + * @param parentId the session request identifier + * @return a unique identifier for the node request + */ + String getNodeRequestId(@NonNull Request statement, @NonNull String parentId); + + default String getCustomPayloadKey() { + return DEFAULT_PAYLOAD_KEY; + } + + default Statement<?> getDecoratedStatement( + @NonNull Statement<?> statement, @NonNull String requestId) { + + Map<String, ByteBuffer> existing = new HashMap<>(statement.getCustomPayload()); + String key = getCustomPayloadKey(); + + // Add or overwrite + existing.put(key, ByteBuffer.wrap(requestId.getBytes(StandardCharsets.UTF_8))); + + // Allowing null key/values + // Wrap the map to make it immutable without instantiating a new map + Map<String, ByteBuffer> unmodifiableMap = Collections.unmodifiableMap(existing); + + return statement.setCustomPayload(unmodifiableMap); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java index a80255819a9..065b41e496a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/tracker/RequestTracker.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,9 +29,9 @@ /** * Tracks request execution for a session. * - *
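A hypothetical implementation of the `RequestIdGenerator` interface above, using random session IDs and a sequence-numbered suffix for node requests; the class name and ID scheme are illustrative, not part of the patch.

```java
import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;

public class UuidRequestIdGenerator implements RequestIdGenerator {

  private final AtomicLong nodeRequests = new AtomicLong();

  @Override
  public String getSessionRequestId() {
    return UUID.randomUUID().toString();
  }

  @Override
  public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) {
    // A session request can fan out into several node requests (retries,
    // speculative executions), so derive child IDs from the parent ID.
    return parentId + '-' + nodeRequests.incrementAndGet();
  }
}
```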

      There is exactly one tracker per {@link Session}. It can be provided either via the - * configuration (see {@code reference.conf} in the manual or core driver JAR), or programmatically - * via {@link SessionBuilder#withRequestTracker(RequestTracker)}. + *

      Implementations of this interface can be registered either via the configuration (see {@code + * reference.conf} in the manual or core driver JAR), or programmatically via {@link + * SessionBuilder#addRequestTracker(RequestTracker)}. */ public interface RequestTracker extends AutoCloseable { @@ -45,21 +47,22 @@ default void onSuccess( @NonNull Node node) {} /** - * Invoked each time a request succeeds. + * Invoked each time a session request succeeds. A session request is a `session.execute()` call * * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, * GenericType) session.execute} call until the result is made available to the client). * @param executionProfile the execution profile of this request. * @param node the node that returned the successful response. - * @param requestLogPrefix the dedicated log prefix for this request + * @param sessionRequestLogPrefix the dedicated log prefix for this request */ default void onSuccess( @NonNull Request request, long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String requestLogPrefix) { - // If client doesn't override onSuccess with requestLogPrefix delegate call to the old method + @NonNull String sessionRequestLogPrefix) { + // If client doesn't override onSuccess with sessionRequestLogPrefix delegate call to the old + // method onSuccess(request, latencyNanos, executionProfile, node); } @@ -76,13 +79,13 @@ default void onError( @Nullable Node node) {} /** - * Invoked each time a request fails. + * Invoked each time a session request fails. A session request is a `session.execute()` call * * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, * GenericType) session.execute} call until the error is propagated to the client). * @param executionProfile the execution profile of this request. * @param node the node that returned the error response, or {@code null} if the error occurred - * @param requestLogPrefix the dedicated log prefix for this request + * @param sessionRequestLogPrefix the dedicated log prefix for this request */ default void onError( @NonNull Request request, @@ -90,8 +93,9 @@ default void onError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @Nullable Node node, - @NonNull String requestLogPrefix) { - // If client doesn't override onError with requestLogPrefix delegate call to the old method + @NonNull String sessionRequestLogPrefix) { + // If client doesn't override onError with sessionRequestLogPrefix delegate call to the old + // method onError(request, error, latencyNanos, executionProfile, node); } @@ -108,14 +112,15 @@ default void onNodeError( @NonNull Node node) {} /** - * Invoked each time a request fails at the node level. Similar to {@link #onError(Request, - * Throwable, long, DriverExecutionProfile, Node, String)} but at a per node level. + * Invoked each time a node request fails. A node request is a CQL request sent to a particular + * node. There can be one or more node requests for a single session request, due to retries or + * speculative executions. * * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, * GenericType) session.execute} call until the error is propagated to the client). * @param executionProfile the execution profile of this request. * @param node the node that returned the error response. 
- * @param requestLogPrefix the dedicated log prefix for this request + * @param nodeRequestLogPrefix the dedicated log prefix for this request */ default void onNodeError( @NonNull Request request, @@ -123,8 +128,9 @@ default void onNodeError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String requestLogPrefix) { - // If client doesn't override onNodeError with requestLogPrefix delegate call to the old method + @NonNull String nodeRequestLogPrefix) { + // If client doesn't override onNodeError with nodeRequestLogPrefix delegate call to the old + // method onNodeError(request, error, latencyNanos, executionProfile, node); } @@ -140,23 +146,45 @@ default void onNodeSuccess( @NonNull Node node) {} /** - * Invoked each time a request succeeds at the node level. Similar to {@link #onSuccess(Request, - * long, DriverExecutionProfile, Node, String)} but at per node level. + * Invoked each time a node request succeeds. A node request is a CQL request sent to a particular + * node. There can be one or more node requests for a single session request, due to retries or + * speculative executions. * * @param latencyNanos the overall execution time (from the {@link Session#execute(Request, * GenericType) session.execute} call until the result is made available to the client). * @param executionProfile the execution profile of this request. * @param node the node that returned the successful response. - * @param requestLogPrefix the dedicated log prefix for this request + * @param nodeRequestLogPrefix the dedicated log prefix for this request */ default void onNodeSuccess( @NonNull Request request, long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String requestLogPrefix) { - // If client doesn't override onNodeSuccess with requestLogPrefix delegate call to the old + @NonNull String nodeRequestLogPrefix) { + // If client doesn't override onNodeSuccess with nodeRequestLogPrefix delegate call to the old // method onNodeSuccess(request, latencyNanos, executionProfile, node); } + + /** + * Invoked when the session is ready to process user requests. + * + *

      WARNING: if you use {@code session.execute()} in your tracker implementation, keep in + * mind that those requests will in turn recurse back into {@code onSuccess} / {@code onError} + * methods. Make sure you don't trigger an infinite loop; one way to do that is to use a + * custom execution profile for internal requests. + * + *

      This corresponds to the moment when {@link SessionBuilder#build()} returns, or the future + * returned by {@link SessionBuilder#buildAsync()} completes. If the session initialization fails, + * this method will not get called. + * + *

      Listener methods are invoked from different threads; if you store the session in a field, + * make it at least volatile to guarantee proper publication. + * + *

      This method is guaranteed to be the first one invoked on this object. + * + *

      The default implementation is empty. + */ + default void onSessionReady(@NonNull Session session) {} } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java new file mode 100644 index 00000000000..93e92ec2c2b --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/ContainerType.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.type; + +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * Representation of a type which "contains" some other type. This might be a collection type or it + * could be some other kind of container; the term is deliberately left somewhat vague. + */ +public interface ContainerType { + + @NonNull + DataType getElementType(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java index bc4f5ce7c5a..93f913a584d 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/CustomType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java index 87dc34144f7..92e5cc5edf0 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
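For illustration, a minimal tracker exercising the renamed per-node callback and the new `onSessionReady` hook from the `RequestTracker` changes above; the class name is hypothetical, and the instance would be registered via {@code SessionBuilder#addRequestTracker(RequestTracker)}.

```java
import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.tracker.RequestTracker;
import edu.umd.cs.findbugs.annotations.NonNull;

public class LoggingRequestTracker implements RequestTracker {

  @Override
  public void onSessionReady(@NonNull Session session) {
    // Guaranteed to be the first callback invoked on this object.
    System.out.println("Session ready: " + session.getName());
  }

  @Override
  public void onNodeSuccess(
      @NonNull Request request,
      long latencyNanos,
      @NonNull DriverExecutionProfile executionProfile,
      @NonNull Node node,
      @NonNull String nodeRequestLogPrefix) {
    System.out.printf("[%s] %s answered in %d ns%n", nodeRequestLogPrefix, node, latencyNanos);
  }

  @Override
  public void close() {}
}
```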
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java index 0a61314ca71..492fc121c71 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/DataTypes.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +17,15 @@ */ package com.datastax.oss.driver.api.core.type; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; import com.datastax.oss.driver.api.core.detach.Detachable; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser; import com.datastax.oss.driver.internal.core.type.DefaultCustomType; import com.datastax.oss.driver.internal.core.type.DefaultListType; import com.datastax.oss.driver.internal.core.type.DefaultMapType; import com.datastax.oss.driver.internal.core.type.DefaultSetType; import com.datastax.oss.driver.internal.core.type.DefaultTupleType; +import com.datastax.oss.driver.internal.core.type.DefaultVectorType; import com.datastax.oss.driver.internal.core.type.PrimitiveType; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.protocol.internal.ProtocolConstants; @@ -51,14 +56,18 @@ public class DataTypes { public static final DataType TINYINT = new PrimitiveType(ProtocolConstants.DataType.TINYINT); public static final DataType DURATION = new PrimitiveType(ProtocolConstants.DataType.DURATION); + private static final DataTypeClassNameParser classNameParser = new DataTypeClassNameParser(); + @NonNull public static DataType custom(@NonNull String className) { + // In protocol v4, duration is implemented as a custom type - if ("org.apache.cassandra.db.marshal.DurationType".equals(className)) { - return DURATION; - } else { - return new DefaultCustomType(className); - } + if (className.equals("org.apache.cassandra.db.marshal.DurationType")) return DURATION; + + /* Vector support is currently implemented as a custom type but is also 
parameterized */ + if (className.startsWith(DefaultVectorType.VECTOR_CLASS_NAME)) + return classNameParser.parse(className, AttachmentPoint.NONE); + return new DefaultCustomType(className); } @NonNull @@ -118,4 +127,8 @@ public static MapType frozenMapOf(@NonNull DataType keyType, @NonNull DataType v public static TupleType tupleOf(@NonNull DataType... componentTypes) { return new DefaultTupleType(ImmutableList.copyOf(Arrays.asList(componentTypes))); } + + public static VectorType vectorOf(DataType subtype, int dimensions) { + return new DefaultVectorType(subtype, dimensions); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java index 1bafb1693d7..ca377d10bbf 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/ListType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,10 +20,7 @@ import com.datastax.oss.protocol.internal.ProtocolConstants; import edu.umd.cs.findbugs.annotations.NonNull; -public interface ListType extends DataType { - - @NonNull - DataType getElementType(); +public interface ListType extends DataType, ContainerType { boolean isFrozen(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java index e79990f0782..f3bca2ac6a4 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/MapType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
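A short sketch of the new `DataTypes.vectorOf` factory method in use; the printed values only restate the contract shown above.

```java
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.VectorType;

public class VectorTypeExample {
  public static void main(String[] args) {
    // A 3-dimensional vector of floats, i.e. CQL vector<float, 3> per CEP-30.
    VectorType embedding = DataTypes.vectorOf(DataTypes.FLOAT, 3);
    System.out.println(embedding.getElementType()); // FLOAT
    System.out.println(embedding.getDimensions()); // 3
  }
}
```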
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java index eadd0a702e3..fa902c72bb8 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/SetType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,10 +20,7 @@ import com.datastax.oss.protocol.internal.ProtocolConstants; import edu.umd.cs.findbugs.annotations.NonNull; -public interface SetType extends DataType { - - @NonNull - DataType getElementType(); +public interface SetType extends DataType, ContainerType { boolean isFrozen(); diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java index a22bca8856d..9e2736ddce8 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/TupleType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java index b032151cc0e..4d4768a8ae4 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/UserDefinedType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,9 +23,11 @@ import com.datastax.oss.driver.api.core.metadata.schema.Describable; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.internal.core.metadata.schema.ScriptBuilder; +import com.datastax.oss.driver.internal.core.util.Loggers; import com.datastax.oss.protocol.internal.ProtocolConstants; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; import java.util.List; public interface UserDefinedType extends DataType, Describable { @@ -39,9 +43,41 @@ public interface UserDefinedType extends DataType, Describable { @NonNull List getFieldNames(); - int firstIndexOf(CqlIdentifier id); + /** + * @apiNote the default implementation only exists for backward compatibility. It wraps the result + * of {@link #firstIndexOf(CqlIdentifier)} in a singleton list, which is not entirely correct, + * as it will only return the first occurrence. Therefore it also logs a warning. + *

Implementors should always override this method (all built-in driver implementations + * do). + */ + @NonNull + default List<Integer> allIndicesOf(@NonNull CqlIdentifier id) { + Loggers.USER_DEFINED_TYPE.warn( + "{} should override allIndicesOf(CqlIdentifier), the default implementation is a " + + "workaround for backward compatibility, it only returns the first occurrence", + getClass().getName()); + return Collections.singletonList(firstIndexOf(id)); + } + + int firstIndexOf(@NonNull CqlIdentifier id); + + /** + * @apiNote the default implementation only exists for backward compatibility. It wraps the result + * of {@link #firstIndexOf(String)} in a singleton list, which is not entirely correct, as it + * will only return the first occurrence. Therefore it also logs a warning. +

Implementors should always override this method (all built-in driver implementations + * do). + */ + @NonNull + default List<Integer> allIndicesOf(@NonNull String name) { + Loggers.USER_DEFINED_TYPE.warn( + "{} should override allIndicesOf(String), the default implementation is a " + + "workaround for backward compatibility, it only returns the first occurrence", + getClass().getName()); + return Collections.singletonList(firstIndexOf(name)); + } - int firstIndexOf(String name); + int firstIndexOf(@NonNull String name); default boolean contains(@NonNull CqlIdentifier id) { return firstIndexOf(id) >= 0; diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java new file mode 100644 index 00000000000..1d7c13807ec --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/VectorType.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.type; + +/** + * Type representing a Cassandra vector type as described in CEP-30. At the moment this is + * implemented as a custom type, so we include the CustomType interface as well. + */ +public interface VectorType extends CustomType, ContainerType { + + int getDimensions(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java index 4d46f253915..4f45af0924f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/CodecNotFoundException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
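A usage sketch for the new `allIndicesOf` lookups, assuming a session whose schema metadata contains a UDT named "address" in keyspace "ks" (both names hypothetical).

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import java.util.List;

public class AllIndicesExample {
  static List<Integer> streetIndices(CqlSession session) {
    UserDefinedType udt =
        session
            .getMetadata()
            .getKeyspace("ks")
            .flatMap(ks -> ks.getUserDefinedType("address"))
            .orElseThrow(IllegalStateException::new);
    // Unlike firstIndexOf, this returns every occurrence of the name:
    return udt.allIndicesOf(CqlIdentifier.fromCql("street"));
  }
}
```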
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,7 @@ */ package com.datastax.oss.driver.api.core.type.codec; +import com.datastax.oss.driver.api.core.DriverException; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.api.core.type.reflect.GenericType; @@ -22,7 +25,7 @@ import edu.umd.cs.findbugs.annotations.Nullable; /** Thrown when a suitable {@link TypeCodec} cannot be found by the {@link CodecRegistry}. */ -public class CodecNotFoundException extends RuntimeException { +public class CodecNotFoundException extends DriverException { private final DataType cqlType; @@ -48,7 +51,7 @@ public CodecNotFoundException( private CodecNotFoundException( String msg, Throwable cause, DataType cqlType, GenericType javaType) { - super(msg, cause); + super(msg, null, cause, true); this.cqlType = cqlType; this.javaType = javaType; } @@ -62,4 +65,10 @@ public DataType getCqlType() { public GenericType getJavaType() { return javaType; } + + @NonNull + @Override + public DriverException copy() { + return new CodecNotFoundException(getMessage(), getCause(), getCqlType(), getJavaType()); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java new file mode 100644 index 00000000000..51a96a16376 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.java @@ -0,0 +1,492 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.type.codec; + +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.DefaultVectorType; +import com.datastax.oss.driver.internal.core.type.codec.SimpleBlobCodec; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.OptionalCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.BooleanListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.ByteListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.DoubleListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.FloatListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.IntListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.LongListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.ObjectListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.ShortListToArrayCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.enums.EnumNameCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.enums.EnumOrdinalCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.json.JsonCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.time.LocalTimestampCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.time.PersistentZonedTimestampCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.time.TimestampMillisCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.time.ZonedTimestampCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.vector.FloatVectorToArrayCodec; +import com.fasterxml.jackson.databind.ObjectMapper; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.Optional; + +/** + * Additional codecs that can be registered to handle different type mappings. + * + * @see SessionBuilder#addTypeCodecs(TypeCodec[]) + * @see MutableCodecRegistry#register(TypeCodec) + */ +public class ExtraTypeCodecs { + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the UTC time zone + * to parse and format CQL literals. + * + *

      This codec uses {@link ZoneOffset#UTC} as its source of time zone information when + * formatting values as CQL literals, or parsing CQL literals that do not have any time zone + * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link + * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a + * bound statement or reading a column from a row, are not affected by the time zone. + * + *

If you need a different time zone, consider other constants in this class, or call {@link + * ExtraTypeCodecs#timestampAt(ZoneId)} instead. + * + * @see TypeCodecs#TIMESTAMP + * @see ExtraTypeCodecs#timestampAt(ZoneId) + */ + public static final TypeCodec<Instant> TIMESTAMP_UTC = new TimestampCodec(ZoneOffset.UTC); + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@code long}, representing the number of + * milliseconds since the Epoch, using the system's default time zone to parse and format CQL + * literals. + * + *

      This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its + * source of time zone information when formatting values as CQL literals, or parsing CQL literals + * that do not have any time zone indication. Note that this only applies to the {@link + * TypeCodec#format(Object)} and {@link TypeCodec#parse(String)} methods; regular encoding and + * decoding, like setting a value on a bound statement or reading a column from a row, are not + * affected by the time zone. + * + *

      If you need a different time zone, consider other constants in this class, or call {@link + * #timestampMillisAt(ZoneId)} instead. + * + *

This codec can serve as a replacement for the driver's built-in {@linkplain + * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw + * milliseconds rather than with {@link Instant} instances. + * + * @see #TIMESTAMP_MILLIS_UTC + * @see #timestampMillisAt(ZoneId) + */ + public static final PrimitiveLongCodec TIMESTAMP_MILLIS_SYSTEM = new TimestampMillisCodec(); + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@code long}, representing the number of + * milliseconds since the Epoch, using the UTC time zone to parse and format CQL literals. + * + *

      This codec uses {@link ZoneOffset#UTC} as its source of time zone information when + * formatting values as CQL literals, or parsing CQL literals that do not have any time zone + * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link + * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a + * bound statement or reading a column from a row, are not affected by the time zone. + * + *

      If you need a different time zone, consider other constants in this class, or call {@link + * #timestampMillisAt(ZoneId)} instead. + * + *

This codec can serve as a replacement for the driver's built-in {@linkplain + * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw + * milliseconds rather than with {@link Instant} instances. + * + * @see #TIMESTAMP_MILLIS_SYSTEM + * @see #timestampMillisAt(ZoneId) + */ + public static final PrimitiveLongCodec TIMESTAMP_MILLIS_UTC = + new TimestampMillisCodec(ZoneOffset.UTC); + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}, using the + * system's default time zone. + * + *
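A short sketch of the zone-dependent behavior described above; the exact literal text shown in the comment is indicative only.

```java
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec;

public class TimestampMillisExample {
  public static void main(String[] args) {
    PrimitiveLongCodec codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC;
    // The zone only affects format()/parse(); encode()/decode() are zone-independent.
    String literal = codec.format(0L); // e.g. '1970-01-01T00:00:00.000Z'
    System.out.println(codec.parse(literal)); // 0
  }
}
```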

      This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its + * source of time zone information when encoding or decoding. If you need a different time zone, + * consider using other constants in this class, or call {@link #zonedTimestampAt(ZoneId)} + * instead. + * + *

Note that CQL type {@code timestamp} does not store any time zone; this codec is + * provided merely as a convenience for users that need to deal with zoned timestamps in their + * applications. + * + * @see #ZONED_TIMESTAMP_UTC + * @see #ZONED_TIMESTAMP_PERSISTED + * @see #zonedTimestampAt(ZoneId) + */ + public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_SYSTEM = new ZonedTimestampCodec(); + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}, using the UTC + * time zone. + * + *

      This codec uses {@link ZoneOffset#UTC} as its source of time zone information when encoding + * or decoding. If you need a different time zone, consider using other constants in this class, + * or call {@link #zonedTimestampAt(ZoneId)} instead. + * + *

Note that CQL type {@code timestamp} does not store any time zone; this codec is + * provided merely as a convenience for users that need to deal with zoned timestamps in their + * applications. + * + * @see #ZONED_TIMESTAMP_SYSTEM + * @see #ZONED_TIMESTAMP_PERSISTED + * @see #zonedTimestampAt(ZoneId) + */ + public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_UTC = + new ZonedTimestampCodec(ZoneOffset.UTC); + + /** + * A codec that maps CQL type {@code tuple<timestamp, text>} to Java's {@link ZonedDateTime}, + * providing a pattern for maintaining timezone information in Cassandra. + * + *

Since CQL type {@code timestamp} does not store any time zone, it is persisted separately in + * the {@code text} field of the tuple, and so when the value is read back the original timezone + * it was written with is preserved. + * + * @see #ZONED_TIMESTAMP_SYSTEM + * @see #ZONED_TIMESTAMP_UTC + * @see #zonedTimestampAt(ZoneId) + */ + public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_PERSISTED = + new PersistentZonedTimestampCodec(); + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}, using the + * system's default time zone. + * + *
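A usage sketch for the persisted variant, assuming a table "ks.events" with a column "created_at" of CQL type tuple<timestamp, text> (table and column names hypothetical).

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import java.time.ZonedDateTime;

public class PersistedZoneExample {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder().addTypeCodecs(ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED).build()) {
      ZonedDateTime createdAt =
          session
              .execute("SELECT created_at FROM ks.events WHERE pk = 0")
              .one()
              .get("created_at", ZonedDateTime.class);
      System.out.println(createdAt.getZone()); // the zone the value was written with
    }
  }
}
```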

      This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its + * source of time zone information when encoding or decoding. If you need a different time zone, + * consider using other constants in this class, or call {@link #localTimestampAt(ZoneId)} + * instead. + * + *

Note that CQL type {@code timestamp} does not store any time zone; this codec is provided + * merely as a convenience for users that need to deal with local date-times in their + * applications. + * + * @see #LOCAL_TIMESTAMP_UTC + * @see #localTimestampAt(ZoneId) + */ + public static final TypeCodec<LocalDateTime> LOCAL_TIMESTAMP_SYSTEM = new LocalTimestampCodec(); + + /** + * A codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}, using the UTC + * time zone. + * + *

      This codec uses {@link ZoneOffset#UTC} as its source of time zone information when encoding + * or decoding. If you need a different time zone, consider using other constants in this class, + * or call {@link #localTimestampAt(ZoneId)} instead. + * + *

Note that CQL type {@code timestamp} does not store any time zone; this codec is provided + * merely as a convenience for users that need to deal with local date-times in their + * applications. + * + * @see #LOCAL_TIMESTAMP_SYSTEM + * @see #localTimestampAt(ZoneId) + */ + public static final TypeCodec<LocalDateTime> LOCAL_TIMESTAMP_UTC = + new LocalTimestampCodec(ZoneOffset.UTC); + + /** + * A codec that maps CQL type {@code blob} to Java's {@code byte[]}. + * + *

      If you are looking for a codec mapping CQL type {@code blob} to the Java type {@link + * ByteBuffer}, you should use {@link TypeCodecs#BLOB} instead. + * + *

If you are looking for a codec mapping CQL type {@code list<tinyint>} to the Java type + * {@code byte[]}, you should use {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY} instead. + * + * @see TypeCodecs#BLOB + * @see ExtraTypeCodecs#BYTE_LIST_TO_ARRAY + */ + public static final TypeCodec<byte[]> BLOB_TO_ARRAY = new SimpleBlobCodec(); + + /** + * A codec that maps CQL type {@code list<boolean>} to Java's {@code boolean[]}. + * + *
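A short sketch, assuming a hypothetical table `files(id int PRIMARY KEY, payload blob)`:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;

public class BlobToArrayExample {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder().addTypeCodecs(ExtraTypeCodecs.BLOB_TO_ARRAY).build()) {
      // With the codec registered, a blob column can be requested directly as byte[]
      byte[] payload =
          session.execute("SELECT payload FROM files WHERE id = 1").one().get("payload", byte[].class);
      System.out.println(payload.length);
    }
  }
}
```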

Note that this codec is designed for performance and converts CQL lists directly to + * {@code boolean[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * boolean} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ + public static final TypeCodec<boolean[]> BOOLEAN_LIST_TO_ARRAY = new BooleanListToArrayCodec(); + + /** + * A codec that maps CQL type {@code list<tinyint>} to Java's {@code byte[]}. + * + *

      This codec is not suitable for reading CQL blobs as byte arrays. If you are looking for a + * codec for the CQL type {@code blob}, you should use {@link TypeCodecs#BLOB} or {@link + * ExtraTypeCodecs#BLOB_TO_ARRAY} instead. + * + *

Note that this codec is designed for performance and converts CQL lists directly to + * {@code byte[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * byte} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + * + * @see TypeCodecs#BLOB + * @see ExtraTypeCodecs#BLOB_TO_ARRAY + */ + public static final TypeCodec<byte[]> BYTE_LIST_TO_ARRAY = new ByteListToArrayCodec(); + + /** + * A codec that maps CQL type {@code list<smallint>} to Java's {@code short[]}. + * + *

Note that this codec is designed for performance and converts CQL lists directly to + * {@code short[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * short} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ + public static final TypeCodec<short[]> SHORT_LIST_TO_ARRAY = new ShortListToArrayCodec(); + + /** + * A codec that maps CQL type {@code list<int>} to Java's {@code int[]}. + * + *

Note that this codec is designed for performance and converts CQL lists directly to + * {@code int[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code int} + * values; it also instantiates arrays without the need for an intermediary Java {@code List} + * object. + */ + public static final TypeCodec<int[]> INT_LIST_TO_ARRAY = new IntListToArrayCodec(); + + /** + * A codec that maps CQL type {@code list<bigint>} to Java's {@code long[]}. + * + *
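As an illustration of the primitive-array codecs, a sketch assuming a hypothetical table `players(id int PRIMARY KEY, scores list<int>)`:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;

public class IntArrayExample {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder().addTypeCodecs(ExtraTypeCodecs.INT_LIST_TO_ARRAY).build()) {
      // Reads a list<int> column straight into int[], with no intermediary List<Integer>
      int[] scores =
          session.execute("SELECT scores FROM players WHERE id = 1").one().get("scores", int[].class);
      System.out.println(scores.length);
    }
  }
}
```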

Note that this codec is designed for performance and converts CQL lists directly to + * {@code long[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * long} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ + public static final TypeCodec<long[]> LONG_LIST_TO_ARRAY = new LongListToArrayCodec(); + + /** + * A codec that maps CQL type {@code list<float>} to Java's {@code float[]}. + * + *

Note that this codec is designed for performance and converts CQL lists directly to + * {@code float[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * float} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ + public static final TypeCodec<float[]> FLOAT_LIST_TO_ARRAY = new FloatListToArrayCodec(); + + /** + * A codec that maps CQL type {@code list<double>} to Java's {@code double[]}. + * + *

Note that this codec is designed for performance and converts CQL lists directly to + * {@code double[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * double} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ + public static final TypeCodec<double[]> DOUBLE_LIST_TO_ARRAY = new DoubleListToArrayCodec(); + + /** + * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the + * given time zone to parse and format CQL literals. + * + *

This codec uses the supplied {@link ZoneId} as its source of time zone information when + * formatting values as CQL literals, or parsing CQL literals that do not have any time zone + * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link + * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a + * bound statement or reading a column from a row, are not affected by the time zone. + * + * @see TypeCodecs#TIMESTAMP + * @see ExtraTypeCodecs#TIMESTAMP_UTC + */ + @NonNull + public static TypeCodec<Instant> timestampAt(@NonNull ZoneId timeZone) { + return new TimestampCodec(timeZone); + } + + /** + * Builds a new codec that maps CQL type {@code timestamp} to Java's {@code long}, representing + * the number of milliseconds since the Epoch, using the given time zone to parse and format CQL + * literals. + * + *
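A small sketch of the zone's effect, with an arbitrary zone and instant:

```java
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import java.time.Instant;
import java.time.ZoneId;

public class TimestampAtExample {
  public static void main(String[] args) {
    TypeCodec<Instant> nyCodec = ExtraTypeCodecs.timestampAt(ZoneId.of("America/New_York"));
    Instant instant = Instant.parse("2023-01-01T00:00:00Z");
    // Only the literal representation is zone-dependent; encode()/decode() are not.
    System.out.println(nyCodec.format(instant));
  }
}
```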

      This codec uses the supplied {@link ZoneId} as its source of time zone information when + * formatting values as CQL literals, or parsing CQL literals that do not have any time zone + * indication. Note that this only applies to the {@link TypeCodec#format(Object)} and {@link + * TypeCodec#parse(String)} methods; regular encoding and decoding, like setting a value on a + * bound statement or reading a column from a row, are not affected by the time zone. + * + *

This codec can serve as a replacement for the driver's built-in {@linkplain + * TypeCodecs#TIMESTAMP timestamp} codec, when application code prefers to deal with raw + * milliseconds rather than with {@link Instant} instances. + * + * @see ExtraTypeCodecs#TIMESTAMP_MILLIS_SYSTEM + * @see ExtraTypeCodecs#TIMESTAMP_MILLIS_UTC + */ + @NonNull + public static PrimitiveLongCodec timestampMillisAt(@NonNull ZoneId timeZone) { + return new TimestampMillisCodec(timeZone); + } + + /** + * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link ZonedDateTime}. + * + *

      This codec uses the supplied {@link ZoneId} as its source of time zone information when + * encoding or decoding. + * + *

Note that CQL type {@code timestamp} does not store any time zone; the codecs created by + * this method are provided merely as a convenience for users that need to deal with zoned + * timestamps in their applications. + * + * @see ExtraTypeCodecs#ZONED_TIMESTAMP_SYSTEM + * @see ExtraTypeCodecs#ZONED_TIMESTAMP_UTC + * @see ExtraTypeCodecs#ZONED_TIMESTAMP_PERSISTED + */ + @NonNull + public static TypeCodec<ZonedDateTime> zonedTimestampAt(@NonNull ZoneId timeZone) { + return new ZonedTimestampCodec(timeZone); + } + + /** + * Builds a new codec that maps CQL type {@code timestamp} to Java's {@link LocalDateTime}. + * + *

      This codec uses the supplied {@link ZoneId} as its source of time zone information when + * encoding or decoding. + * + *

Note that CQL type {@code timestamp} does not store any time zone; the codecs created by + * this method are provided merely as a convenience for users that need to deal with local + * date-times in their applications. + * + * @see ExtraTypeCodecs#LOCAL_TIMESTAMP_SYSTEM + * @see ExtraTypeCodecs#LOCAL_TIMESTAMP_UTC + */ + @NonNull + public static TypeCodec<LocalDateTime> localTimestampAt(@NonNull ZoneId timeZone) { + return new LocalTimestampCodec(timeZone); + } + + /** + * Builds a new codec that maps a CQL list to a Java array. Encoding and decoding of elements in + * the array is delegated to the provided element codec. + * + *

This method is not suitable for Java primitive arrays. Use {@link + * ExtraTypeCodecs#BOOLEAN_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY}, {@link + * ExtraTypeCodecs#SHORT_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#INT_LIST_TO_ARRAY}, {@link + * ExtraTypeCodecs#LONG_LIST_TO_ARRAY}, {@link ExtraTypeCodecs#FLOAT_LIST_TO_ARRAY} or {@link + * ExtraTypeCodecs#DOUBLE_LIST_TO_ARRAY} instead. + */ + @NonNull + public static <ComponentT> TypeCodec<ComponentT[]> listToArrayOf(@NonNull TypeCodec<ComponentT> elementCodec) { + return new ObjectListToArrayCodec<>(elementCodec); + } + + /** + * Builds a new codec that maps CQL type {@code int} to a Java Enum, according to its constants' + * {@linkplain Enum#ordinal() ordinals} (STRONGLY discouraged, see explanations below). + * + *
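For reference types, the factory can be used like this (a sketch assuming a hypothetical table `articles(id int PRIMARY KEY, tags list<text>)`):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;

public class ListToArrayExample {
  public static void main(String[] args) {
    // A codec mapping CQL list<text> to String[]
    TypeCodec<String[]> tagsCodec = ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT);
    try (CqlSession session = CqlSession.builder().addTypeCodecs(tagsCodec).build()) {
      String[] tags =
          session.execute("SELECT tags FROM articles WHERE id = 1").one().get("tags", String[].class);
      System.out.println(tags.length);
    }
  }
}
```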

      This method is provided for compatibility with driver 3, but we strongly recommend against + * it. Relying on enum ordinals is a bad practice: any reordering of the enum constants, or + * insertion of a new constant before the end, will change the ordinals. The codec will keep + * working, but start inserting different codes and corrupting your data. + * + *

{@link #enumNamesOf(Class)} is a safer alternative, as it is not dependent on the constant + * order. If you still want to use integer codes for storage efficiency, we recommend implementing + * an explicit mapping (for example with a {@code toCode()} method on your enum type). It is then + * fairly straightforward to implement a codec with {@link MappingCodec}, using {@link + * TypeCodecs#INT} as the "inner" codec. + */ + @NonNull + public static <EnumT extends Enum<EnumT>> TypeCodec<EnumT> enumOrdinalsOf( + @NonNull Class<EnumT> enumClass) { + return new EnumOrdinalCodec<>(enumClass); + } + + /** + * Builds a new codec that maps CQL type {@code text} to a Java Enum, according to its constants' + * programmatic {@linkplain Enum#name() names}. + * + * @see #enumOrdinalsOf(Class) + */ + @NonNull + public static <EnumT extends Enum<EnumT>> TypeCodec<EnumT> enumNamesOf( + @NonNull Class<EnumT> enumClass) { + return new EnumNameCodec<>(enumClass); + } + + /** + * Builds a new codec that wraps another codec's Java type into {@link Optional} instances + * (mapping CQL null to {@link Optional#empty()}). + */ + @NonNull + public static <T> TypeCodec<Optional<T>> optionalOf(@NonNull TypeCodec<T> innerCodec) { + return new OptionalCodec<>(innerCodec); + } + + /** + * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON + * serialization with a default Jackson mapper. + * + * @see <a href="https://github.com/FasterXML/jackson">Jackson JSON Library</a> + */ + @NonNull + public static <T> TypeCodec<T> json(@NonNull GenericType<T> javaType) { + return new JsonCodec<>(javaType); + } + + /** + * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON + * serialization with a default Jackson mapper. + * + * @see <a href="https://github.com/FasterXML/jackson">Jackson JSON Library</a> + */ + @NonNull + public static <T> TypeCodec<T> json(@NonNull Class<T> javaType) { + return new JsonCodec<>(javaType); + } + + /** + * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON + * serialization with the provided Jackson mapper. + * + * @see <a href="https://github.com/FasterXML/jackson">Jackson JSON Library</a> + */ + @NonNull + public static <T> TypeCodec<T> json( + @NonNull GenericType<T> javaType, @NonNull ObjectMapper objectMapper) { + return new JsonCodec<>(javaType, objectMapper); + } + + /** + * Builds a new codec that maps CQL type {@code text} to the given Java type, using JSON + * serialization with the provided Jackson mapper. + * + * @see <a href="https://github.com/FasterXML/jackson">Jackson JSON Library</a> + */ + @NonNull + public static <T> TypeCodec<T> json( + @NonNull Class<T> javaType, @NonNull ObjectMapper objectMapper) { + return new JsonCodec<>(javaType, objectMapper); + } + + /** Builds a new codec that maps CQL float vectors of the specified size to an array of floats. */ + public static TypeCodec<float[]> floatVectorToArray(int dimensions) { + return new FloatVectorToArrayCodec(new DefaultVectorType(DataTypes.FLOAT, dimensions)); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java new file mode 100644 index 00000000000..df1a34a566a --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/MappingCodec.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.type.codec; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.util.Objects; + +/** + * A {@link TypeCodec} that maps instances of {@code InnerT}, a driver supported Java type, to + * instances of a target {@code OuterT} Java type. + * + *
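To make the contract concrete, a sketch of a subclass that maps CQL `int` to an application enum through explicit, stable codes (the `Priority` enum and its codes are hypothetical):

```java
import com.datastax.oss.driver.api.core.type.codec.MappingCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import edu.umd.cs.findbugs.annotations.Nullable;

enum Priority {
  LOW(1), HIGH(2);
  final int code;
  Priority(int code) { this.code = code; }
  static Priority fromCode(int code) {
    for (Priority p : values()) {
      if (p.code == code) return p;
    }
    throw new IllegalArgumentException("Unknown code: " + code);
  }
}

class PriorityCodec extends MappingCodec<Integer, Priority> {
  PriorityCodec() {
    // Delegate the actual CQL encoding/decoding to the built-in int codec
    super(TypeCodecs.INT, GenericType.of(Priority.class));
  }

  @Nullable @Override
  protected Priority innerToOuter(@Nullable Integer value) {
    return value == null ? null : Priority.fromCode(value);
  }

  @Nullable @Override
  protected Integer outerToInner(@Nullable Priority value) {
    return value == null ? null : value.code;
  }
}
```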

This codec can be used to provide support for Java types that are not natively handled by the + * driver, as long as there is a conversion path to and from another supported Java type. + * + * @param <InnerT> The "inner" Java type; must be a driver supported Java type (that is, there must + * exist a codec registered for it). + * @param <OuterT> The "outer", or target Java type; this codec will handle the mapping to and from + * {@code InnerT} and {@code OuterT}. + * @see driver + * documentation on custom codecs + * @see + * driver supported Java types + */ +public abstract class MappingCodec<InnerT, OuterT> implements TypeCodec<OuterT> { + + protected final TypeCodec<InnerT> innerCodec; + protected final GenericType<OuterT> outerJavaType; + + /** + * Creates a new mapping codec providing support for {@code OuterT} based on an existing codec for + * {@code InnerT}. + * + * @param innerCodec The inner codec to use to handle instances of InnerT; must not be null. + * @param outerJavaType The outer Java type; must not be null. + */ + protected MappingCodec( + @NonNull TypeCodec<InnerT> innerCodec, @NonNull GenericType<OuterT> outerJavaType) { + this.innerCodec = Objects.requireNonNull(innerCodec, "innerCodec cannot be null"); + this.outerJavaType = Objects.requireNonNull(outerJavaType, "outerJavaType cannot be null"); + } + + /** @return The type of {@code OuterT}. */ + @NonNull + @Override + public GenericType<OuterT> getJavaType() { + return outerJavaType; + } + + /** @return The type of {@code InnerT}. */ + public GenericType<InnerT> getInnerJavaType() { + return innerCodec.getJavaType(); + } + + @NonNull + @Override + public DataType getCqlType() { + return innerCodec.getCqlType(); + } + + @Override + public ByteBuffer encode(OuterT value, @NonNull ProtocolVersion protocolVersion) { + return innerCodec.encode(outerToInner(value), protocolVersion); + } + + @Override + public OuterT decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + return innerToOuter(innerCodec.decode(bytes, protocolVersion)); + } + + @NonNull + @Override + public String format(OuterT value) { + return innerCodec.format(outerToInner(value)); + } + + @Override + public OuterT parse(String value) { + return innerToOuter(innerCodec.parse(value)); + } + + /** + * Converts from an instance of the inner Java type to an instance of the outer Java type. Used + * when deserializing or parsing. + * + * @param value The value to convert; may be null. + * @return The converted value; may be null. + */ + @Nullable + protected abstract OuterT innerToOuter(@Nullable InnerT value); + + /** + * Converts from an instance of the outer Java type to an instance of the inner Java type. Used + * when serializing or formatting. + * + * @param value The value to convert; may be null. + * @return The converted value; may be null. + */ + @Nullable + protected abstract InnerT outerToInner(@Nullable OuterT value); +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java index 45f3577284a..2ad4f2fa15a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveBooleanCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java index 119b950cfb9..5909bcd4ff9 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveByteCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java index 87d504a1ee3..c46160f0942 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveDoubleCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java index fcb048da7ae..585d5fdb1fa 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveFloatCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java index e029302f060..b3f374eb8d7 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveIntCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java index 56eaeb3e52c..ec65820c60f 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveLongCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java index dfeb7f0c72a..48c063b3dc6 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/PrimitiveShortCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java index 777d35b25fb..d6afbe0380a 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; /** * Manages the two-way conversion between a CQL type and a Java type. @@ -232,4 +235,9 @@ default boolean accepts(@NonNull DataType cqlType) { */ @Nullable JavaTypeT parse(@Nullable String value); + + @NonNull + default Optional serializedSize() { + return Optional.empty(); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java index ac421f2a046..68f1b07b106 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.api.core.type.codec; import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.core.data.TupleValue; import com.datastax.oss.driver.api.core.data.UdtValue; import com.datastax.oss.driver.api.core.type.CustomType; @@ -23,6 +26,7 @@ import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.TupleType; import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.VectorType; import com.datastax.oss.driver.internal.core.type.codec.BigIntCodec; import com.datastax.oss.driver.internal.core.type.codec.BlobCodec; import com.datastax.oss.driver.internal.core.type.codec.BooleanCodec; @@ -48,7 +52,7 @@ import com.datastax.oss.driver.internal.core.type.codec.UdtCodec; import com.datastax.oss.driver.internal.core.type.codec.UuidCodec; import com.datastax.oss.driver.internal.core.type.codec.VarIntCodec; -import com.datastax.oss.driver.internal.core.type.codec.ZonedTimestampCodec; +import com.datastax.oss.driver.internal.core.type.codec.VectorCodec; import com.datastax.oss.driver.shaded.guava.common.base.Charsets; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import edu.umd.cs.findbugs.annotations.NonNull; @@ -60,82 +64,133 @@ import java.time.LocalDate; import java.time.LocalTime; import java.time.ZoneId; -import java.time.ZoneOffset; import java.time.ZonedDateTime; import 
java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; -/** Constants and factory methods to obtain type codec instances. */ +/** + * Constants and factory methods to obtain instances of the driver's default type codecs. + * + *

See also {@link ExtraTypeCodecs} for additional codecs that you can register with your session + * to handle different type mappings. + */ public class TypeCodecs { + /** The default codec that maps CQL type {@code boolean} to Java's {@code boolean}. */ public static final PrimitiveBooleanCodec BOOLEAN = new BooleanCodec(); + + /** The default codec that maps CQL type {@code tinyint} to Java's {@code byte}. */ public static final PrimitiveByteCodec TINYINT = new TinyIntCodec(); + + /** The default codec that maps CQL type {@code double} to Java's {@code double}. */ public static final PrimitiveDoubleCodec DOUBLE = new DoubleCodec(); + + /** The default codec that maps CQL type {@code counter} to Java's {@code long}. */ public static final PrimitiveLongCodec COUNTER = new CounterCodec(); + + /** The default codec that maps CQL type {@code float} to Java's {@code float}. */ public static final PrimitiveFloatCodec FLOAT = new FloatCodec(); + + /** The default codec that maps CQL type {@code int} to Java's {@code int}. */ public static final PrimitiveIntCodec INT = new IntCodec(); + + /** The default codec that maps CQL type {@code bigint} to Java's {@code long}. */ public static final PrimitiveLongCodec BIGINT = new BigIntCodec(); + + /** The default codec that maps CQL type {@code smallint} to Java's {@code short}. */ public static final PrimitiveShortCodec SMALLINT = new SmallIntCodec(); - public static final TypeCodec<Instant> TIMESTAMP = new TimestampCodec(); /** - * A codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link - * ZonedDateTime}, using the system's {@linkplain ZoneId#systemDefault() default time zone} as its - * source of time zone information. + * The default codec that maps CQL type {@code timestamp} to Java's {@link Instant}, using the + * system's default time zone to parse and format CQL literals. + * + *

      This codec uses the system's {@linkplain ZoneId#systemDefault() default time zone} as its + * source of time zone information when formatting values as CQL literals, or parsing CQL literals + * that do not have any time zone indication. Note that this only applies to the {@link + * TypeCodec#format(Object)} and {@link TypeCodec#parse(String)} methods; regular encoding and + * decoding, like setting a value on a bound statement or reading a column from a row, are not + * affected by the time zone. * - *

      Note that Apache Cassandra(R)'s timestamp type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. + *

If you need a different time zone, consider other codecs in {@link ExtraTypeCodecs}, or call + * {@link ExtraTypeCodecs#timestampAt(ZoneId)} instead. * - * @see #ZONED_TIMESTAMP_UTC - * @see #zonedTimestampAt(ZoneId) + * @see ExtraTypeCodecs#TIMESTAMP_UTC + * @see ExtraTypeCodecs#timestampAt(ZoneId) */ - public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_SYSTEM = new ZonedTimestampCodec(); + public static final TypeCodec<Instant> TIMESTAMP = new TimestampCodec(); + + /** The default codec that maps CQL type {@code date} to Java's {@link LocalDate}. */ + public static final TypeCodec<LocalDate> DATE = new DateCodec(); + + /** The default codec that maps CQL type {@code time} to Java's {@link LocalTime}. */ + public static final TypeCodec<LocalTime> TIME = new TimeCodec(); /** - * A codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link - * ZonedDateTime}, using {@link ZoneOffset#UTC} as its source of time zone information. + * The default codec that maps CQL type {@code blob} to Java's {@link ByteBuffer}. + * + *

      If you are looking for a codec mapping CQL type {@code blob} to the Java type {@code + * byte[]}, you should use {@link ExtraTypeCodecs#BLOB_TO_ARRAY} instead. * - *

      Note that Apache Cassandra(R)'s timestamp type does not store any time zone; this codec is - * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. + *

If you are looking for a codec mapping CQL type {@code list<tinyint>} to the Java type + * {@code byte[]}, you should use {@link ExtraTypeCodecs#BYTE_LIST_TO_ARRAY} instead. * - * @see #ZONED_TIMESTAMP_SYSTEM - * @see #zonedTimestampAt(ZoneId) + * @see ExtraTypeCodecs#BLOB_TO_ARRAY + * @see ExtraTypeCodecs#BYTE_LIST_TO_ARRAY */ - public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_UTC = - new ZonedTimestampCodec(ZoneOffset.UTC); - - public static final TypeCodec<LocalDate> DATE = new DateCodec(); - public static final TypeCodec<LocalTime> TIME = new TimeCodec(); public static final TypeCodec<ByteBuffer> BLOB = new BlobCodec(); + + /** The default codec that maps CQL type {@code text} to Java's {@link String}. */ public static final TypeCodec<String> TEXT = new StringCodec(DataTypes.TEXT, Charsets.UTF_8); + /** The default codec that maps CQL type {@code ascii} to Java's {@link String}. */ public static final TypeCodec<String> ASCII = new StringCodec(DataTypes.ASCII, Charsets.US_ASCII); + /** The default codec that maps CQL type {@code varint} to Java's {@link BigInteger}. */ public static final TypeCodec<BigInteger> VARINT = new VarIntCodec(); + /** The default codec that maps CQL type {@code decimal} to Java's {@link BigDecimal}. */ public static final TypeCodec<BigDecimal> DECIMAL = new DecimalCodec(); + /** The default codec that maps CQL type {@code uuid} to Java's {@link UUID}. */ public static final TypeCodec<UUID> UUID = new UuidCodec(); + /** The default codec that maps CQL type {@code timeuuid} to Java's {@link UUID}. */ public static final TypeCodec<UUID> TIMEUUID = new TimeUuidCodec(); + /** The default codec that maps CQL type {@code inet} to Java's {@link InetAddress}. */ public static final TypeCodec<InetAddress> INET = new InetCodec(); + /** The default codec that maps CQL type {@code duration} to the driver's {@link CqlDuration}. */ public static final TypeCodec<CqlDuration> DURATION = new CqlDurationCodec(); + /** + * Builds a new codec that maps a CQL custom type to Java's {@link ByteBuffer}. + * + * @param cqlType the fully-qualified name of the custom type. + */ @NonNull public static TypeCodec<ByteBuffer> custom(@NonNull DataType cqlType) { Preconditions.checkArgument(cqlType instanceof CustomType, "cqlType must be a custom type"); return new CustomCodec((CustomType) cqlType); } + /** + * Builds a new codec that maps a CQL list to a Java list, using the given codec to map each + * element. + */ @NonNull public static <ElementT> TypeCodec<List<ElementT>> listOf(@NonNull TypeCodec<ElementT> elementCodec) { return new ListCodec<>(DataTypes.listOf(elementCodec.getCqlType()), elementCodec); } + /** + * Builds a new codec that maps a CQL set to a Java set, using the given codec to map each + * element. + */ @NonNull public static <ElementT> TypeCodec<Set<ElementT>> setOf(@NonNull TypeCodec<ElementT> elementCodec) { return new SetCodec<>(DataTypes.setOf(elementCodec.getCqlType()), elementCodec); } + /** + * Builds a new codec that maps a CQL map to a Java map, using the given codecs to map each key + * and value. + */ @NonNull public static <KeyT, ValueT> TypeCodec<Map<KeyT, ValueT>> mapOf( @NonNull TypeCodec<KeyT> keyCodec, @NonNull TypeCodec<ValueT> valueCodec) { @@ -143,29 +198,67 @@ public static TypeCodec<Map<KeyT, ValueT>> mapOf( DataTypes.mapOf(keyCodec.getCqlType(), valueCodec.getCqlType()), keyCodec, valueCodec); } + /** + * Builds a new codec that maps a CQL tuple to the driver's {@link TupleValue}, for the given type + * definition. + * + *

Note that the components of a {@link TupleValue} are stored in their encoded form. They are + * encoded/decoded on the fly when you set or get them, using the codec registry. + */ @NonNull public static TypeCodec<TupleValue> tupleOf(@NonNull TupleType cqlType) { return new TupleCodec(cqlType); } + public static <SubtypeT extends Number> TypeCodec<CqlVector<SubtypeT>> vectorOf( + @NonNull VectorType type, @NonNull TypeCodec<SubtypeT> subtypeCodec) { + return new VectorCodec( + DataTypes.vectorOf(subtypeCodec.getCqlType(), type.getDimensions()), subtypeCodec); + } + + public static <SubtypeT extends Number> TypeCodec<CqlVector<SubtypeT>> vectorOf( + int dimensions, @NonNull TypeCodec<SubtypeT> subtypeCodec) { + return new VectorCodec(DataTypes.vectorOf(subtypeCodec.getCqlType(), dimensions), subtypeCodec); + } + + /** + * Builds a new codec that maps a CQL user defined type to the driver's {@link UdtValue}, for the + * given type definition. + * + *
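A sketch of how these factories might be used, assuming a driver version with vector support (the dimension and values are illustrative):

```java
import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.data.CqlVector;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import java.nio.ByteBuffer;

public class VectorCodecExample {
  public static void main(String[] args) {
    // A codec for a 3-dimensional float vector column
    TypeCodec<CqlVector<Float>> codec = TypeCodecs.vectorOf(3, TypeCodecs.FLOAT);
    CqlVector<Float> embedding = CqlVector.newInstance(0.1f, 0.2f, 0.3f);
    ByteBuffer encoded = codec.encode(embedding, ProtocolVersion.DEFAULT);
    System.out.println(encoded.remaining()); // fixed-size encoding: 4 bytes per float
  }
}
```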

Note that the fields of a {@link UdtValue} are stored in their encoded form. They are + * encoded/decoded on the fly when you set or get them, using the codec registry. + */ @NonNull public static TypeCodec<UdtValue> udtOf(@NonNull UserDefinedType cqlType) { return new UdtCodec(cqlType); } /** - * Returns a codec that handles Apache Cassandra(R)'s timestamp type and maps it to Java's {@link - * ZonedDateTime}, using the supplied {@link ZoneId} as its source of time zone information. + * An alias for {@link ExtraTypeCodecs#ZONED_TIMESTAMP_SYSTEM}. + * + *

This exists for historical reasons: the constant was originally defined in this class, but + * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. + */ + public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_SYSTEM = + ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; + + /** + * An alias for {@link ExtraTypeCodecs#ZONED_TIMESTAMP_UTC}. * - *

      Note that Apache Cassandra(R)'s timestamp type does not store any time zone; the codecs - * created by this method are provided merely as a convenience for users that need to deal with - * zoned timestamps in their applications. + *

This exists for historical reasons: the constant was originally defined in this class, but + * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. + */ + public static final TypeCodec<ZonedDateTime> ZONED_TIMESTAMP_UTC = + ExtraTypeCodecs.ZONED_TIMESTAMP_UTC; + + /** + * An alias for {@link ExtraTypeCodecs#zonedTimestampAt(ZoneId)}. * - * @see #ZONED_TIMESTAMP_SYSTEM - * @see #ZONED_TIMESTAMP_UTC + *

This exists for historical reasons: the method was originally defined in this class, but + * technically it belongs to {@link ExtraTypeCodecs} because this is not a built-in mapping. */ @NonNull public static TypeCodec<ZonedDateTime> zonedTimestampAt(@NonNull ZoneId timeZone) { - return new ZonedTimestampCodec(timeZone); + return ExtraTypeCodecs.zonedTimestampAt(timeZone); } } diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java index 246bd3eeba1..36472f34c79 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,13 +42,28 @@ * *

      They may also provide additional mappings to other Java types (for use with methods such as * {@link Row#get(int, Class)}, {@link TupleValue#set(int, Object, Class)}, etc.) + * + *

      The default implementation returned by the driver also implements {@link + * MutableCodecRegistry}, and we strongly recommend that custom implementations do as well. The two + * interfaces are only separate for backward compatibility, because mutability was introduced in + * 4.3.0. */ public interface CodecRegistry { /** * An immutable instance, that only handles built-in driver types (that is, primitive types, and * collections, tuples, and user defined types thereof). + * + *

Note that, due to implementation details, this instance is a {@link MutableCodecRegistry}, + * but any attempt to {@linkplain MutableCodecRegistry#register(TypeCodec) register new codecs} + * will throw {@link UnsupportedOperationException}. */ - CodecRegistry DEFAULT = new DefaultCodecRegistry("default"); + CodecRegistry DEFAULT = + new DefaultCodecRegistry("default") { + @Override + public void register(TypeCodec<?> newCodec) { + throw new UnsupportedOperationException("CodecRegistry.DEFAULT is immutable"); + } + }; /** * Returns a codec to handle the conversion between the given types. diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java new file mode 100644 index 00000000000..7f5d1fb9813 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.type.codec.registry; + +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; + +/** + * A codec registry that can be extended with new user codecs at runtime. + * + *

      This interface only exists to preserve backward compatibility. In practice, the default {@link + * CodecRegistry} implementation returned by the driver implements this interface, so it can safely + * be cast. + * + *

      However {@link CodecRegistry#DEFAULT} is immutable. It implements this interface, but {@link + * #register(TypeCodec)} throws an {@link UnsupportedOperationException}. + * + * @since 4.3.0 + */ +public interface MutableCodecRegistry extends CodecRegistry { + + /** + * Adds the given codec to the registry. + * + *
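A sketch of the runtime-registration pattern this interface enables (the cast is expected to be safe with the driver's default registry):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;

public class LateRegistrationExample {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // The default registry implementation is mutable and can be extended after startup
      MutableCodecRegistry registry =
          (MutableCodecRegistry) session.getContext().getCodecRegistry();
      registry.register(ExtraTypeCodecs.BLOB_TO_ARRAY);
    }
  }
}
```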

This method will log a warning and ignore the codec if it collides with one already present + * in the registry. Note that the driver's built-in implementation uses internal synchronization + * to guarantee that two threads cannot register colliding codecs concurrently; registration is + * not expected to happen in a very concurrent manner, so this should not pose a performance + * issue. + */ + void register(TypeCodec<?> codec); + + /** Invokes {@link #register(TypeCodec)} for every codec in the given list. */ + default void register(TypeCodec<?>... codecs) { + for (TypeCodec<?> codec : codecs) { + register(codec); + } + } + + /** Invokes {@link #register(TypeCodec)} for every codec in the given list. */ + default void register(Iterable<TypeCodec<?>> codecs) { + for (TypeCodec<?> codec : codecs) { + register(codec); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java index daa269862c3..d22b6f1bfaf 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.api.core.type.reflect; import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.core.data.GettableByIndex; import com.datastax.oss.driver.api.core.data.TupleValue; import com.datastax.oss.driver.api.core.data.UdtValue; @@ -31,12 +34,15 @@ import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; +import java.time.Duration; import java.time.Instant; import java.time.LocalDate; +import java.time.LocalDateTime; import java.time.LocalTime; import java.time.ZonedDateTime; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import net.jcip.annotations.Immutable; @@ -94,6 +100,7 @@ public class GenericType { public static final GenericType ZONED_DATE_TIME = of(ZonedDateTime.class); public static final GenericType LOCAL_DATE = of(LocalDate.class); public static final GenericType LOCAL_TIME = of(LocalTime.class); + public static final GenericType LOCAL_DATE_TIME = of(LocalDateTime.class); public static final GenericType BYTE_BUFFER = of(ByteBuffer.class); public static final GenericType STRING = of(String.class); public static final GenericType BIG_INTEGER = of(BigInteger.class); @@ -103,6 +110,7 @@ public class GenericType { public static final GenericType CQL_DURATION = of(CqlDuration.class); public static final GenericType TUPLE_VALUE = of(TupleValue.class); public static final GenericType UDT_VALUE = of(UdtValue.class); + public static final GenericType DURATION = of(Duration.class); @NonNull public static GenericType of(@NonNull Class type) { @@ -142,6 +150,21 @@ public static GenericType> setOf(@NonNull GenericType elementType) return new GenericType<>(token); } + @NonNull + public static GenericType> vectorOf(@NonNull Class elementType) { + TypeToken> token = + new TypeToken>() {}.where( + new TypeParameter() {}, TypeToken.of(elementType)); + return new GenericType<>(token); + } + + @NonNull + public static GenericType> vectorOf(@NonNull GenericType elementType) { + TypeToken> token = + new TypeToken>() {}.where(new TypeParameter() {}, elementType.token); + return new GenericType<>(token); + } + @NonNull public static GenericType> mapOf( @NonNull Class keyType, @NonNull Class valueType) { @@ -160,6 +183,35 @@ public static GenericType> mapOf( return new GenericType<>(token); } + @NonNull + public static GenericType arrayOf(@NonNull Class componentType) { + TypeToken token = + new TypeToken() {}.where(new TypeParameter() {}, TypeToken.of(componentType)); + return new GenericType<>(token); + } + + @NonNull + public static GenericType arrayOf(@NonNull GenericType componentType) { + TypeToken token = + new TypeToken() {}.where(new TypeParameter() {}, componentType.token); + return new GenericType<>(token); + } + + @NonNull + public static GenericType> optionalOf(@NonNull Class componentType) { + TypeToken> token = + new TypeToken>() {}.where( + new TypeParameter() {}, TypeToken.of(componentType)); + return new GenericType<>(token); + } + + @NonNull + public static GenericType> optionalOf(@NonNull GenericType componentType) { + TypeToken> token = + new TypeToken>() {}.where(new 
TypeParameter() {}, componentType.token); + return new GenericType<>(token); + } + private final TypeToken token; private GenericType(TypeToken token) { diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java index d62e8890600..3bf0e3537e0 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeParameter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java b/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java index 7e82b4c685f..8dae31f3734 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java +++ b/core/src/main/java/com/datastax/oss/driver/api/core/uuid/Uuids.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,16 +27,17 @@ import java.net.NetworkInterface; import java.net.SocketException; import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.Calendar; import java.util.Date; import java.util.Enumeration; import java.util.HashSet; +import java.util.Objects; import java.util.Properties; import java.util.Random; import java.util.Set; -import java.util.TimeZone; +import java.util.SplittableRandom; import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; import org.slf4j.Logger; @@ -75,12 +78,80 @@ public final class Uuids { /** The system property to use to force the value of the process ID ({@value}). 
*/ public static final String PID_SYSTEM_PROPERTY = "com.datastax.oss.driver.PID"; + /** + * The namespace UUID for URLs, as defined in Appendix C of RFC-4122. When using + * this namespace to create a name-based UUID, it is expected that the name part be a valid {@link + * java.net.URL URL}. + */ + public static final UUID NAMESPACE_URL = UUID.fromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"); + + /** + * The namespace UUID for fully-qualified domain names, as defined in Appendix C of RFC-4122. When using + * this namespace to create a name-based UUID, it is expected that the name part be a valid domain + * name. + */ + public static final UUID NAMESPACE_DNS = UUID.fromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); + + /** + * The namespace UUID for OIDs, as defined in Appendix C of RFC-4122. When using + * this namespace to create a name-based UUID, it is expected that the name part be an ISO OID. + */ + public static final UUID NAMESPACE_OID = UUID.fromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"); + + /** + * The namespace UUID for X.500 domain names, as defined in Appendix C of RFC-4122. When using + * this namespace to create a name-based UUID, it is expected that the name part be a valid X.500 + * domain name, in DER or a text output format. + */ + public static final UUID NAMESPACE_X500 = UUID.fromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"); + private static final Logger LOG = LoggerFactory.getLogger(Uuids.class); private Uuids() {} - private static final long START_EPOCH = makeEpoch(); - private static final long CLOCK_SEQ_AND_NODE = makeClockSeqAndNode(); + /** + * UUID v1 timestamps must be expressed relative to October 15th, 1582, the day the Gregorian + * calendar was introduced. This constant captures that moment in time expressed in milliseconds + * before the Unix epoch. It can be obtained by calling: + * + *

      +   *   Instant.parse("1582-10-15T00:00:00Z").toEpochMilli();
      +   * 
      + */ + private static final long START_EPOCH_MILLIS = -12219292800000L; + + // Lazily initialize clock seq + node value at time of first access. Quarkus will attempt to + // initialize this class at deployment time which prevents us from just setting this value + // directly. The "node" part of the clock seq + node includes the current PID which (for + // GraalVM users) we obtain via the LLVM interop. That infrastructure isn't setup at Quarkus + // deployment time, however, thus we can't just call makeClockSeqAndNode() in an initializer. + // See JAVA-2663 for more detail on this point. + // + // Container impl adapted from Guava's memoized Supplier impl. + private static class ClockSeqAndNodeContainer { + + private volatile boolean initialized = false; + private long val; + + private long get() { + if (!initialized) { + synchronized (ClockSeqAndNodeContainer.class) { + if (!initialized) { + + initialized = true; + val = makeClockSeqAndNode(); + } + } + } + return val; + } + } + + private static final ClockSeqAndNodeContainer CLOCK_SEQ_AND_NODE = new ClockSeqAndNodeContainer(); // The min and max possible lsb for a UUID. // @@ -95,19 +166,6 @@ private Uuids() {} private static final AtomicLong lastTimestamp = new AtomicLong(0L); - private static long makeEpoch() { - // UUID v1 timestamps must be in 100-nanoseconds interval since 00:00:00.000 15 Oct 1582. - Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT-0")); - c.set(Calendar.YEAR, 1582); - c.set(Calendar.MONTH, Calendar.OCTOBER); - c.set(Calendar.DAY_OF_MONTH, 15); - c.set(Calendar.HOUR_OF_DAY, 0); - c.set(Calendar.MINUTE, 0); - c.set(Calendar.SECOND, 0); - c.set(Calendar.MILLISECOND, 0); - return c.getTimeInMillis(); - } - private static long makeNode() { // We don't have access to the MAC address (in pure JAVA at least) but need to generate a node @@ -165,6 +223,7 @@ private static String getProcessPiece() { } if (pid == null) { try { + @SuppressWarnings("StringSplitter") String pidJmx = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; pid = Integer.parseInt(pidJmx); LOG.info("PID obtained through JMX: {}", pid); @@ -201,11 +260,240 @@ private static long makeClockSeqAndNode() { /** * Creates a new random (version 4) UUID. * - *

      This method is just a convenience for {@code UUID.randomUUID()}. + *

This method has received a new implementation as of driver 4.10. Unlike the JDK's + * {@link UUID#randomUUID()} method, it no longer uses the cryptographic {@link + * java.security.SecureRandom} number generator. Instead, it uses the non-cryptographic {@link + * Random} class, with a different seed at every invocation. + * + *

      Using a non-cryptographic generator has two advantages: + * + *

        + *
      1. UUID generation is much faster than with {@link UUID#randomUUID()}; + *
2. Unlike {@link UUID#randomUUID()}, UUID generation with this method does not require + * I/O and is not a blocking call, which makes this method better suited for non-blocking + * applications. + *
      + * + * Of course, this method is intended for usage where cryptographic strength is not required, such + * as when generating row identifiers for insertion in the database. If you still need + * cryptographic strength, consider using {@link Uuids#random(Random)} instead, and pass an + * instance of {@link java.security.SecureRandom}. */ @NonNull public static UUID random() { - return UUID.randomUUID(); + return random(new Random()); + } + + /** + * Creates a new random (version 4) UUID using the provided {@link Random} instance. + * + *

This method offers more flexibility than {@link #random()} as it allows you to customize the + * {@link Random} instance to use, and also offers the possibility to reuse instances across + * successive calls. Reusing Random instances is the norm when using {@link + * java.util.concurrent.ThreadLocalRandom}, for instance; however, other Random implementations may + * perform poorly under heavy thread contention. + * + *

      Note: some Random implementations, such as {@link java.security.SecureRandom}, may trigger + * I/O activity during random number generation; these instances should not be used in + * non-blocking contexts. + */ + @NonNull + public static UUID random(@NonNull Random random) { + byte[] data = new byte[16]; + random.nextBytes(data); + return buildUuid(data, 4); + } + + /** + * Creates a new random (version 4) UUID using the provided {@link SplittableRandom} instance. + * + *

This method should be preferred to {@link #random()} when UUID generation happens in massively + * parallel computations, such as when using the ForkJoin framework. Note that {@link + * SplittableRandom} instances are not thread-safe. + */ + @NonNull + public static UUID random(@NonNull SplittableRandom random) { + byte[] data = toBytes(random.nextLong(), random.nextLong()); + return buildUuid(data, 4); + } + + /** + * Creates a new name-based (version 3) {@link UUID} from the given namespace UUID and the given + * string representing the name part. + * + *
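To make the trade-offs above concrete, here is a minimal usage sketch of the three `random` variants, for illustration only (not part of this patch); it assumes the driver's public `com.datastax.oss.driver.api.core.uuid.Uuids` class is on the classpath:

```java
import com.datastax.oss.driver.api.core.uuid.Uuids;
import java.security.SecureRandom;
import java.util.SplittableRandom;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;

public class RandomUuidExamples {
  public static void main(String[] args) {
    // Fast, non-blocking v4 generation; fine for row identifiers.
    UUID u1 = Uuids.random();

    // Reuse a Random instance across calls; ThreadLocalRandom extends Random
    // and avoids contention between threads.
    UUID u2 = Uuids.random(ThreadLocalRandom.current());

    // SplittableRandom is not thread-safe: give each parallel task its own split.
    SplittableRandom root = new SplittableRandom();
    UUID u3 = Uuids.random(root.split());

    // If cryptographic strength is required, fall back to SecureRandom
    // (which may block, so avoid it on non-blocking threads).
    UUID u4 = Uuids.random(new SecureRandom());

    System.out.println(u1 + "\n" + u2 + "\n" + u3 + "\n" + u4);
  }
}
```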

      Note that the given string will be converted to bytes using {@link StandardCharsets#UTF_8}. + * + * @param namespace The namespace UUID to use; cannot be null. + * @param name The name part; cannot be null. + * @throws NullPointerException if namespace or name is null. + * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not + * available on this platform. + */ + @NonNull + public static UUID nameBased(@NonNull UUID namespace, @NonNull String name) { + Objects.requireNonNull(name, "name cannot be null"); + return nameBased(namespace, name.getBytes(StandardCharsets.UTF_8)); + } + + /** + * Creates a new name-based (version 3) {@link UUID} from the given namespace UUID and the given + * byte array representing the name part. + * + * @param namespace The namespace UUID to use; cannot be null. + * @param name The name part; cannot be null. + * @throws NullPointerException if namespace or name is null. + * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not + * available on this platform. + */ + @NonNull + public static UUID nameBased(@NonNull UUID namespace, @NonNull byte[] name) { + return nameBased(namespace, name, 3); + } + + /** + * Creates a new name-based (version 3 or version 5) {@link UUID} from the given namespace UUID + * and the given string representing the name part. + * + *

Note that the given string will be converted to bytes using {@link StandardCharsets#UTF_8}. + * + * @param namespace The namespace UUID to use; cannot be null. + * @param name The name part; cannot be null. + * @param version The version to use, must be either 3 or 5; version 3 uses MD5 as its {@link + * MessageDigest} algorithm, while version 5 uses SHA-1. + * @throws NullPointerException if namespace or name is null. + * @throws IllegalArgumentException if version is neither 3 nor 5. + * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is + * not available on this platform. + */ + @NonNull + public static UUID nameBased(@NonNull UUID namespace, @NonNull String name, int version) { + Objects.requireNonNull(name, "name cannot be null"); + return nameBased(namespace, name.getBytes(StandardCharsets.UTF_8), version); + } + + /** + * Creates a new name-based (version 3 or version 5) {@link UUID} from the given namespace UUID + * and the given byte array representing the name part. + * + * @param namespace The namespace UUID to use; cannot be null. + * @param name The name to use; cannot be null. + * @param version The version to use, must be either 3 or 5; version 3 uses MD5 as its {@link + * MessageDigest} algorithm, while version 5 uses SHA-1. + * @throws NullPointerException if namespace or name is null. + * @throws IllegalArgumentException if version is neither 3 nor 5. + * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is + * not available on this platform. + */ + @NonNull + public static UUID nameBased(@NonNull UUID namespace, @NonNull byte[] name, int version) { + Objects.requireNonNull(namespace, "namespace cannot be null"); + Objects.requireNonNull(name, "name cannot be null"); + MessageDigest md = newMessageDigest(version); + md.update(toBytes(namespace)); + md.update(name); + return buildUuid(md.digest(), version); + } + + /** + * Creates a new name-based (version 3) {@link UUID} from the given byte array containing the + * namespace UUID and the name parts concatenated together. + * + *
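A short sketch of the name-based variants (illustrative, same assumptions as above). The key property is determinism: the same namespace and name always produce the same UUID:

```java
import com.datastax.oss.driver.api.core.uuid.Uuids;
import java.util.UUID;

public class NameBasedUuidExamples {
  public static void main(String[] args) {
    // Version 3 (MD5) is the default: same namespace + name => same UUID, every time.
    UUID a = Uuids.nameBased(Uuids.NAMESPACE_DNS, "www.example.com");
    UUID b = Uuids.nameBased(Uuids.NAMESPACE_DNS, "www.example.com");
    System.out.println(a.equals(b)); // true
    System.out.println(a.version()); // 3

    // Passing version 5 switches the digest from MD5 to SHA-1.
    UUID c = Uuids.nameBased(Uuids.NAMESPACE_URL, "https://cassandra.apache.org/", 5);
    System.out.println(c.version()); // 5
  }
}
```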

      The byte array is expected to be at least 16 bytes long. + * + * @param namespaceAndName A byte array containing the concatenated namespace UUID and name; + * cannot be null. + * @throws NullPointerException if namespaceAndName is null. + * @throws IllegalArgumentException if namespaceAndName is not at least 16 bytes + * long. + * @throws IllegalStateException if the {@link MessageDigest} algorithm for version 3 (MD5) is not + * available on this platform. + */ + @NonNull + public static UUID nameBased(@NonNull byte[] namespaceAndName) { + return nameBased(namespaceAndName, 3); + } + + /** + * Creates a new name-based (version 3 or version 5) {@link UUID} from the given byte array + * containing the namespace UUID and the name parts concatenated together. + * + *

The byte array is expected to be at least 16 bytes long. + * + * @param namespaceAndName A byte array containing the concatenated namespace UUID and name; + * cannot be null. + * @param version The version to use, must be either 3 or 5. + * @throws NullPointerException if namespaceAndName is null. + * @throws IllegalArgumentException if namespaceAndName is not at least 16 bytes + * long. + * @throws IllegalArgumentException if version is neither 3 nor 5. + * @throws IllegalStateException if the {@link MessageDigest} algorithm for the desired version is + * not available on this platform. + */ + @NonNull + public static UUID nameBased(@NonNull byte[] namespaceAndName, int version) { + Objects.requireNonNull(namespaceAndName, "namespaceAndName cannot be null"); + if (namespaceAndName.length < 16) { + throw new IllegalArgumentException("namespaceAndName must be at least 16 bytes long"); + } + MessageDigest md = newMessageDigest(version); + md.update(namespaceAndName); + return buildUuid(md.digest(), version); + } + + @NonNull + private static MessageDigest newMessageDigest(int version) { + if (version != 3 && version != 5) { + throw new IllegalArgumentException( + "Invalid name-based UUID version, expecting 3 or 5, got: " + version); + } + String algorithm = version == 3 ? "MD5" : "SHA-1"; + try { + return MessageDigest.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(algorithm + " algorithm not available", e); + } + } + + @NonNull + private static UUID buildUuid(@NonNull byte[] data, int version) { + // clear and set version + data[6] &= (byte) 0x0f; + data[6] |= (byte) (version << 4); + // clear and set variant to IETF + data[8] &= (byte) 0x3f; + data[8] |= (byte) 0x80; + return fromBytes(data); + } + + private static UUID fromBytes(byte[] data) { + // data longer than 16 bytes will be truncated as mandated by the specs + assert data.length >= 16; + long msb = 0; + for (int i = 0; i < 8; i++) { + msb = (msb << 8) | (data[i] & 0xff); + } + long lsb = 0; + for (int i = 8; i < 16; i++) { + lsb = (lsb << 8) | (data[i] & 0xff); + } + return new UUID(msb, lsb); + } + + private static byte[] toBytes(UUID uuid) { + long msb = uuid.getMostSignificantBits(); + long lsb = uuid.getLeastSignificantBits(); + return toBytes(msb, lsb); + } + + private static byte[] toBytes(long msb, long lsb) { + byte[] out = new byte[16]; + for (int i = 0; i < 8; i++) { + out[i] = (byte) (msb >> ((7 - i) * 8)); + } + for (int i = 8; i < 16; i++) { + out[i] = (byte) (lsb >> ((15 - i) * 8)); + } + return out; } /** @@ -225,10 +513,16 @@ public static UUID random() { * * If you simply need to perform a range query on a {@code timeuuid} column, use the "fake" UUID * generated by {@link #startOf(long)} and {@link #endOf(long)}. + * + *
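The bit manipulation in `buildUuid` (version in the high nibble of byte 6, IETF variant `10xxxxxx` in byte 8) is observable through the standard `java.util.UUID` accessors; a quick check, for illustration:

```java
import com.datastax.oss.driver.api.core.uuid.Uuids;
import java.util.UUID;

public class UuidBitsCheck {
  public static void main(String[] args) {
    UUID u = Uuids.random();
    System.out.println(u.version()); // 4: written into the high nibble of byte 6
    System.out.println(u.variant()); // 2: the IETF variant written into byte 8
  }
}
```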

      Usage with non-blocking threads: beware that this method may block the calling thread on its + * very first invocation, because the node part of time-based UUIDs needs to be computed at that + * moment, and the computation may require the loading of native libraries. If that is a problem, + * consider invoking this method once from a thread that is allowed to block. Subsequent + * invocations are guaranteed not to block. */ @NonNull public static UUID timeBased() { - return new UUID(makeMsb(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE); + return new UUID(makeMsb(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE.get()); } /** @@ -298,7 +592,7 @@ public static long unixTimestamp(@NonNull UUID uuid) { uuid.version())); } long timestamp = uuid.timestamp(); - return (timestamp / 10000) + START_EPOCH; + return (timestamp / 10000) + START_EPOCH_MILLIS; } // Use {@link System#currentTimeMillis} for a base time in milliseconds, and if we are in the same @@ -335,7 +629,7 @@ private static long getCurrentTimestamp() { @VisibleForTesting static long fromUnixTimestamp(long tstamp) { - return (tstamp - START_EPOCH) * 10000; + return (tstamp - START_EPOCH_MILLIS) * 10000; } private static long millisOf(long timestamp) { diff --git a/core/src/main/java/com/datastax/oss/driver/api/package-info.java b/core/src/main/java/com/datastax/oss/driver/api/package-info.java index b783940b313..7b2219647b2 100644 --- a/core/src/main/java/com/datastax/oss/driver/api/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/api/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java index 77490e57416..055ab26909f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
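For illustration, the `unixTimestamp` conversion above can be verified by hand; since `START_EPOCH_MILLIS` is private, this sketch inlines its value (-12219292800000):

```java
import com.datastax.oss.driver.api.core.uuid.Uuids;
import java.util.UUID;

public class TimeBasedUuidExample {
  public static void main(String[] args) {
    // May block once: the node part is computed on the first invocation.
    UUID u = Uuids.timeBased();

    // uuid.timestamp() counts 100ns units since 1582-10-15; unixTimestamp()
    // divides by 10,000 and shifts by START_EPOCH_MILLIS.
    long driverValue = Uuids.unixTimestamp(u);
    long manualValue = u.timestamp() / 10_000 - 12_219_292_800_000L;

    System.out.println(driverValue == manualValue);                // true
    System.out.println(System.currentTimeMillis() - driverValue);  // close to 0
  }
}
```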
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,7 +46,7 @@ public AsyncPagingIterableWrapper( new AbstractIterator() { @Override protected TargetT computeNext() { - return (sourceIterator.hasNext()) + return sourceIterator.hasNext() ? elementMapper.apply(sourceIterator.next()) : endOfData(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistry.java deleted file mode 100644 index f76f2d9b0fa..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistry.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collection; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Optional; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Built-in implementation of the protocol version registry, that supports the protocol versions of - * Apache Cassandra. - * - *

      This can be overridden with a custom implementation by subclassing {@link - * DefaultDriverContext}. - * - * @see DefaultProtocolVersion - */ -@ThreadSafe -public class CassandraProtocolVersionRegistry implements ProtocolVersionRegistry { - - private static final Logger LOG = LoggerFactory.getLogger(CassandraProtocolVersionRegistry.class); - private static final ImmutableList values = - ImmutableList.builder().add(DefaultProtocolVersion.values()).build(); - - private final String logPrefix; - private final NavigableMap versionsByCode; - - public CassandraProtocolVersionRegistry(String logPrefix) { - this(logPrefix, DefaultProtocolVersion.values()); - } - - protected CassandraProtocolVersionRegistry(String logPrefix, ProtocolVersion[]... versionRanges) { - this.logPrefix = logPrefix; - this.versionsByCode = byCode(versionRanges); - } - - @Override - public ProtocolVersion fromCode(int code) { - ProtocolVersion protocolVersion = versionsByCode.get(code); - if (protocolVersion == null) { - throw new IllegalArgumentException("Unknown protocol version code: " + code); - } - return protocolVersion; - } - - @Override - public ProtocolVersion fromName(String name) { - for (ProtocolVersion version : versionsByCode.values()) { - if (version.name().equals(name)) { - return version; - } - } - throw new IllegalArgumentException("Unknown protocol version name: " + name); - } - - @Override - public ProtocolVersion highestNonBeta() { - ProtocolVersion highest = versionsByCode.lastEntry().getValue(); - if (!highest.isBeta()) { - return highest; - } else { - return downgrade(highest) - .orElseThrow(() -> new AssertionError("There should be at least one non-beta version")); - } - } - - @Override - public Optional downgrade(ProtocolVersion version) { - Map.Entry previousEntry = - versionsByCode.lowerEntry(version.getCode()); - if (previousEntry == null) { - return Optional.empty(); - } else { - ProtocolVersion previousVersion = previousEntry.getValue(); - // Beta versions are skipped during negotiation - return (previousVersion.isBeta()) ? downgrade(previousVersion) : Optional.of(previousVersion); - } - } - - @Override - public ProtocolVersion highestCommon(Collection nodes) { - if (nodes == null || nodes.isEmpty()) { - throw new IllegalArgumentException("Expected at least one node"); - } - - SortedSet candidates = new TreeSet<>(); - - for (DefaultProtocolVersion version : DefaultProtocolVersion.values()) { - // Beta versions always need to be forced, and we only call this method if the version - // wasn't forced - if (!version.isBeta()) { - candidates.add(version); - } - } - - // The C*<=>protocol mapping is hardcoded in the code below, I don't see a need to be more - // sophisticated right now. 
- for (Node node : nodes) { - Version version = node.getCassandraVersion(); - if (version == null) { - LOG.warn( - "[{}] Node {} reports null Cassandra version, " - + "ignoring it from optimal protocol version computation", - logPrefix, - node.getEndPoint()); - continue; - } - version = version.nextStable(); - if (version.compareTo(Version.V2_1_0) < 0) { - throw new UnsupportedProtocolVersionException( - node.getEndPoint(), - String.format( - "Node %s reports Cassandra version %s, " - + "but the driver only supports 2.1.0 and above", - node.getEndPoint(), version), - ImmutableList.of(DefaultProtocolVersion.V3, DefaultProtocolVersion.V4)); - } - - LOG.debug( - "[{}] Node {} reports Cassandra version {}", logPrefix, node.getEndPoint(), version); - if (version.compareTo(Version.V2_2_0) < 0 && candidates.remove(DefaultProtocolVersion.V4)) { - LOG.debug("[{}] Excluding protocol V4", logPrefix); - } - } - - if (candidates.isEmpty()) { - // Note: with the current algorithm, this never happens - throw new UnsupportedProtocolVersionException( - null, - String.format( - "Could not determine a common protocol version, " - + "enable DEBUG logs for '%s' for more details", - LOG.getName()), - ImmutableList.of(DefaultProtocolVersion.V3, DefaultProtocolVersion.V4)); - } else { - return candidates.last(); - } - } - - @Override - public boolean supports(ProtocolVersion version, ProtocolFeature feature) { - if (DefaultProtocolFeature.UNSET_BOUND_VALUES.equals(feature)) { - return version.getCode() >= 4; - } else if (DefaultProtocolFeature.PER_REQUEST_KEYSPACE.equals(feature)) { - return version.getCode() >= 5; - } else { - throw new IllegalArgumentException("Unhandled protocol feature: " + feature); - } - } - - @Override - public ImmutableList getValues() { - return values; - } - - private NavigableMap byCode(ProtocolVersion[][] versionRanges) { - NavigableMap map = new TreeMap<>(); - for (ProtocolVersion[] versionRange : versionRanges) { - for (ProtocolVersion version : versionRange) { - ProtocolVersion previous = map.put(version.getCode(), version); - Preconditions.checkArgument( - previous == null, - "Duplicate version code: %s in %s and %s", - version.getCode(), - previous, - version); - } - } - return map; - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java index c9353df9b55..7b66a61636c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ConsistencyLevelRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,6 +31,8 @@ public interface ConsistencyLevelRegistry { int nameToCode(String name); + ConsistencyLevel nameToLevel(String name); + /** @return all the values known to this driver instance. */ Iterable getValues(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java index 110e49be067..bb65661b72f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ContactPoints.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,14 +19,11 @@ import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; +import com.datastax.oss.driver.internal.core.util.AddressUtils; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import com.datastax.oss.driver.shaded.guava.common.collect.Sets; -import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; import org.slf4j.Logger; @@ -39,7 +38,22 @@ public static Set merge( Set result = Sets.newHashSet(programmaticContactPoints); for (String spec : configContactPoints) { - for (InetSocketAddress address : extract(spec, resolve)) { + + Set addresses = Collections.emptySet(); + try { + addresses = AddressUtils.extract(spec, resolve); + } catch (RuntimeException e) { + LOG.warn("Ignoring invalid contact point {} ({})", spec, e.getMessage(), e); + } + + if (addresses.size() > 1) { + LOG.info( + "Contact point {} resolves to multiple addresses, will use them all ({})", + spec, + addresses); + } + + for (InetSocketAddress address : addresses) { DefaultEndPoint endPoint = new DefaultEndPoint(address); boolean wasNew = result.add(endPoint); if (!wasNew) { @@ -49,43 +63,4 @@ public static Set merge( } return ImmutableSet.copyOf(result); } - - private static Set extract(String spec, boolean resolve) { - int separator = spec.lastIndexOf(':'); - if (separator < 0) { - LOG.warn("Ignoring invalid contact point {} (expecting host:port)", spec); - return Collections.emptySet(); - } - - String host = spec.substring(0, separator); - String portSpec = spec.substring(separator + 1); - int port; - try 
{ - port = Integer.parseInt(portSpec); - } catch (NumberFormatException e) { - LOG.warn("Ignoring invalid contact point {} (expecting a number, got {})", spec, portSpec); - return Collections.emptySet(); - } - if (!resolve) { - return ImmutableSet.of(InetSocketAddress.createUnresolved(host, port)); - } else { - try { - InetAddress[] inetAddresses = InetAddress.getAllByName(host); - if (inetAddresses.length > 1) { - LOG.info( - "Contact point {} resolves to multiple addresses, will use them all ({})", - spec, - Arrays.deepToString(inetAddresses)); - } - Set result = new HashSet<>(); - for (InetAddress inetAddress : inetAddresses) { - result.add(new InetSocketAddress(inetAddress, port)); - } - return result; - } catch (UnknownHostException e) { - LOG.warn("Ignoring invalid contact point {} (unknown host {})", spec, host); - return Collections.emptySet(); - } - } - } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java b/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java index 43b2b2fe249..a00da0e4b1a 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/CqlIdentifiers.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
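The removed `extract` logic now lives in `AddressUtils` (not shown in this hunk). Its behavior, as the deleted code documents it, boils down to the sketch below: split on the last `:`, parse the port, and either keep the host unresolved or fan out to every resolved address. The class and method names here are hypothetical, not the driver's API:

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

final class ContactPointSketch {
  // Mirrors the deleted ContactPoints.extract(): the *last* ':' separates host
  // and port, so IPv6 literals keep their internal colons in the host part.
  static Set<InetSocketAddress> extract(String spec, boolean resolve) throws UnknownHostException {
    int separator = spec.lastIndexOf(':');
    if (separator < 0) {
      throw new IllegalArgumentException("Expecting host:port, got " + spec);
    }
    String host = spec.substring(0, separator);
    int port = Integer.parseInt(spec.substring(separator + 1));
    if (!resolve) {
      return Collections.singleton(InetSocketAddress.createUnresolved(host, port));
    }
    // A single name may resolve to several A/AAAA records; keep them all.
    Set<InetSocketAddress> result = new HashSet<>();
    for (InetAddress inetAddress : InetAddress.getAllByName(host)) {
      result.add(new InetSocketAddress(inetAddress, port));
    }
    return result;
  }
}
```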
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,24 +20,56 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.function.Function; public class CqlIdentifiers { - public static List wrap(Iterable in) { - ImmutableList.Builder out = ImmutableList.builder(); + @NonNull + private static List wrap( + @NonNull Iterable in, @NonNull Function fn) { + + Objects.requireNonNull(in, "Input Iterable must not be null"); + Objects.requireNonNull(fn, "CqlIdentifier conversion function must not be null"); + ImmutableList.Builder builder = ImmutableList.builder(); for (String name : in) { - out.add(CqlIdentifier.fromCql(name)); + builder.add(fn.apply(name)); } - return out.build(); + return builder.build(); + } + + @NonNull + public static List wrap(@NonNull Iterable in) { + return wrap(in, CqlIdentifier::fromCql); + } + + @NonNull + public static List wrapInternal(@NonNull Iterable in) { + return wrap(in, CqlIdentifier::fromInternal); } - public static Map wrapKeys(Map in) { - ImmutableMap.Builder out = ImmutableMap.builder(); + @NonNull + private static Map wrapKeys( + @NonNull Map in, @NonNull Function fn) { + Objects.requireNonNull(in, "Input Map must not be null"); + Objects.requireNonNull(fn, "CqlIdentifier conversion function must not be null"); + ImmutableMap.Builder builder = ImmutableMap.builder(); for (Map.Entry entry : in.entrySet()) { - out.put(CqlIdentifier.fromCql(entry.getKey()), entry.getValue()); + builder.put(fn.apply(entry.getKey()), entry.getValue()); } - return out.build(); + return builder.build(); + } + + @NonNull + public static Map wrapKeys(@NonNull Map in) { + return wrapKeys(in, CqlIdentifier::fromCql); + } + + @NonNull + public static Map wrapKeysInternal(@NonNull Map in) { + return wrapKeys(in, CqlIdentifier::fromInternal); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java index ba833674292..b563ad5facc 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultConsistencyLevelRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
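The reason `CqlIdentifiers` now needs both `fromCql` and `fromInternal` code paths is that the two factories interpret their input differently; a quick illustration using the public `CqlIdentifier` API:

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;

public class CqlIdentifierExample {
  public static void main(String[] args) {
    // fromCql() parses the CQL form: unquoted identifiers are case-insensitive
    // and normalized to lower case...
    System.out.println(CqlIdentifier.fromCql("MyTable").asInternal());     // mytable

    // ...while double-quoting preserves the exact case.
    System.out.println(CqlIdentifier.fromCql("\"MyTable\"").asInternal()); // MyTable

    // fromInternal() takes the name exactly as stored in the system tables.
    CqlIdentifier internal = CqlIdentifier.fromInternal("MyTable");
    System.out.println(internal.equals(CqlIdentifier.fromCql("\"MyTable\""))); // true
  }
}
```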
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,6 +48,11 @@ public int nameToCode(String name) { return NAME_TO_CODE.get(name); } + @Override + public ConsistencyLevel nameToLevel(String name) { + return DefaultConsistencyLevel.valueOf(name); + } + @Override public Iterable getValues() { return VALUES; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java index b24a12cb940..8280ae8fec5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultMavenCoordinates.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,7 +53,7 @@ public static DefaultMavenCoordinates buildFromResource(URL resource) { String artifactId = props.getProperty("driver.artifactId"); String version = props.getProperty("driver.version"); if (ByteBuf.class.getPackage().getName().contains("com.datastax.oss.driver.shaded")) { - version += "-shaded"; + artifactId += "-shaded"; } return new DefaultMavenCoordinates(name, groupId, artifactId, Version.parse(version)); } catch (IOException e) { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java index 8d26d1d23f4..5d79f4ed0a5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolFeature.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,5 +37,34 @@ public enum DefaultProtocolFeature implements ProtocolFeature { * @see CASSANDRA-10145 */ PER_REQUEST_KEYSPACE, + + /** + * Support for smallint and tinyint types. + * + * @see CASSANDRA-8951 + */ + SMALLINT_AND_TINYINT_TYPES, + + /** + * Support for the date type. + * + * @see CASSANDRA-7523 + */ + DATE_TYPE, + + /** + * The ability to set a custom "now" time on statements (for testing purposes). + * + * @see CASSANDRA-14664 + */ + NOW_IN_SECONDS, + + /** + * The new protocol framing format introduced in Cassandra 4: wrapping multiple frames into a + * single "segment" to checksum (and possibly compress) them together. + * + * @see CASSANDRA-15299 + */ + MODERN_FRAMING, ; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java new file mode 100644 index 00000000000..80850e8e95a --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistry.java @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.dse.driver.internal.core.DseProtocolFeature; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Built-in implementation of the protocol version registry, supports all Cassandra and DSE + * versions. 
+ */ +@ThreadSafe +public class DefaultProtocolVersionRegistry implements ProtocolVersionRegistry { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultProtocolVersionRegistry.class); + private static final List allVersions = + ImmutableList.builder() + .add(DefaultProtocolVersion.values()) + .add(DseProtocolVersion.values()) + .build(); + + @VisibleForTesting + static final Version DSE_4_7_0 = Objects.requireNonNull(Version.parse("4.7.0")); + + @VisibleForTesting + static final Version DSE_5_0_0 = Objects.requireNonNull(Version.parse("5.0.0")); + + @VisibleForTesting + static final Version DSE_5_1_0 = Objects.requireNonNull(Version.parse("5.1.0")); + + @VisibleForTesting + static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); + + @VisibleForTesting + static final Version DSE_7_0_0 = Objects.requireNonNull(Version.parse("7.0.0")); + + private final String logPrefix; + + public DefaultProtocolVersionRegistry(String logPrefix) { + this.logPrefix = logPrefix; + } + + @Override + public ProtocolVersion fromName(String name) { + try { + return DefaultProtocolVersion.valueOf(name); + } catch (IllegalArgumentException noOssVersion) { + try { + return DseProtocolVersion.valueOf(name); + } catch (IllegalArgumentException noDseVersion) { + throw new IllegalArgumentException("Unknown protocol version name: " + name); + } + } + } + + @Override + public ProtocolVersion highestNonBeta() { + ProtocolVersion highest = allVersions.get(allVersions.size() - 1); + if (!highest.isBeta()) { + return highest; + } else { + return downgrade(highest) + .orElseThrow(() -> new AssertionError("There should be at least one non-beta version")); + } + } + + @Override + public Optional downgrade(ProtocolVersion version) { + int index = allVersions.indexOf(version); + if (index < 0) { + // This method is called with a value obtained from fromName, so this should never happen + throw new AssertionError(version + " is not a known version"); + } else if (index == 0) { + return Optional.empty(); + } else { + ProtocolVersion previousVersion = allVersions.get(index - 1); + // Beta versions are skipped during negotiation + return previousVersion.isBeta() ? downgrade(previousVersion) : Optional.of(previousVersion); + } + } + + @Override + public ProtocolVersion highestCommon(Collection nodes) { + if (nodes == null || nodes.isEmpty()) { + throw new IllegalArgumentException("Expected at least one node"); + } + + // Start with all non-beta versions (beta versions are always forced, and we don't call this + // method if the version was forced). + Set candidates = new LinkedHashSet<>(); + for (ProtocolVersion version : allVersions) { + if (!version.isBeta()) { + candidates.add(version); + } + } + // Keep an unfiltered copy in case we need to throw an exception below + ImmutableList initialCandidates = ImmutableList.copyOf(candidates); + + // For each node, remove the versions it doesn't support + for (Node node : nodes) { + + // We can't trust the Cassandra version reported by DSE to infer the maximum OSS protocol + // supported. For example DSE 6 reports release_version 4.0-SNAPSHOT, but only supports OSS + // protocol v4 (while Cassandra 4 will support v5). So we treat DSE separately. 
+ Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); + if (dseVersion != null) { + LOG.debug("[{}] Node {} reports DSE version {}", logPrefix, node.getEndPoint(), dseVersion); + dseVersion = dseVersion.nextStable(); + if (dseVersion.compareTo(DSE_4_7_0) < 0) { + throw new UnsupportedProtocolVersionException( + node.getEndPoint(), + String.format( + "Node %s reports DSE version %s, " + + "but the driver only supports 4.7.0 and above", + node.getEndPoint(), dseVersion), + initialCandidates); + } else if (dseVersion.compareTo(DSE_5_0_0) < 0) { + // DSE 4.7.x, 4.8.x + removeHigherThan(DefaultProtocolVersion.V3, null, candidates); + } else if (dseVersion.compareTo(DSE_5_1_0) < 0) { + // DSE 5.0 + removeHigherThan(DefaultProtocolVersion.V4, null, candidates); + } else if (dseVersion.compareTo(DSE_6_0_0) < 0) { + // DSE 5.1 + removeHigherThan(DefaultProtocolVersion.V4, DseProtocolVersion.DSE_V1, candidates); + } else if (dseVersion.compareTo(DSE_7_0_0) < 0) { + // DSE 6 + removeHigherThan(DefaultProtocolVersion.V4, DseProtocolVersion.DSE_V2, candidates); + } else { + // DSE 7.0 + removeHigherThan(DefaultProtocolVersion.V5, DseProtocolVersion.DSE_V2, candidates); + } + } else { // not DSE + Version cassandraVersion = node.getCassandraVersion(); + if (cassandraVersion == null) { + LOG.warn( + "[{}] Node {} reports neither DSE version nor Cassandra version, " + + "ignoring it from optimal protocol version computation", + logPrefix, + node.getEndPoint()); + continue; + } + cassandraVersion = cassandraVersion.nextStable(); + LOG.debug( + "[{}] Node {} reports Cassandra version {}", + logPrefix, + node.getEndPoint(), + cassandraVersion); + if (cassandraVersion.compareTo(Version.V2_1_0) < 0) { + throw new UnsupportedProtocolVersionException( + node.getEndPoint(), + String.format( + "Node %s reports Cassandra version %s, " + + "but the driver only supports 2.1.0 and above", + node.getEndPoint(), cassandraVersion), + ImmutableList.of(DefaultProtocolVersion.V3, DefaultProtocolVersion.V4)); + } else if (cassandraVersion.compareTo(Version.V2_2_0) < 0) { + // 2.1.0 + removeHigherThan(DefaultProtocolVersion.V3, null, candidates); + } else if (cassandraVersion.compareTo(Version.V4_0_0) < 0) { + // 2.2, 3.x + removeHigherThan(DefaultProtocolVersion.V4, null, candidates); + } else { + // 4.0 + removeHigherThan(DefaultProtocolVersion.V5, null, candidates); + } + } + } + + // If we have versions left, return the highest one + ProtocolVersion max = null; + for (ProtocolVersion candidate : candidates) { + if (max == null || max.getCode() < candidate.getCode()) { + max = candidate; + } + } + if (max == null) { // Note: with the current algorithm, this never happens + throw new UnsupportedProtocolVersionException( + null, + String.format( + "Could not determine a common protocol version, " + + "enable DEBUG logs for '%s' for more details", + LOG.getName()), + initialCandidates); + } else { + return max; + } + } + + // Removes all versions strictly higher than the given versions from candidates. A null + // maxDseVersion means "remove all DSE versions". 
+ private void removeHigherThan( + DefaultProtocolVersion maxOssVersion, + DseProtocolVersion maxDseVersion, + Set candidates) { + for (DefaultProtocolVersion ossVersion : DefaultProtocolVersion.values()) { + if (ossVersion.compareTo(maxOssVersion) > 0 && candidates.remove(ossVersion)) { + LOG.debug("[{}] Excluding protocol {}", logPrefix, ossVersion); + } + } + for (DseProtocolVersion dseVersion : DseProtocolVersion.values()) { + if ((maxDseVersion == null || dseVersion.compareTo(maxDseVersion) > 0) + && candidates.remove(dseVersion)) { + LOG.debug("[{}] Excluding protocol {}", logPrefix, dseVersion); + } + } + } + + @Override + public boolean supports(ProtocolVersion version, ProtocolFeature feature) { + int code = version.getCode(); + if (DefaultProtocolFeature.SMALLINT_AND_TINYINT_TYPES.equals(feature) + || DefaultProtocolFeature.DATE_TYPE.equals(feature) + || DefaultProtocolFeature.UNSET_BOUND_VALUES.equals(feature)) { + // All DSE versions and all OSS V4+ + return DefaultProtocolVersion.V4.getCode() <= code; + } else if (DefaultProtocolFeature.PER_REQUEST_KEYSPACE.equals(feature)) { + // Only DSE_V2+ and OSS V5+ + return (DefaultProtocolVersion.V5.getCode() <= code + && code < DseProtocolVersion.DSE_V1.getCode()) + || DseProtocolVersion.DSE_V2.getCode() <= code; + } else if (DefaultProtocolFeature.NOW_IN_SECONDS.equals(feature) + || DefaultProtocolFeature.MODERN_FRAMING.equals(feature)) { + // OSS only, V5+ + return DefaultProtocolVersion.V5.getCode() <= code + && code < DseProtocolVersion.DSE_V1.getCode(); + } else if (DseProtocolFeature.CONTINUOUS_PAGING.equals(feature)) { + // All DSE versions + return DseProtocolVersion.DSE_V1.getCode() <= code; + } else { + throw new IllegalArgumentException("Unhandled protocol feature: " + feature); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java index ab8989ebbd3..1f79f673d02 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/PagingIterableWrapper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -65,7 +67,7 @@ public PagingIterableWrapper( new AbstractIterator() { @Override protected TargetT computeNext() { - return (sourceIterator.hasNext()) + return sourceIterator.hasNext() ? 
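A sketch of how the negotiation and feature gating above behave (these are internal driver classes, used here purely for illustration):

```java
import com.datastax.oss.driver.api.core.DefaultProtocolVersion;
import com.datastax.oss.driver.internal.core.DefaultProtocolFeature;
import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry;

public class ProtocolFeatureExample {
  public static void main(String[] args) {
    DefaultProtocolVersionRegistry registry = new DefaultProtocolVersionRegistry("example");

    // smallint/tinyint, date and unset values all arrived with OSS v4:
    System.out.println(
        registry.supports(DefaultProtocolVersion.V3, DefaultProtocolFeature.DATE_TYPE)); // false
    System.out.println(
        registry.supports(DefaultProtocolVersion.V4, DefaultProtocolFeature.DATE_TYPE)); // true

    // Modern framing is OSS v5+ only; DSE versions keep the legacy framing:
    System.out.println(
        registry.supports(DefaultProtocolVersion.V5, DefaultProtocolFeature.MODERN_FRAMING)); // true

    // downgrade() walks down the version list, skipping beta versions:
    System.out.println(registry.downgrade(DefaultProtocolVersion.V4)); // Optional[V3]
  }
}
```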
elementMapper.apply(sourceIterator.next()) : endOfData(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java index 7f4e286ea17..bf73f7bbb16 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolFeature.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java index 2f3c3b9a972..eff1d099905 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ProtocolVersionRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,13 +28,6 @@ /** Defines which native protocol versions are supported by a driver instance. */ public interface ProtocolVersionRegistry { - /** - * Look up a version by its {@link ProtocolVersion#getCode()} code}. - * - * @throws IllegalArgumentException if there is no known version with this code. - */ - ProtocolVersion fromCode(int code); - /** * Look up a version by its {@link ProtocolVersion#name() name}. This is used when a version was * forced in the configuration. @@ -69,7 +64,4 @@ public interface ProtocolVersionRegistry { /** Whether a given version supports a given feature. */ boolean supports(ProtocolVersion version, ProtocolFeature feature); - - /** @return all the values known to this driver instance. 
*/ - Iterable getValues(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java index 055da787381..88e6cdb3bb2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java new file mode 100644 index 00000000000..5cc6c2518fb --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslator.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME; + +import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; +import com.datastax.oss.driver.api.core.context.DriverContext; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.net.InetSocketAddress; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This translator always returns the same hostname, no matter what IP address a node has, but + * still uses the node's native transport port. + * + *

The translator can be used for scenarios where all nodes are behind some kind of proxy, and it + * is not tailored for one concrete use case. One can use this, for example, for AWS PrivateLink, + * where all nodes are exposed to the consumer behind one hostname pointing to an AWS endpoint. + */ +public class FixedHostNameAddressTranslator implements AddressTranslator { + + private static final Logger LOG = LoggerFactory.getLogger(FixedHostNameAddressTranslator.class); + + private final String advertisedHostname; + private final String logPrefix; + + public FixedHostNameAddressTranslator(@NonNull DriverContext context) { + logPrefix = context.getSessionName(); + advertisedHostname = + context.getConfig().getDefaultProfile().getString(ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME); + } + + @NonNull + @Override + public InetSocketAddress translate(@NonNull InetSocketAddress address) { + final int port = address.getPort(); + LOG.debug("[{}] Resolved {}:{} to {}:{}", logPrefix, address, port, advertisedHostname, port); + return new InetSocketAddress(advertisedHostname, port); + } + + @Override + public void close() {} +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java index 7628d6a0eda..0922821be8c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/PassThroughAddressTranslator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
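The observable effect of `FixedHostNameAddressTranslator`: every node collapses onto one advertised hostname while each node's native transport port is preserved. A standalone sketch of that mapping (the hostname and addresses are hypothetical):

```java
import java.net.InetSocketAddress;

public class FixedHostNameSketch {
  // Same one-liner as FixedHostNameAddressTranslator.translate().
  static InetSocketAddress translate(InetSocketAddress address, String advertisedHostname) {
    return new InetSocketAddress(advertisedHostname, address.getPort());
  }

  public static void main(String[] args) {
    String advertised = "cassandra.example.com"; // would come from the driver config

    InetSocketAddress node1 = InetSocketAddress.createUnresolved("10.0.12.34", 9042);
    InetSocketAddress node2 = InetSocketAddress.createUnresolved("10.0.56.78", 9043);

    // Both nodes map to the advertised hostname; only the port differs.
    System.out.println(translate(node1, advertised)); // host cassandra.example.com, port 9042
    System.out.println(translate(node2, advertised)); // host cassandra.example.com, port 9043
  }
}
```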
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import com.datastax.oss.driver.shaded.guava.common.base.Splitter; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.List; + +class Subnet { + private final byte[] subnet; + private final byte[] networkMask; + private final byte[] upper; + private final byte[] lower; + + private Subnet(byte[] subnet, byte[] networkMask) { + this.subnet = subnet; + this.networkMask = networkMask; + + byte[] upper = new byte[subnet.length]; + byte[] lower = new byte[subnet.length]; + for (int i = 0; i < subnet.length; i++) { + upper[i] = (byte) (subnet[i] | ~networkMask[i]); + lower[i] = (byte) (subnet[i] & networkMask[i]); + } + this.upper = upper; + this.lower = lower; + } + + static Subnet parse(String subnetCIDR) throws UnknownHostException { + List<String> parts = Splitter.on("/").splitToList(subnetCIDR); + if (parts.size() != 2) { + throw new IllegalArgumentException("Invalid subnet: " + subnetCIDR); + } + + boolean isIPv6 = parts.get(0).contains(":"); + byte[] subnet = InetAddress.getByName(parts.get(0)).getAddress(); + if (isIPv4(subnet) && isIPv6) { + subnet = toIPv6(subnet); + } + int prefixLength = Integer.parseInt(parts.get(1)); + validatePrefixLength(subnet, prefixLength); + + byte[] networkMask = toNetworkMask(subnet, prefixLength); + validateSubnetIsPrefixBlock(subnet, networkMask, subnetCIDR); + return new Subnet(subnet, networkMask); + } + + private static byte[] toNetworkMask(byte[] subnet, int prefixLength) { + int fullBytes = prefixLength / 8; + int remainingBits = prefixLength % 8; + byte[] mask = new byte[subnet.length]; + Arrays.fill(mask, 0, fullBytes, (byte) 0xFF); + if (remainingBits > 0) { + mask[fullBytes] = (byte) (0xFF << (8 - remainingBits)); + } + return mask; + } + + private static void validatePrefixLength(byte[] subnet, int prefixLength) { + int maxPrefixLength = subnet.length * 8; + if (prefixLength < 0 || maxPrefixLength < prefixLength) { + throw new IllegalArgumentException( + String.format( + "Prefix length %s must be within [0; %s]", prefixLength, maxPrefixLength)); + } + } + + private static void validateSubnetIsPrefixBlock( + byte[] subnet, byte[] networkMask, String subnetCIDR) { + byte[] prefixBlock = toPrefixBlock(subnet, networkMask); + if (!Arrays.equals(subnet, prefixBlock)) { + throw new IllegalArgumentException( + String.format("Subnet %s must be represented as a network prefix block", subnetCIDR)); + } + } + + private static byte[] toPrefixBlock(byte[] subnet, byte[] networkMask) { + byte[] prefixBlock = new byte[subnet.length]; + for (int i = 0; i < subnet.length; i++) { + prefixBlock[i] = (byte) (subnet[i] & networkMask[i]); + } + return prefixBlock; + } + + @VisibleForTesting + byte[] getSubnet() { + return Arrays.copyOf(subnet, subnet.length); + } + + @VisibleForTesting + byte[] getNetworkMask() { + return Arrays.copyOf(networkMask, networkMask.length); + } + + byte[] getUpper() { + return Arrays.copyOf(upper, upper.length); + 
} + + byte[] getLower() { + return Arrays.copyOf(lower, lower.length); + } + + boolean isIPv4() { + return isIPv4(subnet); + } + + boolean isIPv6() { + return isIPv6(subnet); + } + + boolean contains(byte[] ip) { + if (isIPv4() && !isIPv4(ip)) { + return false; + } + if (isIPv6() && isIPv4(ip)) { + ip = toIPv6(ip); + } + if (subnet.length != ip.length) { + throw new IllegalArgumentException( + "IP version is unknown: " + Arrays.toString(toZeroBasedByteArray(ip))); + } + for (int i = 0; i < subnet.length; i++) { + if (subnet[i] != (byte) (ip[i] & networkMask[i])) { + return false; + } + } + return true; + } + + private static boolean isIPv4(byte[] ip) { + return ip.length == 4; + } + + private static boolean isIPv6(byte[] ip) { + return ip.length == 16; + } + + private static byte[] toIPv6(byte[] ipv4) { + byte[] ipv6 = new byte[16]; + ipv6[10] = (byte) 0xFF; + ipv6[11] = (byte) 0xFF; + System.arraycopy(ipv4, 0, ipv6, 12, 4); + return ipv6; + } + + @Override + public String toString() { + return Arrays.toString(toZeroBasedByteArray(subnet)); + } + + private static int[] toZeroBasedByteArray(byte[] bytes) { + int[] res = new int[bytes.length]; + for (int i = 0; i < bytes.length; i++) { + res[i] = bytes[i] & 0xFF; + } + return res; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java new file mode 100644 index 00000000000..105e776a507 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddress.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
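A test-style sketch of the CIDR semantics implemented above; it assumes package-private access to `Subnet` (i.e. a test in the same package) and assertions enabled with `-ea`.

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

class SubnetSemanticsSketch {
  static void demo() throws UnknownHostException {
    Subnet subnet = Subnet.parse("10.1.2.0/24");
    // Contained: shares the first 24 bits with the subnet.
    assert subnet.contains(InetAddress.getByName("10.1.2.42").getAddress());
    // Not contained: differs within the prefix.
    assert !subnet.contains(InetAddress.getByName("10.1.3.42").getAddress());
    // Rejected: host bits are set, so "10.1.2.1/24" is not a network prefix block.
    try {
      Subnet.parse("10.1.2.1/24");
      assert false;
    } catch (IllegalArgumentException expected) {
      // "Subnet 10.1.2.1/24 must be represented as a network prefix block"
    }
  }
}
```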
+ */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import java.net.InetSocketAddress; +import java.net.UnknownHostException; + +class SubnetAddress { + private final Subnet subnet; + private final InetSocketAddress address; + + SubnetAddress(String subnetCIDR, InetSocketAddress address) { + try { + this.subnet = Subnet.parse(subnetCIDR); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + this.address = address; + } + + InetSocketAddress getAddress() { + return this.address; + } + + boolean isOverlapping(SubnetAddress other) { + Subnet thisSubnet = this.subnet; + Subnet otherSubnet = other.subnet; + return thisSubnet.contains(otherSubnet.getLower()) + || thisSubnet.contains(otherSubnet.getUpper()) + || otherSubnet.contains(thisSubnet.getLower()) + || otherSubnet.contains(thisSubnet.getUpper()); + } + + boolean contains(InetSocketAddress address) { + return subnet.contains(address.getAddress().getAddress()); + } + + boolean isIPv4() { + return subnet.isIPv4(); + } + + boolean isIPv6() { + return subnet.isIPv6(); + } + + @Override + public String toString() { + return "SubnetAddress[subnet=" + subnet + ", address=" + address + "]"; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java new file mode 100644 index 00000000000..85f29e3fadd --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslator.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
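The overlap check above reduces to containment of each subnet's lower and upper bound in the other subnet. A test-style sketch (same package; proxy endpoints are placeholders, created unresolved so no DNS lookup is involved):

```java
import java.net.InetSocketAddress;

class SubnetOverlapSketch {
  static void demo() {
    InetSocketAddress proxyA = InetSocketAddress.createUnresolved("proxy-a.example.com", 9042);
    InetSocketAddress proxyB = InetSocketAddress.createUnresolved("proxy-b.example.com", 9042);
    SubnetAddress a = new SubnetAddress("10.0.0.0/8", proxyA);
    SubnetAddress b = new SubnetAddress("10.1.0.0/16", proxyB);
    SubnetAddress c = new SubnetAddress("192.168.0.0/16", proxyB);
    assert a.isOverlapping(b); // 10.1.0.0/16 is nested inside 10.0.0.0/8
    assert !a.isOverlapping(c); // disjoint ranges
  }
}
```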
+ */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES; + +import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.util.AddressUtils; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This translator returns the proxy address of the private subnet containing the Cassandra node + * IP, the default address if no subnet matches, or the original node address if no default is + * configured. + * + *
<p>
The translator can be used in scenarios where all nodes are behind some kind of proxy, and + * that proxy differs for nodes located in different subnets (e.g. when Cassandra is deployed in + * multiple datacenters/regions). One can use it, for example, for Cassandra on Kubernetes with + * different Cassandra datacenters deployed to different Kubernetes clusters. + */ +public class SubnetAddressTranslator implements AddressTranslator { + private static final Logger LOG = LoggerFactory.getLogger(SubnetAddressTranslator.class); + + private final List<SubnetAddress> subnetAddresses; + + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") + private final Optional<InetSocketAddress> defaultAddress; + + private final String logPrefix; + + public SubnetAddressTranslator(@NonNull DriverContext context) { + logPrefix = context.getSessionName(); + boolean resolveAddresses = + context + .getConfig() + .getDefaultProfile() + .getBoolean(ADDRESS_TRANSLATOR_RESOLVE_ADDRESSES, false); + this.subnetAddresses = + context.getConfig().getDefaultProfile().getStringMap(ADDRESS_TRANSLATOR_SUBNET_ADDRESSES) + .entrySet().stream() + .map( + e -> { + // Map keys that are quoted and/or contain forward slashes are read from + // reference.conf with additional quotes, e.g. 100.64.0.0/15 -> '100.64.0."0/15"' + // or "100.64.0.0/15" -> '"100.64.0.0/15"' + String subnetCIDR = e.getKey().replaceAll("\"", ""); + String address = e.getValue(); + return new SubnetAddress(subnetCIDR, parseAddress(address, resolveAddresses)); + }) + .collect(Collectors.toList()); + this.defaultAddress = + Optional.ofNullable( + context + .getConfig() + .getDefaultProfile() + .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) + .map(address -> parseAddress(address, resolveAddresses)); + + validateSubnetsAreOfSameProtocol(this.subnetAddresses); + validateSubnetsAreNotOverlapping(this.subnetAddresses); + } + + private static void validateSubnetsAreOfSameProtocol(List<SubnetAddress> subnets) { + for (int i = 0; i < subnets.size() - 1; i++) { + for (int j = i + 1; j < subnets.size(); j++) { + SubnetAddress subnet1 = subnets.get(i); + SubnetAddress subnet2 = subnets.get(j); + if (subnet1.isIPv4() != subnet2.isIPv4() && subnet1.isIPv6() != subnet2.isIPv6()) { + throw new IllegalArgumentException( + String.format( + "Configured subnets are of different protocols: %s, %s", subnet1, subnet2)); + } + } + } + } + + private static void validateSubnetsAreNotOverlapping(List<SubnetAddress> subnets) { + for (int i = 0; i < subnets.size() - 1; i++) { + for (int j = i + 1; j < subnets.size(); j++) { + SubnetAddress subnet1 = subnets.get(i); + SubnetAddress subnet2 = subnets.get(j); + if (subnet1.isOverlapping(subnet2)) { + throw new IllegalArgumentException( + String.format("Configured subnets are overlapping: %s, %s", subnet1, subnet2)); + } + } + } + } + + @NonNull + @Override + public InetSocketAddress translate(@NonNull InetSocketAddress address) { + InetSocketAddress translatedAddress = null; + for (SubnetAddress subnetAddress : subnetAddresses) { + if (subnetAddress.contains(address)) { + translatedAddress = subnetAddress.getAddress(); + } + } + if (translatedAddress == null && defaultAddress.isPresent()) { + translatedAddress = defaultAddress.get(); + } + if (translatedAddress == null) { + translatedAddress = address; + } + LOG.debug("[{}] Translated {} to {}", logPrefix, address, translatedAddress); + return translatedAddress; + } + + @Override + public void close() {} + + @Nullable + private InetSocketAddress parseAddress(String address, boolean resolve) { + try { + InetSocketAddress 
parsedAddress = AddressUtils.extract(address, resolve).iterator().next(); + LOG.debug("[{}] Parsed {} to {}", logPrefix, address, parsedAddress); + return parsedAddress; + } catch (RuntimeException e) { + throw new IllegalArgumentException( + String.format("Invalid address %s (%s)", address, e.getMessage()), e); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java index 6ccf1651e1f..5078428c21a 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRequestHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,8 @@ import com.datastax.oss.driver.api.core.DriverTimeoutException; import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.connection.BusyConnectionException; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import com.datastax.oss.driver.internal.core.channel.ResponseCallback; @@ -28,6 +32,7 @@ import com.datastax.oss.protocol.internal.request.Query; import com.datastax.oss.protocol.internal.request.query.QueryOptions; import com.datastax.oss.protocol.internal.response.Result; +import com.datastax.oss.protocol.internal.response.result.Prepared; import com.datastax.oss.protocol.internal.response.result.Rows; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.ScheduledFuture; @@ -44,17 +49,25 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Handles the lifecyle of an admin request (such as a node refresh or schema refresh query). */ +/** Handles the lifecycle of an admin request (such as a node refresh or schema refresh query). 
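Putting the subnet translator together, a hedged configuration sketch (it assumes the programmatic builder's `withStringMap`; all hostnames are placeholders): one proxy per datacenter subnet plus an optional fallback. The subnets must be disjoint prefix blocks of the same IP version, or the constructor above rejects them.

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;

public class SubnetTranslatorExample {
  public static DriverConfigLoader loader() {
    return DriverConfigLoader.programmaticBuilder()
        .withString(DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS, "SubnetAddressTranslator")
        .withStringMap(
            DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES,
            ImmutableMap.of(
                "100.64.0.0/15", "dc1-proxy.example.com:9042",
                "100.66.0.0/15", "dc2-proxy.example.com:9042"))
        .withString(
            DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS,
            "fallback-proxy.example.com:9042")
        .build();
  }
}
```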
*/ @ThreadSafe -public class AdminRequestHandler implements ResponseCallback { +public class AdminRequestHandler implements ResponseCallback { private static final Logger LOG = LoggerFactory.getLogger(AdminRequestHandler.class); - public static AdminRequestHandler query( + public static AdminRequestHandler call( DriverChannel channel, Query query, Duration timeout, String logPrefix) { - return createAdminRequestHandler(channel, query, Collections.emptyMap(), timeout, logPrefix); + return new AdminRequestHandler<>( + channel, + true, + query, + Frame.NO_PAYLOAD, + timeout, + logPrefix, + "call '" + query.query + "'", + com.datastax.oss.protocol.internal.response.result.Void.class); } - public static AdminRequestHandler query( + public static AdminRequestHandler query( DriverChannel channel, String query, Map parameters, @@ -65,58 +78,61 @@ public static AdminRequestHandler query( new Query( query, buildQueryOptions(pageSize, serialize(parameters, channel.protocolVersion()), null)); - return createAdminRequestHandler(channel, message, parameters, timeout, logPrefix); - } - - private static AdminRequestHandler createAdminRequestHandler( - DriverChannel channel, - Query message, - Map parameters, - Duration timeout, - String logPrefix) { - String debugString = "query '" + message.query + "'"; if (!parameters.isEmpty()) { debugString += " with parameters " + parameters; } - return new AdminRequestHandler( - channel, message, Frame.NO_PAYLOAD, timeout, logPrefix, debugString); + return new AdminRequestHandler<>( + channel, true, message, Frame.NO_PAYLOAD, timeout, logPrefix, debugString, Rows.class); } - public static AdminRequestHandler query( + public static AdminRequestHandler query( DriverChannel channel, String query, Duration timeout, int pageSize, String logPrefix) { return query(channel, query, Collections.emptyMap(), timeout, pageSize, logPrefix); } private final DriverChannel channel; + private final boolean shouldPreAcquireId; private final Message message; private final Map customPayload; private final Duration timeout; private final String logPrefix; private final String debugString; - protected final CompletableFuture result = new CompletableFuture<>(); + private final Class expectedResponseType; + protected final CompletableFuture result = new CompletableFuture<>(); // This is only ever accessed on the channel's event loop, so it doesn't need to be volatile private ScheduledFuture timeoutFuture; - public AdminRequestHandler( + protected AdminRequestHandler( DriverChannel channel, + boolean shouldPreAcquireId, Message message, Map customPayload, Duration timeout, String logPrefix, - String debugString) { + String debugString, + Class expectedResponseType) { this.channel = channel; + this.shouldPreAcquireId = shouldPreAcquireId; this.message = message; this.customPayload = customPayload; this.timeout = timeout; this.logPrefix = logPrefix; this.debugString = debugString; + this.expectedResponseType = expectedResponseType; } - public CompletionStage start() { + public CompletionStage start() { LOG.debug("[{}] Executing {}", logPrefix, this); - channel.write(message, false, customPayload, this).addListener(this::onWriteComplete); + if (shouldPreAcquireId && !channel.preAcquireId()) { + setFinalError( + new BusyConnectionException( + String.format( + "%s has reached its maximum number of simultaneous requests", channel))); + } else { + channel.write(message, false, customPayload, this).addListener(this::onWriteComplete); + } return result; } @@ -158,22 +174,31 @@ public void 
onResponse(Frame responseFrame) { } Message message = responseFrame.message; LOG.debug("[{}] Got response {}", logPrefix, responseFrame.message); - if (message instanceof Rows) { + if (!expectedResponseType.isInstance(message)) { + // Note that this also covers error responses, no need to get too fancy here + setFinalError(new UnexpectedResponseException(debugString, message)); + } else if (expectedResponseType == Rows.class) { Rows rows = (Rows) message; ByteBuffer pagingState = rows.getMetadata().pagingState; AdminRequestHandler nextHandler = (pagingState == null) ? null : this.copy(pagingState); - setFinalResult(new AdminResult(rows, nextHandler, channel.protocolVersion())); - } else if (message instanceof Result) { - - // Internal prepares are only "reprepare on up" types of queries, where we only care about - // success, not the actual result, so this is good enough: + // The public factory methods guarantee that expectedResponseType and ResultT always match: + @SuppressWarnings("unchecked") + ResultT result = (ResultT) new AdminResult(rows, nextHandler, channel.protocolVersion()); + setFinalResult(result); + } else if (expectedResponseType == Prepared.class) { + Prepared prepared = (Prepared) message; + @SuppressWarnings("unchecked") + ResultT result = (ResultT) ByteBuffer.wrap(prepared.preparedQueryId); + setFinalResult(result); + } else if (expectedResponseType + == com.datastax.oss.protocol.internal.response.result.Void.class) { setFinalResult(null); } else { - setFinalError(new UnexpectedResponseException(debugString, message)); + setFinalError(new AssertionError("Unhandled response type: " + expectedResponseType)); } } - protected boolean setFinalResult(AdminResult result) { + protected boolean setFinalResult(ResultT result) { return this.result.complete(result); } @@ -181,19 +206,22 @@ protected boolean setFinalError(Throwable error) { return result.completeExceptionally(error); } - private AdminRequestHandler copy(ByteBuffer pagingState) { + private AdminRequestHandler copy(ByteBuffer pagingState) { assert message instanceof Query; Query current = (Query) this.message; QueryOptions currentOptions = current.options; QueryOptions newOptions = buildQueryOptions(currentOptions.pageSize, currentOptions.namedValues, pagingState); - return new AdminRequestHandler( + return new AdminRequestHandler<>( channel, + // This is called for next page queries, so we always need to reacquire an id: + true, new Query(current.query, newOptions), customPayload, timeout, logPrefix, - debugString); + debugString, + expectedResponseType); } private static QueryOptions buildQueryOptions( @@ -206,8 +234,9 @@ private static QueryOptions buildQueryOptions( pageSize, pagingState, ProtocolConstants.ConsistencyLevel.SERIAL, - Long.MIN_VALUE, - null); + Statement.NO_DEFAULT_TIMESTAMP, + null, + Statement.NO_NOW_IN_SECONDS); } private static Map serialize( @@ -228,6 +257,8 @@ private static ByteBuffer serialize(Object parameter, ProtocolVersion protocolVe @SuppressWarnings("unchecked") List<String> l = (List<String>) parameter; return AdminRow.LIST_OF_TEXT.encode(l, protocolVersion); + } else if (parameter instanceof Integer) { + return TypeCodecs.INT.encode((Integer) parameter, protocolVersion); } else { throw new IllegalArgumentException( "Unsupported variable type for admin query: " + parameter.getClass()); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java index d40a85049fc..686cc05c6b0 
100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminResult.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,10 +37,11 @@ public class AdminResult implements Iterable { private final Queue> data; private final Map columnSpecs; - private final AdminRequestHandler nextHandler; + private final AdminRequestHandler nextHandler; private final ProtocolVersion protocolVersion; - public AdminResult(Rows rows, AdminRequestHandler nextHandler, ProtocolVersion protocolVersion) { + public AdminResult( + Rows rows, AdminRequestHandler nextHandler, ProtocolVersion protocolVersion) { this.data = rows.getData(); ImmutableMap.Builder columnSpecsBuilder = ImmutableMap.builder(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java index efcb3ce80d9..6e32ea845fb 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/AdminRow.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
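The net effect of the generics refactoring above: each factory pins both the protocol response it expects and the Java type its future yields. A sketch of hypothetical call sites (internal API; the channel, statements, timeout and log prefix are placeholders, and the single-argument `Query` constructor is assumed):

```java
import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler;
import com.datastax.oss.driver.internal.core.adminrequest.AdminResult;
import com.datastax.oss.driver.internal.core.channel.DriverChannel;
import com.datastax.oss.protocol.internal.request.Query;
import java.time.Duration;
import java.util.concurrent.CompletionStage;

class AdminHandlerSketch {
  static void demo(DriverChannel channel) {
    // Fire-and-forget call: only success matters, so the result type is Void.
    CompletionStage<Void> done =
        AdminRequestHandler.call(
                channel, new Query("TRUNCATE ks.tbl"), Duration.ofSeconds(2), "sketch")
            .start();
    // Reads still produce an AdminResult, with paging handled internally.
    CompletionStage<AdminResult> rows =
        AdminRequestHandler.query(
                channel, "SELECT * FROM system.peers", Duration.ofSeconds(2), 5000, "sketch")
            .start();
  }
}
```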
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -100,6 +102,15 @@ public Map getMapOfStringToString(String columnName) { return get(columnName, MAP_OF_STRING_TO_STRING); } + public boolean isNull(String columnName) { + if (!contains(columnName)) { + return true; + } else { + int index = columnSpecs.get(columnName).index; + return data.get(index) == null; + } + } + public boolean contains(String columnName) { return columnSpecs.containsKey(columnName); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java index a4d9809a32d..40ab21b759a 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/ThrottledAdminRequestHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
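The new `isNull` helper folds "column missing in this server version" and "column present but null" into a single guard. A sketch (the column name is just an example from `system.peers`):

```java
import com.datastax.oss.driver.internal.core.adminrequest.AdminRow;

class AdminRowSketch {
  static String releaseVersionOrNull(AdminRow peerRow) {
    // True both when "release_version" is absent from the result metadata
    // and when it is present with a null value.
    return peerRow.isNull("release_version") ? null : peerRow.getString("release_version");
  }
}
```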
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +19,19 @@ import com.datastax.oss.driver.api.core.DriverTimeoutException; import com.datastax.oss.driver.api.core.RequestThrottlingException; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; import com.datastax.oss.driver.api.core.session.throttling.Throttled; import com.datastax.oss.driver.internal.core.channel.DriverChannel; +import com.datastax.oss.driver.internal.core.control.ControlConnection; import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.session.DefaultSession; import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.response.Result; +import com.datastax.oss.protocol.internal.response.result.Prepared; +import com.datastax.oss.protocol.internal.response.result.Rows; import edu.umd.cs.findbugs.annotations.NonNull; import java.nio.ByteBuffer; import java.time.Duration; @@ -32,29 +41,97 @@ import net.jcip.annotations.ThreadSafe; @ThreadSafe -public class ThrottledAdminRequestHandler extends AdminRequestHandler implements Throttled { +public class ThrottledAdminRequestHandler extends AdminRequestHandler + implements Throttled { + + /** + * @param shouldPreAcquireId whether to call {@link DriverChannel#preAcquireId()} before sending + * the request. This must be false if you obtained the connection from a pool ({@link + * ChannelPool#next()}, or {@link DefaultSession#getChannel(Node, String)}). It must be + * true if you are using a standalone channel (e.g. in {@link ControlConnection} or one of + * its auxiliary components). + */ + public static ThrottledAdminRequestHandler query( + DriverChannel channel, + boolean shouldPreAcquireId, + Message message, + Map customPayload, + Duration timeout, + RequestThrottler throttler, + SessionMetricUpdater metricUpdater, + String logPrefix, + String debugString) { + return new ThrottledAdminRequestHandler<>( + channel, + shouldPreAcquireId, + message, + customPayload, + timeout, + throttler, + metricUpdater, + logPrefix, + debugString, + Rows.class); + } + + /** + * @param shouldPreAcquireId whether to call {@link DriverChannel#preAcquireId()} before sending + * the request. See {@link #query(DriverChannel, boolean, Message, Map, Duration, + * RequestThrottler, SessionMetricUpdater, String, String)} for more explanations. 
+ */ + public static ThrottledAdminRequestHandler prepare( + DriverChannel channel, + boolean shouldPreAcquireId, + Message message, + Map customPayload, + Duration timeout, + RequestThrottler throttler, + SessionMetricUpdater metricUpdater, + String logPrefix) { + return new ThrottledAdminRequestHandler<>( + channel, + shouldPreAcquireId, + message, + customPayload, + timeout, + throttler, + metricUpdater, + logPrefix, + message.toString(), + Prepared.class); + } private final long startTimeNanos; private final RequestThrottler throttler; private final SessionMetricUpdater metricUpdater; - public ThrottledAdminRequestHandler( + protected ThrottledAdminRequestHandler( DriverChannel channel, + boolean preAcquireId, Message message, Map customPayload, Duration timeout, RequestThrottler throttler, SessionMetricUpdater metricUpdater, String logPrefix, - String debugString) { - super(channel, message, customPayload, timeout, logPrefix, debugString); + String debugString, + Class expectedResponseType) { + super( + channel, + preAcquireId, + message, + customPayload, + timeout, + logPrefix, + debugString, + expectedResponseType); this.startTimeNanos = System.nanoTime(); this.throttler = throttler; this.metricUpdater = metricUpdater; } @Override - public CompletionStage start() { + public CompletionStage start() { // Don't write request yet, wait for green light from throttler throttler.register(this); return result; @@ -79,7 +156,7 @@ public void onThrottleFailure(@NonNull RequestThrottlingException error) { } @Override - protected boolean setFinalResult(AdminResult result) { + protected boolean setFinalResult(ResultT result) { boolean wasSet = super.setFinalResult(result); if (wasSet) { throttler.signalSuccess(this); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java index f23a7e90537..c842b655411 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/UnexpectedResponseException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
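A sketch of the `shouldPreAcquireId` contract documented above (internal API; the surrounding variables are hypothetical): pass `false` for channels obtained from a pool, whose stream id was already pre-acquired, and `true` for standalone channels such as the control connection's.

```java
import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler;
import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler;
import com.datastax.oss.driver.internal.core.channel.DriverChannel;
import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater;
import com.datastax.oss.protocol.internal.Frame;
import com.datastax.oss.protocol.internal.Message;
import java.time.Duration;

class PreAcquireSketch {
  static void send(
      DriverChannel pooledChannel,
      DriverChannel standaloneChannel,
      Message message,
      RequestThrottler throttler,
      SessionMetricUpdater metrics) {
    // Channel obtained from ChannelPool.next(): the pool pre-acquired the id.
    ThrottledAdminRequestHandler.query(
            pooledChannel, false, message, Frame.NO_PAYLOAD, Duration.ofSeconds(2),
            throttler, metrics, "sketch", "pooled query")
        .start();
    // Standalone channel (e.g. the control connection's): acquire the id here.
    ThrottledAdminRequestHandler.query(
            standaloneChannel, true, message, Frame.NO_PAYLOAD, Duration.ofSeconds(2),
            throttler, metrics, "sketch", "standalone query")
        .start();
  }
}
```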
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java index e12c4db8514..55ab14c8981 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/adminrequest/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java b/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java index 2946a89b71a..f2dfdf14171 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,13 @@ */ package com.datastax.oss.driver.internal.core.auth; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.internal.core.auth.AuthUtils; +import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.EndPoint; import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.ThreadSafe; @@ -34,11 +40,19 @@ * class = com.datastax.driver.api.core.auth.PlainTextAuthProvider * username = cassandra * password = cassandra + * + * // If connecting to DataStax Enterprise, this additional option allows proxy authentication + * // (login as another user or role) + * authorization-id = userOrRole * } * } * * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. + * The authentication provider cannot be changed at runtime; however, the credentials can be changed + * at runtime: the new ones will be used for new connection attempts once the configuration gets + * {@linkplain com.datastax.oss.driver.api.core.config.DriverConfigLoader#reload() reloaded}. + * + *
<p>
      See {@code reference.conf} (in the manual or core driver JAR) for more details. */ @ThreadSafe public class PlainTextAuthProvider extends PlainTextAuthProviderBase { @@ -52,9 +66,22 @@ public PlainTextAuthProvider(DriverContext context) { @NonNull @Override - protected Credentials getCredentials() { + protected Credentials getCredentials( + @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { + // It's not valid to use the PlainTextAuthProvider without a username or password, error out + // early here + AuthUtils.validateConfigPresent( + config, + PlainTextAuthProvider.class.getName(), + endPoint, + DefaultDriverOption.AUTH_PROVIDER_USER_NAME, + DefaultDriverOption.AUTH_PROVIDER_PASSWORD); + + String authorizationId = config.getString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, ""); + assert authorizationId != null; // per the default above return new Credentials( config.getString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME).toCharArray(), - config.getString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD).toCharArray()); + config.getString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD).toCharArray(), + authorizationId.toCharArray()); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProviderBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProviderBase.java deleted file mode 100644 index 723c1dbf584..00000000000 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProviderBase.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.auth; - -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.auth.Authenticator; -import com.datastax.oss.driver.api.core.auth.SyncAuthenticator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.shaded.guava.common.base.Charsets; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.util.Arrays; -import java.util.Objects; -import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Common infrastructure for plain text auth providers. - * - *
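A configuration sketch for the proxy-authentication option described above (credentials and role names are placeholders):

```java
import com.datastax.dse.driver.api.core.config.DseDriverOption;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

public class ProxyAuthExample {
  public static DriverConfigLoader loader() {
    // Authenticate as admin_user, but execute requests as analytics_role (DSE only).
    return DriverConfigLoader.programmaticBuilder()
        .withString(DefaultDriverOption.AUTH_PROVIDER_CLASS, "PlainTextAuthProvider")
        .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "admin_user")
        .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "s3cr3t")
        .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "analytics_role")
        .build();
  }
}
```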
<p>
      This can be reused to write an implementation that retrieves the credentials from another - * source than the configuration. - */ -@ThreadSafe -public abstract class PlainTextAuthProviderBase implements AuthProvider { - - private static final Logger LOG = LoggerFactory.getLogger(PlainTextAuthProviderBase.class); - - private final String logPrefix; - - /** - * @param logPrefix a string that will get prepended to the logs (this is used for discrimination - * when you have multiple driver instances executing in the same JVM). Built-in - * implementations fill this with {@link Session#getName()}. - */ - protected PlainTextAuthProviderBase(@NonNull String logPrefix) { - this.logPrefix = Objects.requireNonNull(logPrefix); - } - - /** - * Retrieves the credentials from the underlying source. - * - *
<p>
      This is invoked every time the driver opens a new connection. - */ - @NonNull - protected abstract Credentials getCredentials(); - - @NonNull - @Override - public Authenticator newAuthenticator( - @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) { - return new PlainTextAuthenticator(getCredentials()); - } - - @Override - public void onMissingChallenge(@NonNull EndPoint endPoint) { - LOG.warn( - "[{}] {} did not send an authentication challenge; " - + "This is suspicious because the driver expects authentication", - logPrefix, - endPoint); - } - - @Override - public void close() { - // nothing to do - } - - protected static class Credentials { - - private final char[] username; - private final char[] password; - - public Credentials(@NonNull char[] username, @NonNull char[] password) { - this.username = Objects.requireNonNull(username); - this.password = Objects.requireNonNull(password); - } - - @NonNull - public char[] getUsername() { - return username; - } - - @NonNull - public char[] getPassword() { - return password; - } - - /** Clears the credentials from memory when they're no longer needed. */ - protected void clear() { - // Note: this is a bit irrelevant with the built-in provider, because the config already - // caches the credentials in memory. But it might be useful for a custom implementation that - // retrieves the credentials from a different source. - Arrays.fill(getUsername(), (char) 0); - Arrays.fill(getPassword(), (char) 0); - } - } - - protected static class PlainTextAuthenticator implements SyncAuthenticator { - - private final ByteBuffer initialToken; - - protected PlainTextAuthenticator(@NonNull Credentials credentials) { - Objects.requireNonNull(credentials); - ByteBuffer usernameBytes = toUtf8Bytes(credentials.getUsername()); - ByteBuffer passwordBytes = toUtf8Bytes(credentials.getPassword()); - credentials.clear(); - - this.initialToken = - ByteBuffer.allocate(usernameBytes.remaining() + passwordBytes.remaining() + 2); - initialToken.put((byte) 0); - initialToken.put(usernameBytes); - initialToken.put((byte) 0); - initialToken.put(passwordBytes); - initialToken.flip(); - - // Clear temporary buffers - usernameBytes.rewind(); - while (usernameBytes.remaining() > 0) { - usernameBytes.put((byte) 0); - } - passwordBytes.rewind(); - while (passwordBytes.remaining() > 0) { - passwordBytes.put((byte) 0); - } - } - - private ByteBuffer toUtf8Bytes(char[] charArray) { - CharBuffer charBuffer = CharBuffer.wrap(charArray); - return Charsets.UTF_8.encode(charBuffer); - } - - @Override - @Nullable - public ByteBuffer initialResponseSync() { - return initialToken.duplicate(); - } - - @Override - @Nullable - public ByteBuffer evaluateChallengeSync(@Nullable ByteBuffer token) { - return null; - } - - @Override - public void onAuthenticationSuccessSync(@Nullable ByteBuffer token) { - // no-op, the server should send nothing anyway - } - } -} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java index a2cb5d35956..970ea061ec7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
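The deleted internal base class survives as `com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase` (see the new import in `PlainTextAuthProvider` above). A hedged sketch of the documented reuse scenario, retrieving credentials from a source other than the configuration; the vault lookups are placeholders:

```java
import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase;
import com.datastax.oss.driver.api.core.metadata.EndPoint;
import edu.umd.cs.findbugs.annotations.NonNull;

public class VaultAuthProvider extends PlainTextAuthProviderBase {

  public VaultAuthProvider(String logPrefix) {
    super(logPrefix);
  }

  @NonNull
  @Override
  protected Credentials getCredentials(
      @NonNull EndPoint endPoint, @NonNull String serverAuthenticator) {
    // Invoked on every new connection, so rotated secrets are picked up.
    return new Credentials(
        fetchFromVault("username"), fetchFromVault("password"), new char[0]); // no proxy auth
  }

  private char[] fetchFromVault(String key) {
    throw new UnsupportedOperationException("placeholder for a real secret store lookup");
  }
}
```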
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java index cd936bd4b0e..66a5c4edc0e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +17,17 @@ */ package com.datastax.oss.driver.internal.core.channel; +import com.datastax.oss.driver.api.core.ConsistencyLevel; import com.datastax.oss.driver.api.core.ProtocolVersion; import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverConfig; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.context.NettyOptions; import com.datastax.oss.driver.internal.core.metadata.DefaultNode; @@ -33,6 +38,7 @@ import com.datastax.oss.driver.internal.core.protocol.FrameEncoder; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import io.netty.bootstrap.Bootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -40,10 +46,12 @@ import io.netty.channel.ChannelOption; import 
io.netty.channel.ChannelPipeline; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,13 +62,49 @@ public class ChannelFactory { private static final Logger LOG = LoggerFactory.getLogger(ChannelFactory.class); + /** + * A value for {@link #productType} that indicates that we are connected to DataStax Cloud. This + * value matches the one defined at DSE DB server side at {@code ProductType.java}. + */ + private static final String DATASTAX_CLOUD_PRODUCT_TYPE = "DATASTAX_APOLLO"; + + private static final AtomicBoolean LOGGED_ORPHAN_WARNING = new AtomicBoolean(); + + /** + * A value for {@link #productType} that indicates that the server does not report any product + * type. + */ + private static final String UNKNOWN_PRODUCT_TYPE = "UNKNOWN"; + + // The names of the handlers on the pipeline: + public static final String SSL_HANDLER_NAME = "ssl"; + public static final String INBOUND_TRAFFIC_METER_NAME = "inboundTrafficMeter"; + public static final String OUTBOUND_TRAFFIC_METER_NAME = "outboundTrafficMeter"; + public static final String FRAME_TO_BYTES_ENCODER_NAME = "frameToBytesEncoder"; + public static final String FRAME_TO_SEGMENT_ENCODER_NAME = "frameToSegmentEncoder"; + public static final String SEGMENT_TO_BYTES_ENCODER_NAME = "segmentToBytesEncoder"; + public static final String BYTES_TO_FRAME_DECODER_NAME = "bytesToFrameDecoder"; + public static final String BYTES_TO_SEGMENT_DECODER_NAME = "bytesToSegmentDecoder"; + public static final String SEGMENT_TO_FRAME_DECODER_NAME = "segmentToFrameDecoder"; + public static final String HEARTBEAT_HANDLER_NAME = "heartbeat"; + public static final String INFLIGHT_HANDLER_NAME = "inflight"; + public static final String INIT_HANDLER_NAME = "init"; + private final String logPrefix; protected final InternalDriverContext context; /** either set from the configuration, or null and will be negotiated */ - @VisibleForTesting ProtocolVersion protocolVersion; + @VisibleForTesting volatile ProtocolVersion protocolVersion; + + private volatile String clusterName; - @VisibleForTesting volatile String clusterName; + /** + * The value of the {@code PRODUCT_TYPE} option reported by the first channel we opened, in + * response to a {@code SUPPORTED} request. + * + *
<p>
      If the server does not return that option, the value will be {@link #UNKNOWN_PRODUCT_TYPE}. + */ + @VisibleForTesting volatile String productType; public ChannelFactory(InternalDriverContext context) { this.logPrefix = context.getSessionName(); @@ -92,6 +136,10 @@ public void setProtocolVersion(ProtocolVersion newVersion) { this.protocolVersion = newVersion; } + public String getClusterName() { + return clusterName; + } + public CompletionStage connect(Node node, DriverChannelOptions options) { NodeMetricUpdater nodeMetricUpdater; if (node instanceof DefaultNode) { @@ -166,6 +214,24 @@ private void connect( if (ChannelFactory.this.clusterName == null) { ChannelFactory.this.clusterName = driverChannel.getClusterName(); } + Map> supportedOptions = driverChannel.getOptions(); + if (ChannelFactory.this.productType == null && supportedOptions != null) { + List productTypes = supportedOptions.get("PRODUCT_TYPE"); + String productType = + productTypes != null && !productTypes.isEmpty() + ? productTypes.get(0) + : UNKNOWN_PRODUCT_TYPE; + ChannelFactory.this.productType = productType; + DriverConfig driverConfig = context.getConfig(); + if (driverConfig instanceof TypesafeDriverConfig + && productType.equals(DATASTAX_CLOUD_PRODUCT_TYPE)) { + ((TypesafeDriverConfig) driverConfig) + .overrideDefaults( + ImmutableMap.of( + DefaultDriverOption.REQUEST_CONSISTENCY, + ConsistencyLevel.LOCAL_QUORUM.name())); + } + } resultFuture.complete(driverChannel); } else { Throwable error = connectFuture.cause(); @@ -174,7 +240,7 @@ private void connect( Optional downgraded = context.getProtocolVersionRegistry().downgrade(currentVersion); if (downgraded.isPresent()) { - LOG.info( + LOG.debug( "[{}] Failed to connect with protocol {}, retrying with {}", logPrefix, currentVersion, @@ -208,75 +274,121 @@ ChannelInitializer initializer( DriverChannelOptions options, NodeMetricUpdater nodeMetricUpdater, CompletableFuture resultFuture) { - return new ChannelInitializer() { - @Override - protected void initChannel(Channel channel) { - try { - DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); - - long setKeyspaceTimeoutMillis = - defaultConfig - .getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT) - .toMillis(); - int maxFrameLength = - (int) defaultConfig.getBytes(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH); - int maxRequestsPerConnection = - defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); - int maxOrphanRequests = - defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS); - - InFlightHandler inFlightHandler = - new InFlightHandler( - protocolVersion, - new StreamIdGenerator(maxRequestsPerConnection), - maxOrphanRequests, - setKeyspaceTimeoutMillis, - channel.newPromise(), - options.eventCallback, - options.ownerLogPrefix); - HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultConfig); - ProtocolInitHandler initHandler = - new ProtocolInitHandler( - context, protocolVersion, clusterName, endPoint, options, heartbeatHandler); - - ChannelPipeline pipeline = channel.pipeline(); - context - .getSslHandlerFactory() - .map(f -> f.newSslHandler(channel, endPoint)) - .map(h -> pipeline.addLast("ssl", h)); - - // Only add meter handlers on the pipeline if metrics are enabled. 
- SessionMetricUpdater sessionMetricUpdater = - context.getMetricsFactory().getSessionUpdater(); - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_RECEIVED, null) - || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_RECEIVED, null)) { - pipeline.addLast( - "inboundTrafficMeter", - new InboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); - } + return new ChannelFactoryInitializer( + endPoint, protocolVersion, options, nodeMetricUpdater, resultFuture); + }; + + class ChannelFactoryInitializer extends ChannelInitializer { + + private final EndPoint endPoint; + private final ProtocolVersion protocolVersion; + private final DriverChannelOptions options; + private final NodeMetricUpdater nodeMetricUpdater; + private final CompletableFuture resultFuture; + + ChannelFactoryInitializer( + EndPoint endPoint, + ProtocolVersion protocolVersion, + DriverChannelOptions options, + NodeMetricUpdater nodeMetricUpdater, + CompletableFuture resultFuture) { - if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_SENT, null) - || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_SENT, null)) { - pipeline.addLast( - "outboundTrafficMeter", - new OutboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); + this.endPoint = endPoint; + this.protocolVersion = protocolVersion; + this.options = options; + this.nodeMetricUpdater = nodeMetricUpdater; + this.resultFuture = resultFuture; + } + + @Override + protected void initChannel(Channel channel) { + try { + DriverExecutionProfile defaultConfig = context.getConfig().getDefaultProfile(); + + long setKeyspaceTimeoutMillis = + defaultConfig + .getDuration(DefaultDriverOption.CONNECTION_SET_KEYSPACE_TIMEOUT) + .toMillis(); + int maxFrameLength = + (int) defaultConfig.getBytes(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH); + int maxRequestsPerConnection = + defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS); + int maxOrphanRequests = + defaultConfig.getInt(DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS); + if (maxOrphanRequests >= maxRequestsPerConnection) { + if (LOGGED_ORPHAN_WARNING.compareAndSet(false, true)) { + LOG.warn( + "[{}] Invalid value for {}: {}. It must be lower than {}. " + + "Defaulting to {} (1/4 of max-requests) instead.", + logPrefix, + DefaultDriverOption.CONNECTION_MAX_ORPHAN_REQUESTS.getPath(), + maxOrphanRequests, + DefaultDriverOption.CONNECTION_MAX_REQUESTS.getPath(), + maxRequestsPerConnection / 4); } + maxOrphanRequests = maxRequestsPerConnection / 4; + } - pipeline - .addLast("encoder", new FrameEncoder(context.getFrameCodec(), maxFrameLength)) - .addLast("decoder", new FrameDecoder(context.getFrameCodec(), maxFrameLength)) - // Note: HeartbeatHandler is inserted here once init completes - .addLast("inflight", inFlightHandler) - .addLast("init", initHandler); - - context.getNettyOptions().afterChannelInitialized(channel); - } catch (Throwable t) { - // If the init handler throws an exception, Netty swallows it and closes the channel. We - // want to propagate it instead, so fail the outer future (the result of connect()). 
- resultFuture.completeExceptionally(t); - throw t; + InFlightHandler inFlightHandler = + new InFlightHandler( + protocolVersion, + new StreamIdGenerator(maxRequestsPerConnection), + maxOrphanRequests, + setKeyspaceTimeoutMillis, + channel.newPromise(), + options.eventCallback, + options.ownerLogPrefix); + HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultConfig); + ProtocolInitHandler initHandler = + new ProtocolInitHandler( + context, + protocolVersion, + clusterName, + endPoint, + options, + heartbeatHandler, + productType == null); + + ChannelPipeline pipeline = channel.pipeline(); + context + .getSslHandlerFactory() + .map(f -> f.newSslHandler(channel, endPoint)) + .map(h -> pipeline.addLast(SSL_HANDLER_NAME, h)); + + // Only add meter handlers on the pipeline if metrics are enabled. + SessionMetricUpdater sessionMetricUpdater = context.getMetricsFactory().getSessionUpdater(); + if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_RECEIVED, null) + || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_RECEIVED, null)) { + pipeline.addLast( + INBOUND_TRAFFIC_METER_NAME, + new InboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); + } + + if (nodeMetricUpdater.isEnabled(DefaultNodeMetric.BYTES_SENT, null) + || sessionMetricUpdater.isEnabled(DefaultSessionMetric.BYTES_SENT, null)) { + pipeline.addLast( + OUTBOUND_TRAFFIC_METER_NAME, + new OutboundTrafficMeter(nodeMetricUpdater, sessionMetricUpdater)); } + + pipeline + .addLast( + FRAME_TO_BYTES_ENCODER_NAME, + new FrameEncoder(context.getFrameCodec(), maxFrameLength)) + .addLast( + BYTES_TO_FRAME_DECODER_NAME, + new FrameDecoder(context.getFrameCodec(), maxFrameLength)) + // Note: HeartbeatHandler is inserted here once init completes + .addLast(INFLIGHT_HANDLER_NAME, inFlightHandler) + .addLast(INIT_HANDLER_NAME, initHandler); + + context.getNettyOptions().afterChannelInitialized(channel); + } catch (Throwable t) { + // If the init handler throws an exception, Netty swallows it and closes the channel. We + // want to propagate it instead, so fail the outer future (the result of connect()). + resultFuture.completeExceptionally(t); + throw t; } - }; + } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java index 0a977b97573..3ba3d70eb8d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerRequest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
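An aside on the `ChannelFactory` hunks above: the factory now inspects the `PRODUCT_TYPE` entry of the server's SUPPORTED options exactly once, on the first successful connection, and tightens the default consistency level when a cloud deployment is detected. A minimal, self-contained sketch of that pattern; the class name and the `basic.request.consistency` path below are illustrative stand-ins, not the driver's actual `TypesafeDriverConfig` machinery:

```java
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for the product-type-driven default override above.
final class ProductTypeDefaults {
  static final String UNKNOWN_PRODUCT_TYPE = "UNKNOWN";

  private final Map<String, String> overriddenDefaults = new ConcurrentHashMap<>();
  private volatile String productType; // resolved once, from the first channel's answer

  /** Processes the body of a SUPPORTED response (option name -> values). */
  void onSupportedOptions(Map<String, List<String>> supportedOptions, String cloudProductType) {
    if (productType != null || supportedOptions == null) {
      return; // already resolved, or the server sent nothing usable
    }
    List<String> values = supportedOptions.get("PRODUCT_TYPE");
    productType = (values != null && !values.isEmpty()) ? values.get(0) : UNKNOWN_PRODUCT_TYPE;
    if (cloudProductType.equals(productType)) {
      // Mirror of the overrideDefaults() call above: raise the default consistency.
      overriddenDefaults.put("basic.request.consistency", "LOCAL_QUORUM");
    }
  }

  String defaultConsistency() {
    return overriddenDefaults.getOrDefault("basic.request.consistency", "LOCAL_ONE");
  }
}
```

The `productType == null` guard means only the first connection's answer is ever applied; later channels see the field already set and skip the override.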
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.channel; import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.connection.BusyConnectionException; import com.datastax.oss.driver.internal.core.util.ProtocolUtils; import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; import com.datastax.oss.protocol.internal.Frame; @@ -35,6 +38,7 @@ abstract class ChannelHandlerRequest implements ResponseCallback { final Channel channel; final ChannelHandlerContext ctx; + final InFlightHandler inFlightHandler; private final long timeoutMillis; private ScheduledFuture timeoutFuture; @@ -42,6 +46,8 @@ abstract class ChannelHandlerRequest implements ResponseCallback { ChannelHandlerRequest(ChannelHandlerContext ctx, long timeoutMillis) { this.ctx = ctx; this.channel = ctx.channel(); + this.inFlightHandler = ctx.pipeline().get(InFlightHandler.class); + assert inFlightHandler != null; this.timeoutMillis = timeoutMillis; } @@ -60,10 +66,17 @@ void fail(Throwable cause) { void send() { assert channel.eventLoop().inEventLoop(); - DriverChannel.RequestMessage message = - new DriverChannel.RequestMessage(getRequest(), false, Frame.NO_PAYLOAD, this); - ChannelFuture writeFuture = channel.writeAndFlush(message); - writeFuture.addListener(this::writeListener); + if (!inFlightHandler.preAcquireId()) { + fail( + new BusyConnectionException( + String.format( + "%s has reached its maximum number of simultaneous requests", channel))); + } else { + DriverChannel.RequestMessage message = + new DriverChannel.RequestMessage(getRequest(), false, Frame.NO_PAYLOAD, this); + ChannelFuture writeFuture = channel.writeAndFlush(message); + writeFuture.addListener(this::writeListener); + } } private void writeListener(Future writeFuture) { @@ -71,7 +84,9 @@ private void writeListener(Future writeFuture) { timeoutFuture = channel.eventLoop().schedule(this::onTimeout, timeoutMillis, TimeUnit.MILLISECONDS); } else { - fail(describe() + ": error writing ", writeFuture.cause()); + String message = + String.format("%s: failed to send request (%s)", describe(), writeFuture.cause()); + fail(message, writeFuture.cause()); } } @@ -87,7 +102,8 @@ public final void onFailure(Throwable error) { if (timeoutFuture != null) { timeoutFuture.cancel(true); } - fail(describe() + ": unexpected failure", error); + String message = String.format("%s: unexpected failure (%s)", describe(), error); + fail(message, error); } private void onTimeout() { @@ -104,13 +120,13 @@ void failOnUnexpected(Message response) { fail( new IllegalArgumentException( String.format( - "%s: unexpected server error [%s] %s", + "%s: server replied with unexpected error code [%s]: %s", describe(), ProtocolUtils.errorCodeString(error.code), error.message))); } else { fail( new IllegalArgumentException( String.format( - "%s: unexpected server response opcode=%s", + "%s: server replied with unexpected response type (opcode=%s)", describe(), ProtocolUtils.opcodeString(response.opcode)))); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java index 
04abdfb0368..8e47db3fb1b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ClusterNameMismatchException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java index 4c7b7f642fe..789981b4832 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
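Note the shape of the rewritten `send()` in the `ChannelHandlerRequest` hunk above: capacity is reserved via `preAcquireId()` *before* the write, and a saturated connection now fails fast with `BusyConnectionException` instead of surfacing the error from deep inside the write path. A standalone sketch of that reserve-then-write idiom, using a JDK `Semaphore` in place of the driver's stream-id accounting (all names here are hypothetical):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

// Simplified model of the fail-fast send() above; not the driver's actual classes.
final class GuardedSender {
  private final Semaphore streamIds; // stands in for InFlightHandler's id accounting

  GuardedSender(int maxRequestsPerConnection) {
    this.streamIds = new Semaphore(maxRequestsPerConnection);
  }

  CompletableFuture<byte[]> send(byte[] frame) {
    // Reserve before writing, so saturation is reported immediately to the caller.
    if (!streamIds.tryAcquire()) {
      CompletableFuture<byte[]> failed = new CompletableFuture<>();
      failed.completeExceptionally(
          new IllegalStateException(
              "connection has reached its maximum number of simultaneous requests"));
      return failed;
    }
    CompletableFuture<byte[]> response = writeAndFlush(frame);
    // Release the reservation when the response (or a failure) arrives.
    response.whenComplete((r, t) -> streamIds.release());
    return response;
  }

  private CompletableFuture<byte[]> writeAndFlush(byte[] frame) {
    return CompletableFuture.completedFuture(frame); // placeholder for the real channel write
  }
}
```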
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +17,10 @@ */ package com.datastax.oss.driver.internal.core.channel; +import com.datastax.oss.driver.internal.core.util.concurrent.PromiseCombiner; import io.netty.channel.ChannelDuplexHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.PromiseCombiner; import java.net.SocketAddress; import net.jcip.annotations.NotThreadSafe; @@ -58,9 +59,7 @@ public void connect( realConnectPromise.addListener(future -> onRealConnect(ctx)); // Make the caller's promise wait on the other two: - PromiseCombiner combiner = new PromiseCombiner(); - combiner.addAll(new Future[] {realConnectPromise, initPromise}); - combiner.finish(callerPromise); + PromiseCombiner.combine(callerPromise, realConnectPromise, initPromise); } protected abstract void onRealConnect(ChannelHandlerContext ctx); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java index 29bf2822617..232fa83be44 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DefaultWriteCoalescer.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,9 +39,7 @@ * *

      It maintains a queue per event loop, with the writes targeting the channels that run on this * loop. As soon as a write gets enqueued, it triggers a task that will flush the queue (other - * writes can get enqueued before the task runs). Once that task is complete, it re-triggers itself - * as long as new writes have been enqueued, or {@code maxRunsWithNoWork} times if there are no more - * tasks. + * writes may get enqueued before or while the task runs). * *

Note that Netty provides a similar mechanism out of the box ({@link * io.netty.handler.flush.FlushConsolidationHandler}), but in our experience our approach allows
@@ -49,13 +49,11 @@ */ @ThreadSafe public class DefaultWriteCoalescer implements WriteCoalescer { - private final int maxRunsWithNoWork; private final long rescheduleIntervalNanos; private final ConcurrentMap<EventLoop, Flusher> flushers = new ConcurrentHashMap<>(); public DefaultWriteCoalescer(DriverContext context) { DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - maxRunsWithNoWork = config.getInt(DefaultDriverOption.COALESCER_MAX_RUNS); rescheduleIntervalNanos = config.getDuration(DefaultDriverOption.COALESCER_INTERVAL).toNanos(); }
@@ -79,9 +77,8 @@ private class Flusher { private final Queue<Write> writes = new ConcurrentLinkedQueue<>(); private final AtomicBoolean running = new AtomicBoolean(); - // These variables are accessed only from runOnEventLoop, they don't need to be thread-safe + // This variable is accessed only from runOnEventLoop; it doesn't need to be thread-safe private final Set<Channel> channels = new HashSet<>(); - private int runsWithNoWork = 0; private Flusher(EventLoop eventLoop) { this.eventLoop = eventLoop;
@@ -98,13 +95,11 @@ private void enqueue(Write write) { private void runOnEventLoop() { assert eventLoop.inEventLoop(); - boolean didSomeWork = false; Write write; while ((write = writes.poll()) != null) { Channel channel = write.channel; channels.add(channel); channel.write(write.message, write.writePromise); - didSomeWork = true; } for (Channel channel : channels) {
@@ -112,22 +107,24 @@ private void runOnEventLoop() { } channels.clear(); - if (didSomeWork) { - runsWithNoWork = 0; - } else if (++runsWithNoWork > maxRunsWithNoWork) { - // Prepare to stop - running.set(false); - // If no new writes have been enqueued since the previous line, we can return safely - if (writes.isEmpty()) { - return; - } - // Otherwise check if those writes have triggered a new run. If not, we need to do that - // ourselves (i.e. not return yet) - if (!running.compareAndSet(false, true)) { - return; - } + // Prepare to stop + running.set(false); + + // enqueue() can be called concurrently with this method. There is a race condition if it: + // - added an element in the queue after we were done draining it + // - but observed running==true before we flipped it, and therefore didn't schedule another + // run + + // If nothing was added in the queue, there were no concurrent calls, we can stop safely now + if (writes.isEmpty()) { + return; } - if (!eventLoop.isShuttingDown()) { + + // Otherwise, check if one of those calls scheduled a run. If so, they flipped the bit back + // on. If not, we need to do it ourselves. + boolean shouldRestartMyself = running.compareAndSet(false, true); + + if (shouldRestartMyself && !eventLoop.isShuttingDown()) { eventLoop.schedule(this::runOnEventLoop, rescheduleIntervalNanos, TimeUnit.NANOSECONDS); } }
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java index 59978777b98..e40aa6f3097 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannel.java
@@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
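The rewritten `runOnEventLoop()` above drops the `maxRunsWithNoWork` counter in favor of a stop-then-recheck handshake with `enqueue()`. The essence of that race resolution, extracted into a runnable standalone form (a plain `Executor` stands in for the Netty event loop; this is a sketch of the idea, not the driver's class):

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;

// Standalone model of the Flusher hand-off above.
final class Coalescer {
  private final Queue<Runnable> writes = new ConcurrentLinkedQueue<>();
  private final AtomicBoolean running = new AtomicBoolean();
  private final Executor executor;

  Coalescer(Executor executor) {
    this.executor = executor;
  }

  void enqueue(Runnable write) {
    writes.add(write);
    if (running.compareAndSet(false, true)) {
      executor.execute(this::drain); // we won the race: schedule a run ourselves
    }
  }

  private void drain() {
    Runnable write;
    while ((write = writes.poll()) != null) {
      write.run();
    }
    // Prepare to stop.
    running.set(false);
    // A concurrent enqueue() may have added an element after our draining loop finished,
    // yet observed running == true before we flipped it, and therefore not scheduled a run.
    if (writes.isEmpty()) {
      return; // no such race: safe to stop
    }
    // Re-claim the flag; if an enqueue() already re-claimed it, that call scheduled the run.
    if (running.compareAndSet(false, true)) {
      executor.execute(this::drain);
    }
  }
}
```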
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +19,13 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.connection.BusyConnectionException; import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; +import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.session.DefaultSession; import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; import com.datastax.oss.protocol.internal.Message; import io.netty.channel.Channel;
@@ -29,6 +37,7 @@ import io.netty.util.concurrent.Promise; import java.net.SocketAddress; import java.nio.ByteBuffer; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import net.jcip.annotations.ThreadSafe;
@@ -39,7 +48,10 @@ */ @ThreadSafe public class DriverChannel { - static final AttributeKey<String> CLUSTER_NAME_KEY = AttributeKey.newInstance("cluster_name"); + + static final AttributeKey<String> CLUSTER_NAME_KEY = AttributeKey.valueOf("cluster_name"); + static final AttributeKey<Map<String, List<String>>> OPTIONS_KEY = + AttributeKey.valueOf("options"); @SuppressWarnings("RedundantStringConstructorCall") static final Object GRACEFUL_CLOSE_MESSAGE = new String("GRACEFUL_CLOSE_MESSAGE");
@@ -120,15 +132,52 @@ public String getClusterName() { return channel.attr(CLUSTER_NAME_KEY).get(); } + public Map<String, List<String>> getOptions() { + return channel.attr(OPTIONS_KEY).get(); + } + /** - * @return the number of available stream ids on the channel. This is used to weigh channels in - * pools that have a size bigger than 1, in the load balancing policy, and for monitoring - * purposes. + * @return the number of available stream ids on the channel; more precisely, this is the number + * of {@link #preAcquireId()} calls for which the id has not been released yet. This is used + * to weigh channels in pools that have a size bigger than 1, in the load balancing policy, + * and for monitoring purposes. */ public int getAvailableIds() { return inFlightHandler.getAvailableIds(); } + /** + * Indicates the intention to send a request using this channel. + *

      There must be exactly one invocation of this method before each call to {@link + * #write(Message, boolean, Map, ResponseCallback)}. If this method returns true, the client + * must proceed with the write. If it returns false, it must not proceed. + * + *

      This method is used together with {@link #getAvailableIds()} to track how many requests are + * currently executing on the channel, and avoid submitting a request that would result in a + * {@link BusyConnectionException}. The two methods follow atomic semantics: {@link + * #getAvailableIds()} returns the exact count of clients that have called {@link #preAcquireId()} + * and not yet released their stream id at this point in time. + * + *

      Most of the time, the driver code calls this method automatically: + * + *

        + *
      • if you obtained the channel from a pool ({@link ChannelPool#next()} or {@link + * DefaultSession#getChannel(Node, String)}), do not call this method: it has already + * been done as part of selecting the channel. + *
      • if you use {@link ChannelHandlerRequest} or {@link AdminRequestHandler} for internal + * queries, do not call this method, those classes already do it. + *
      • however, if you use {@link ThrottledAdminRequestHandler}, you must specify a {@code + * shouldPreAcquireId} argument to indicate whether to call this method or not. This is + * because those requests are sometimes used with a channel that comes from a pool + * (requiring {@code shouldPreAcquireId = false}), or sometimes with a standalone channel + * like in the control connection (requiring {@code shouldPreAcquireId = true}). + *
      + */ + public boolean preAcquireId() { + return inFlightHandler.preAcquireId(); + } + /** * @return the number of requests currently executing on this channel (including {@link * #getOrphanedIds() orphaned ids}). diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java index 258f1ab0c42..208cf52ac22 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/DriverChannelOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java index df85d8ca7b0..0ac71233fdd 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/EventCallback.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java index 0c4dba9ffc4..3dac60f5216 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/HeartbeatHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
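To make the contract above concrete, here is a hypothetical caller-side sketch for a standalone channel (one that does not come from a pool, e.g. the control connection). Pooled channels must not do this, since the pool pre-acquires on their behalf:

```java
import com.datastax.oss.driver.api.core.connection.BusyConnectionException;
import com.datastax.oss.driver.internal.core.channel.DriverChannel;
import com.datastax.oss.driver.internal.core.channel.ResponseCallback;
import com.datastax.oss.protocol.internal.Frame;
import com.datastax.oss.protocol.internal.Message;

final class StandaloneChannelExample {
  /** Sketch only: exactly one write must follow each successful preAcquireId(). */
  static void send(DriverChannel channel, Message request, ResponseCallback callback) {
    if (!channel.preAcquireId()) {
      // Saturated: fail fast (or try another channel) instead of writing.
      callback.onFailure(
          new BusyConnectionException(
              channel + " has reached its maximum number of simultaneous requests"));
      return;
    }
    channel.write(request, false, Frame.NO_PAYLOAD, callback);
  }
}
```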
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -71,7 +73,7 @@ private class HeartbeatRequest extends ChannelHandlerRequest { @Override String describe() { - return "heartbeat"; + return "Heartbeat request"; } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java index d6d69306871..90b02f358cd 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InFlightHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -117,17 +119,24 @@ public void write(ChannelHandlerContext ctx, Object in, ChannelPromise promise) private void write(ChannelHandlerContext ctx, RequestMessage message, ChannelPromise promise) { if (closingGracefully) { promise.setFailure(new IllegalStateException("Channel is closing")); + streamIds.cancelPreAcquire(); return; } int streamId = streamIds.acquire(); if (streamId < 0) { - promise.setFailure(new BusyConnectionException(streamIds.getMaxAvailableIds())); + // Should not happen with the preAcquire mechanism, but handle gracefully + promise.setFailure( + new BusyConnectionException( + String.format( + "Couldn't acquire a stream id from InFlightHandler on %s", ctx.channel()))); + streamIds.cancelPreAcquire(); return; } if (inFlight.containsKey(streamId)) { promise.setFailure( new IllegalStateException("Found pending callback for stream id " + streamId)); + streamIds.cancelPreAcquire(); return; } @@ -190,14 +199,14 @@ private void startGracefulShutdown(ChannelHandlerContext ctx) { LOG.debug("[{}] No pending queries, completing graceful shutdown now", logPrefix); ctx.channel().close(); } else { - // remove heartbeat handler from pipeline if present. - ChannelHandler heartbeatHandler = ctx.pipeline().get("heartbeat"); + // Remove heartbeat handler from pipeline if present. 
+ ChannelHandler heartbeatHandler = ctx.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME); if (heartbeatHandler != null) { ctx.pipeline().remove(heartbeatHandler); } LOG.debug("[{}] There are pending queries, delaying graceful shutdown", logPrefix); closingGracefully = true; - closeStartedFuture.setSuccess(); + closeStartedFuture.trySuccess(); } }
@@ -227,7 +236,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception wasInFlight = false; callback = orphaned.get(streamId); if (callback == null) { - LOG.trace("[{}] Got response on unknown stream id {}, skipping", streamId); + LOG.trace("[{}] Got response on unknown stream id {}, skipping", logPrefix, streamId); return; } }
@@ -251,7 +260,8 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception } } catch (Throwable t) { if (wasInFlight) { - callback.onFailure( + fail( + callback, new IllegalArgumentException("Unexpected error while invoking response handler", t)); } else { // Assume the callback is already completed, so it's better to log
@@ -276,12 +286,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable exception) thro // We know which request matches the failing response, fail that one only ResponseCallback responseCallback = inFlight.get(streamId); if (responseCallback != null) { - try { - responseCallback.onFailure(exception.getCause()); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, "[{}] Unexpected error while invoking failure handler", logPrefix, t); - } + fail(responseCallback, exception.getCause()); } release(streamId, ctx); } else {
@@ -356,13 +361,16 @@ private void abortAllInFlight(DriverException cause) { */ private void abortAllInFlight(DriverException cause, ResponseCallback ignore) { if (!inFlight.isEmpty()) { - // Clear the map now and iterate on a copy, in case one of the onFailure calls below recurses - // back into this method - Set<ResponseCallback> toAbort = ImmutableSet.copyOf(inFlight.values()); + + // Create a local copy and clear the map immediately. This prevents + // ConcurrentModificationException if aborting one of the handlers recurses back into this + // method. + Set<ResponseCallback> responseCallbacks = ImmutableSet.copyOf(inFlight.values()); inFlight.clear(); - for (ResponseCallback responseCallback : toAbort) { + + for (ResponseCallback responseCallback : responseCallbacks) { if (responseCallback != ignore) { - responseCallback.onFailure(cause); + fail(responseCallback, cause); } } // It's not necessary to release the stream ids, since we always call this method right before
@@ -370,10 +378,24 @@ private void abortAllInFlight(DriverException cause, ResponseCallback ignore) { } } + private void fail(ResponseCallback callback, Throwable failure) { + try { + callback.onFailure(failure); + } catch (Throwable throwable) { + // Protect against unexpected errors. We don't have anywhere to report the error (since + // onFailure failed), so log as a last resort.
+ LOG.error("[{}] Unexpected error while failing {}", logPrefix, callback, throwable); + } + } + int getAvailableIds() { return streamIds.getAvailableIds(); } + boolean preAcquireId() { + return streamIds.preAcquire(); + } + int getInFlight() { return streamIds.getMaxAvailableIds() - streamIds.getAvailableIds(); } @@ -395,7 +417,7 @@ private class SetKeyspaceRequest extends ChannelHandlerRequest { @Override String describe() { - return "[" + logPrefix + "] set keyspace " + keyspaceName; + return "[" + logPrefix + "] Set keyspace request (USE " + keyspaceName.asCql(true) + ")"; } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java index eea4b8a6179..518f398a808 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/InboundTrafficMeter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java index e07bcac99ed..768eb047b9d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/OutboundTrafficMeter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
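The new `fail()` helper and the `abortAllInFlight()` rewrite above follow one general idiom: snapshot the callback collection, clear it, then notify, so that a callback which re-enters the abort path cannot mutate the collection being iterated, and one throwing callback cannot prevent its siblings from being failed. A generic, driver-agnostic sketch:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

// Generic form of the snapshot-then-clear idiom used by abortAllInFlight() above.
final class CallbackRegistry<T> {
  private final Map<Integer, Consumer<T>> inFlight = new HashMap<>();

  void register(int id, Consumer<T> callback) {
    inFlight.put(id, callback);
  }

  void abortAll(T cause) {
    if (inFlight.isEmpty()) {
      return;
    }
    // Copy first, then clear: a callback that recurses into abortAll() sees an empty map
    // instead of mutating the collection we are iterating over.
    List<Consumer<T>> snapshot = new ArrayList<>(inFlight.values());
    inFlight.clear();
    for (Consumer<T> callback : snapshot) {
      try {
        callback.accept(cause);
      } catch (RuntimeException e) {
        // Nowhere left to report this (the failure path itself failed): log as a last resort.
        System.err.println("Unexpected error while failing " + callback + ": " + e);
      }
    }
  }
}
```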
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java index d8870bc3813..4e3f7d61f66 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/PassThroughWriteCoalescer.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java index 54cb427e365..8a426f7b368 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,12 +28,19 @@ import com.datastax.oss.driver.api.core.connection.ConnectionInitException; import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.protocol.BytesToSegmentDecoder; +import com.datastax.oss.driver.internal.core.protocol.FrameToSegmentEncoder; +import com.datastax.oss.driver.internal.core.protocol.SegmentToBytesEncoder; +import com.datastax.oss.driver.internal.core.protocol.SegmentToFrameDecoder; import com.datastax.oss.driver.internal.core.util.ProtocolUtils; import com.datastax.oss.driver.internal.core.util.concurrent.UncaughtExceptions; import com.datastax.oss.protocol.internal.Message; import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.ProtocolConstants.ErrorCode; import com.datastax.oss.protocol.internal.request.AuthResponse; +import com.datastax.oss.protocol.internal.request.Options; import com.datastax.oss.protocol.internal.request.Query; import com.datastax.oss.protocol.internal.request.Register; import com.datastax.oss.protocol.internal.request.Startup; @@ -40,11 +49,14 @@ import com.datastax.oss.protocol.internal.response.Authenticate; import com.datastax.oss.protocol.internal.response.Error; import com.datastax.oss.protocol.internal.response.Ready; +import com.datastax.oss.protocol.internal.response.Supported; import com.datastax.oss.protocol.internal.response.result.Rows; import com.datastax.oss.protocol.internal.response.result.SetKeyspace; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; import java.nio.ByteBuffer; import java.util.List; +import java.util.Objects; import net.jcip.annotations.NotThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,14 +81,21 @@ class ProtocolInitHandler extends ConnectInitHandler { private final HeartbeatHandler heartbeatHandler; private String logPrefix; private ChannelHandlerContext ctx; + private final boolean querySupportedOptions; + /** + * @param querySupportedOptions whether to send OPTIONS as the first message, to request which + * protocol options the channel supports. If this is true, the options will be stored as a + * channel attribute, and exposed via {@link DriverChannel#getOptions()}. 
+ */ ProtocolInitHandler( InternalDriverContext context, ProtocolVersion protocolVersion, String expectedClusterName, EndPoint endPoint, DriverChannelOptions options, - HeartbeatHandler heartbeatHandler) { + HeartbeatHandler heartbeatHandler, + boolean querySupportedOptions) { this.context = context; this.endPoint = endPoint; @@ -89,6 +108,7 @@ class ProtocolInitHandler extends ConnectInitHandler { this.expectedClusterName = expectedClusterName; this.options = options; this.heartbeatHandler = heartbeatHandler; + this.querySupportedOptions = querySupportedOptions; this.logPrefix = options.ownerLogPrefix + "|connecting..."; } @@ -111,12 +131,17 @@ protected boolean setConnectSuccess() { boolean result = super.setConnectSuccess(); if (result) { // add heartbeat to pipeline now that protocol is initialized. - ctx.pipeline().addBefore("inflight", "heartbeat", heartbeatHandler); + ctx.pipeline() + .addBefore( + ChannelFactory.INFLIGHT_HANDLER_NAME, + ChannelFactory.HEARTBEAT_HANDLER_NAME, + heartbeatHandler); } return result; } private enum Step { + OPTIONS, STARTUP, GET_CLUSTER_NAME, SET_KEYSPACE, @@ -128,37 +153,48 @@ private class InitRequest extends ChannelHandlerRequest { // This class is a finite-state automaton, that sends a different query depending on the step // in the initialization sequence. private Step step; + private int stepNumber = 0; + private Message request; private Authenticator authenticator; - private ByteBuffer authReponseToken; + private ByteBuffer authResponseToken; InitRequest(ChannelHandlerContext ctx) { super(ctx, timeoutMillis); - this.step = Step.STARTUP; + this.step = querySupportedOptions ? Step.OPTIONS : Step.STARTUP; } @Override String describe() { - return "[" + logPrefix + "] init query " + step; + return String.format( + "[%s] Protocol initialization request, step %d (%s)", logPrefix, stepNumber, request); } @Override Message getRequest() { switch (step) { + case OPTIONS: + return request = Options.INSTANCE; case STARTUP: - return new Startup(context.getStartupOptions()); + return request = new Startup(context.getStartupOptions()); case GET_CLUSTER_NAME: - return CLUSTER_NAME_QUERY; + return request = CLUSTER_NAME_QUERY; case SET_KEYSPACE: - return new Query("USE " + options.keyspace.asCql(false)); + return request = new Query("USE " + options.keyspace.asCql(false)); case AUTH_RESPONSE: - return new AuthResponse(authReponseToken); + return request = new AuthResponse(authResponseToken); case REGISTER: - return new Register(options.eventTypes); + return request = new Register(options.eventTypes); default: throw new AssertionError("unhandled step: " + step); } } + @Override + void send() { + stepNumber++; + super.send(); + } + @Override void onResponse(Message response) { LOG.debug( @@ -167,11 +203,17 @@ void onResponse(Message response) { step, ProtocolUtils.opcodeString(response.opcode)); try { - if (step == Step.STARTUP && response instanceof Ready) { + if (step == Step.OPTIONS && response instanceof Supported) { + channel.attr(DriverChannel.OPTIONS_KEY).set(((Supported) response).options); + step = Step.STARTUP; + send(); + } else if (step == Step.STARTUP && response instanceof Ready) { + maybeSwitchToModernFraming(); context.getAuthProvider().ifPresent(provider -> provider.onMissingChallenge(endPoint)); step = Step.GET_CLUSTER_NAME; send(); } else if (step == Step.STARTUP && response instanceof Authenticate) { + maybeSwitchToModernFraming(); Authenticate authenticate = (Authenticate) response; authenticator = buildAuthenticator(endPoint, 
authenticate.authenticator); authenticator @@ -181,10 +223,14 @@ void onResponse(Message response) { if (error != null) { fail( new AuthenticationException( - endPoint, "authenticator threw an exception", error)); + endPoint, + String.format( + "Authenticator.initialResponse(): stage completed exceptionally (%s)", + error), + error)); } else { step = Step.AUTH_RESPONSE; - authReponseToken = token; + authResponseToken = token; send(); } }, @@ -199,10 +245,14 @@ void onResponse(Message response) { if (error != null) { fail( new AuthenticationException( - endPoint, "authenticator threw an exception", error)); + endPoint, + String.format( + "Authenticator.evaluateChallenge(): stage completed exceptionally (%s)", + error), + error)); } else { step = Step.AUTH_RESPONSE; - authReponseToken = token; + authResponseToken = token; send(); } }, @@ -217,7 +267,11 @@ void onResponse(Message response) { if (error != null) { fail( new AuthenticationException( - endPoint, "authenticator threw an exception", error)); + endPoint, + String.format( + "Authenticator.onAuthenticationSuccess(): stage completed exceptionally (%s)", + error), + error)); } else { step = Step.GET_CLUSTER_NAME; send(); @@ -230,10 +284,13 @@ void onResponse(Message response) { && ((Error) response).code == ProtocolConstants.ErrorCode.AUTH_ERROR) { fail( new AuthenticationException( - endPoint, String.format("server replied '%s'", ((Error) response).message))); + endPoint, + String.format( + "server replied with '%s' to AuthResponse request", + ((Error) response).message))); } else if (step == Step.GET_CLUSTER_NAME && response instanceof Rows) { Rows rows = (Rows) response; - List row = rows.getData().poll(); + List row = Objects.requireNonNull(rows.getData().poll()); String actualClusterName = getString(row, 0); if (expectedClusterName != null && !expectedClusterName.equals(actualClusterName)) { fail( @@ -265,13 +322,17 @@ void onResponse(Message response) { } else if (response instanceof Error) { Error error = (Error) response; // Testing for a specific string is a tad fragile but Cassandra doesn't give us a more - // precise error - // code. + // precise error code. // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451. - if (step == Step.STARTUP - && (error.code == ProtocolConstants.ErrorCode.PROTOCOL_ERROR - || error.code == ProtocolConstants.ErrorCode.SERVER_ERROR) - && error.message.contains("Invalid or unsupported protocol version")) { + boolean firstRequest = + (step == Step.OPTIONS && querySupportedOptions) || step == Step.STARTUP; + boolean serverOrProtocolError = + error.code == ErrorCode.PROTOCOL_ERROR || error.code == ErrorCode.SERVER_ERROR; + boolean badProtocolVersionMessage = + error.message.contains("Invalid or unsupported protocol version") + // JAVA-2925: server is behind driver and considers the proposed version as beta + || error.message.contains("Beta version of the protocol used"); + if (firstRequest && serverOrProtocolError && badProtocolVersionMessage) { fail( UnsupportedProtocolVersionException.forSingleAttempt( endPoint, initialProtocolVersion)); @@ -287,7 +348,7 @@ void onResponse(Message response) { } catch (AuthenticationException e) { fail(e); } catch (Throwable t) { - fail("Unexpected exception at step " + step, t); + fail(String.format("%s: unexpected exception (%s)", describe(), t), t); } } @@ -317,6 +378,42 @@ public String toString() { } } + /** + * Rearranges the pipeline to deal with the new framing structure in protocol v5 and above. 
The + * first messages still use the legacy format, we only do this after a successful response to the + * first STARTUP message. + */ + private void maybeSwitchToModernFraming() { + if (context + .getProtocolVersionRegistry() + .supports(initialProtocolVersion, DefaultProtocolFeature.MODERN_FRAMING)) { + + ChannelPipeline pipeline = ctx.pipeline(); + + // We basically add one conversion step in the middle: frames <-> *segments* <-> bytes + // Outbound: + pipeline.replace( + ChannelFactory.FRAME_TO_BYTES_ENCODER_NAME, + ChannelFactory.FRAME_TO_SEGMENT_ENCODER_NAME, + new FrameToSegmentEncoder( + context.getPrimitiveCodec(), context.getFrameCodec(), logPrefix)); + pipeline.addBefore( + ChannelFactory.FRAME_TO_SEGMENT_ENCODER_NAME, + ChannelFactory.SEGMENT_TO_BYTES_ENCODER_NAME, + new SegmentToBytesEncoder(context.getSegmentCodec())); + + // Inbound: + pipeline.replace( + ChannelFactory.BYTES_TO_FRAME_DECODER_NAME, + ChannelFactory.BYTES_TO_SEGMENT_DECODER_NAME, + new BytesToSegmentDecoder(context.getSegmentCodec())); + pipeline.addAfter( + ChannelFactory.BYTES_TO_SEGMENT_DECODER_NAME, + ChannelFactory.SEGMENT_TO_FRAME_DECODER_NAME, + new SegmentToFrameDecoder(context.getFrameCodec(), logPrefix)); + } + } + private String getString(List row, int i) { return TypeCodecs.TEXT.decode(row.get(i), DefaultProtocolVersion.DEFAULT); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java index e8fcd87247f..5a0e9e5eb86 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/ResponseCallback.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java index 77e985064b4..3384bc57c94 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/StreamIdGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +18,17 @@ package com.datastax.oss.driver.internal.core.channel; import java.util.BitSet; +import java.util.concurrent.atomic.AtomicInteger; import net.jcip.annotations.NotThreadSafe; /** * Manages the set of identifiers used to distinguish multiplexed requests on a channel. * - *

      This class is not thread safe: calls to {@link #acquire()} and {@link #release(int)} must be - * properly synchronized (in practice this is done by only calling them from the I/O thread). - * However, {@link #getAvailableIds()} has volatile semantics. + *

      {@link #preAcquire()} / {@link #getAvailableIds()} follow atomic semantics. See {@link + * DriverChannel#preAcquireId()} for more explanations. + * + *

      Other methods are not synchronized, they are only called by {@link InFlightHandler} on the I/O + * thread. */ @NotThreadSafe class StreamIdGenerator { @@ -31,37 +36,52 @@ class StreamIdGenerator { private final int maxAvailableIds; // unset = available, set = borrowed (note that this is the opposite of the 3.x implementation) private final BitSet ids; - private volatile int availableIds; + private final AtomicInteger availableIds; StreamIdGenerator(int maxAvailableIds) { this.maxAvailableIds = maxAvailableIds; this.ids = new BitSet(this.maxAvailableIds); - this.availableIds = this.maxAvailableIds; + this.availableIds = new AtomicInteger(this.maxAvailableIds); + } + + boolean preAcquire() { + while (true) { + int current = availableIds.get(); + assert current >= 0; + if (current == 0) { + return false; + } else if (availableIds.compareAndSet(current, current - 1)) { + return true; + } + } + } + + void cancelPreAcquire() { + int available = availableIds.incrementAndGet(); + assert available <= maxAvailableIds; } - @SuppressWarnings("NonAtomicVolatileUpdate") // see explanation in class Javadoc int acquire() { + assert availableIds.get() < maxAvailableIds; int id = ids.nextClearBit(0); if (id >= maxAvailableIds) { return -1; } ids.set(id); - availableIds--; return id; } - @SuppressWarnings("NonAtomicVolatileUpdate") void release(int id) { - if (ids.get(id)) { - availableIds++; - } else { + if (!ids.get(id)) { throw new IllegalStateException("Tried to release id that hadn't been borrowed: " + id); } ids.clear(id); + int available = availableIds.incrementAndGet(); + assert available <= maxAvailableIds; } int getAvailableIds() { - return availableIds; + return availableIds.get(); } int getMaxAvailableIds() { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java index 03fa691049d..03391c57809 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/WriteCoalescer.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java index ecf6b6bc5af..d8514bdb88c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/channel/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
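A short behavioral walk-through of the two-phase accounting above (illustrative only; `StreamIdGenerator` is package-private, so this assumes same-package access):

```java
// preAcquire()/getAvailableIds() are atomic and callable from any thread;
// acquire()/release() stay confined to the I/O thread.
StreamIdGenerator ids = new StreamIdGenerator(2);

boolean r1 = ids.preAcquire();  // true:  available 2 -> 1
int id = ids.acquire();         // 0:     concrete id assigned on the I/O thread
boolean r2 = ids.preAcquire();  // true:  available 1 -> 0
boolean r3 = ids.preAcquire();  // false: at capacity, caller fails fast (BusyConnectionException)

ids.release(id);                // response arrived: id 0 reusable, available 0 -> 1
```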
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java index a4dd8fd67c0..d2898d39925 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/ConfigChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java new file mode 100644 index 00000000000..39c37d78c10 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DerivedExecutionProfile.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.config; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.time.Duration; +import java.util.AbstractMap; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.function.BiFunction; + +public class DerivedExecutionProfile implements DriverExecutionProfile { + + private static final Object NO_VALUE = new Object(); + + public static DerivedExecutionProfile with( + DriverExecutionProfile baseProfile, DriverOption option, Object value) { + if (baseProfile instanceof DerivedExecutionProfile) { + // Don't nest derived profiles, use same base and add to overrides + DerivedExecutionProfile previousDerived = (DerivedExecutionProfile) baseProfile; + ImmutableMap.Builder newOverrides = ImmutableMap.builder(); + for (Map.Entry override : previousDerived.overrides.entrySet()) { + if (!override.getKey().equals(option)) { + newOverrides.put(override.getKey(), override.getValue()); + } + } + newOverrides.put(option, value); + return new DerivedExecutionProfile(previousDerived.baseProfile, newOverrides.build()); + } else { + return new DerivedExecutionProfile(baseProfile, ImmutableMap.of(option, value)); + } + } + + public static DerivedExecutionProfile without( + DriverExecutionProfile baseProfile, DriverOption option) { + return with(baseProfile, option, NO_VALUE); + } + + private final DriverExecutionProfile baseProfile; + private final Map overrides; + + public DerivedExecutionProfile( + DriverExecutionProfile baseProfile, Map overrides) { + this.baseProfile = baseProfile; + this.overrides = overrides; + } + + @NonNull + @Override + public String getName() { + return baseProfile.getName(); + } + + @Override + public boolean isDefined(@NonNull DriverOption option) { + if (overrides.containsKey(option)) { + return overrides.get(option) != NO_VALUE; + } else { + return baseProfile.isDefined(option); + } + } + + @Override + public boolean getBoolean(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getBoolean); + } + + @NonNull + @Override + public List getBooleanList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getBooleanList); + } + + @Override + public int getInt(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getInt); + } + + @NonNull + @Override + public List getIntList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getIntList); + } + + @Override + public long getLong(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getLong); + } + + @NonNull + @Override + public List getLongList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getLongList); + } + + @Override + public double getDouble(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDouble); + } + + @NonNull + @Override + public List getDoubleList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDoubleList); + } + + @NonNull + @Override + public String getString(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getString); + } + + @NonNull + @Override + public List getStringList(@NonNull DriverOption option) { 
+ return get(option, DriverExecutionProfile::getStringList); + } + + @NonNull + @Override + public Map getStringMap(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getStringMap); + } + + @Override + public long getBytes(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getBytes); + } + + @NonNull + @Override + public List getBytesList(DriverOption option) { + return get(option, DriverExecutionProfile::getBytesList); + } + + @NonNull + @Override + public Duration getDuration(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDuration); + } + + @NonNull + @Override + public List getDurationList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDurationList); + } + + @NonNull + @SuppressWarnings("unchecked") + private ValueT get( + @NonNull DriverOption option, + BiFunction getter) { + Object value = overrides.get(option); + if (value == null) { + value = getter.apply(baseProfile, option); + } + if (value == null || value == NO_VALUE) { + throw new IllegalArgumentException("Missing configuration option " + option.getPath()); + } + return (ValueT) value; + } + + @NonNull + @Override + public SortedSet> entrySet() { + ImmutableSortedSet.Builder> builder = + ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); + // builder.add() has no effect if the element already exists, so process the overrides first + // since they have higher precedence + for (Map.Entry entry : overrides.entrySet()) { + if (entry.getValue() != NO_VALUE) { + builder.add(new AbstractMap.SimpleEntry<>(entry.getKey().getPath(), entry.getValue())); + } + } + builder.addAll(baseProfile.entrySet()); + return builder.build(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java index 65a3fa4b97b..5775fcbe507 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/DriverOptionConfigBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
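
To illustrate the sentinel pattern in `DerivedExecutionProfile` above: a private `NO_VALUE` marker lets the override map represent "explicitly removed" entries that shadow the base profile, while genuinely absent keys fall through to it. A small self-contained sketch under those assumptions follows; `OverrideLayer` and the string keys are hypothetical, the real class keys on `DriverOption` and uses immutable maps.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the NO_VALUE sentinel idea; not driver API.
final class OverrideLayer {
  private static final Object NO_VALUE = new Object();

  private final Map<String, Object> base;
  private final Map<String, Object> overrides = new HashMap<>();

  OverrideLayer(Map<String, Object> base) {
    this.base = base;
  }

  OverrideLayer with(String key, Object value) {
    overrides.put(key, value); // shadows any base value
    return this;
  }

  OverrideLayer without(String key) {
    overrides.put(key, NO_VALUE); // a removal must also shadow the base value
    return this;
  }

  boolean isDefined(String key) {
    if (overrides.containsKey(key)) {
      return overrides.get(key) != NO_VALUE;
    }
    return base.containsKey(key);
  }

  Object get(String key) {
    Object value = overrides.getOrDefault(key, base.get(key));
    if (value == null || value == NO_VALUE) {
      throw new IllegalArgumentException("Missing option " + key);
    }
    return value;
  }

  public static void main(String[] args) {
    Map<String, Object> base = new HashMap<>();
    base.put("timeout", "2s");
    OverrideLayer layer = new OverrideLayer(base).with("timeout", "5s");
    System.out.println(layer.get("timeout"));       // 5s: override wins
    layer.without("timeout");
    System.out.println(layer.isDefined("timeout")); // false: removal shadows base
  }
}
```

Flattening nested derivations (the `instanceof DerivedExecutionProfile` branch above) keeps lookups to a single override map plus one base profile, instead of growing a chain of wrappers.
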
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +18,6 @@ package com.datastax.oss.driver.internal.core.config; import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoaderBuilder; import edu.umd.cs.findbugs.annotations.CheckReturnValue; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -24,7 +25,7 @@ import java.util.List; import java.util.Map; -/** @see DefaultDriverConfigLoaderBuilder */ +/** @see com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoaderBuilder */ @Deprecated public interface DriverOptionConfigBuilder { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java new file mode 100644 index 00000000000..1a1076e9d78 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfig.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.config.cloud; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.net.InetSocketAddress; +import java.util.List; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class CloudConfig { + + private final InetSocketAddress proxyAddress; + private final List endPoints; + private final String localDatacenter; + private final SslEngineFactory sslEngineFactory; + + CloudConfig( + @NonNull InetSocketAddress proxyAddress, + @NonNull List endPoints, + @NonNull String localDatacenter, + @NonNull SslEngineFactory sslEngineFactory) { + this.proxyAddress = proxyAddress; + this.endPoints = ImmutableList.copyOf(endPoints); + this.localDatacenter = localDatacenter; + this.sslEngineFactory = sslEngineFactory; + } + + @NonNull + public InetSocketAddress getProxyAddress() { + return proxyAddress; + } + + @NonNull + public List getEndPoints() { + return endPoints; + } + + @NonNull + public String getLocalDatacenter() { + return localDatacenter; + } + + @NonNull + public SslEngineFactory getSslEngineFactory() { + return sslEngineFactory; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java new file mode 100644 index 00000000000..817b3263d25 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactory.java @@ -0,0 +1,296 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.config.cloud; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.internal.core.metadata.SniEndPoint; +import com.datastax.oss.driver.internal.core.ssl.SniSslEngineFactory; +import com.datastax.oss.driver.shaded.guava.common.io.ByteStreams; +import com.datastax.oss.driver.shaded.guava.common.net.HostAndPort; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class CloudConfigFactory { + private static final Logger LOG = LoggerFactory.getLogger(CloudConfigFactory.class); + /** + * Creates a {@link CloudConfig} with information fetched from the specified Cloud configuration + * URL. + * + *
<p>
      The target URL must point to a valid secure connect bundle archive in ZIP format. + * + * @param cloudConfigUrl the URL to fetch the Cloud configuration from; cannot be null. + * @throws IOException If the Cloud configuration cannot be read. + * @throws GeneralSecurityException If the Cloud SSL context cannot be created. + */ + @NonNull + public CloudConfig createCloudConfig(@NonNull URL cloudConfigUrl) + throws IOException, GeneralSecurityException { + Objects.requireNonNull(cloudConfigUrl, "cloudConfigUrl cannot be null"); + return createCloudConfig(cloudConfigUrl.openStream()); + } + + /** + * Creates a {@link CloudConfig} with information fetched from the specified {@link InputStream}. + * + *
<p>
      The stream must contain a valid secure connect bundle archive in ZIP format. Note that the + * stream will be closed after a call to that method and cannot be used anymore. + * + * @param cloudConfig the stream to read the Cloud configuration from; cannot be null. + * @throws IOException If the Cloud configuration cannot be read. + * @throws GeneralSecurityException If the Cloud SSL context cannot be created. + */ + @NonNull + public CloudConfig createCloudConfig(@NonNull InputStream cloudConfig) + throws IOException, GeneralSecurityException { + Objects.requireNonNull(cloudConfig, "cloudConfig cannot be null"); + JsonNode configJson = null; + ByteArrayOutputStream keyStoreOutputStream = null; + ByteArrayOutputStream trustStoreOutputStream = null; + ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false); + try (ZipInputStream zipInputStream = new ZipInputStream(cloudConfig)) { + ZipEntry entry; + while ((entry = zipInputStream.getNextEntry()) != null) { + String fileName = entry.getName(); + switch (fileName) { + case "config.json": + configJson = mapper.readTree(zipInputStream); + break; + case "identity.jks": + keyStoreOutputStream = new ByteArrayOutputStream(); + ByteStreams.copy(zipInputStream, keyStoreOutputStream); + break; + case "trustStore.jks": + trustStoreOutputStream = new ByteArrayOutputStream(); + ByteStreams.copy(zipInputStream, trustStoreOutputStream); + break; + } + } + } + if (configJson == null) { + throw new IllegalStateException("Invalid bundle: missing file config.json"); + } + if (keyStoreOutputStream == null) { + throw new IllegalStateException("Invalid bundle: missing file identity.jks"); + } + if (trustStoreOutputStream == null) { + throw new IllegalStateException("Invalid bundle: missing file trustStore.jks"); + } + char[] keyStorePassword = getKeyStorePassword(configJson); + char[] trustStorePassword = getTrustStorePassword(configJson); + ByteArrayInputStream keyStoreInputStream = + new ByteArrayInputStream(keyStoreOutputStream.toByteArray()); + ByteArrayInputStream trustStoreInputStream = + new ByteArrayInputStream(trustStoreOutputStream.toByteArray()); + SSLContext sslContext = + createSslContext( + keyStoreInputStream, keyStorePassword, trustStoreInputStream, trustStorePassword); + URL metadataServiceUrl = getMetadataServiceUrl(configJson); + JsonNode proxyMetadataJson; + try (BufferedReader proxyMetadata = fetchProxyMetadata(metadataServiceUrl, sslContext)) { + proxyMetadataJson = mapper.readTree(proxyMetadata); + } + InetSocketAddress sniProxyAddress = getSniProxyAddress(proxyMetadataJson); + List endPoints = getEndPoints(proxyMetadataJson, sniProxyAddress); + String localDatacenter = getLocalDatacenter(proxyMetadataJson); + SniSslEngineFactory sslEngineFactory = new SniSslEngineFactory(sslContext); + validateIfBundleContainsUsernamePassword(configJson); + return new CloudConfig(sniProxyAddress, endPoints, localDatacenter, sslEngineFactory); + } + + @NonNull + protected char[] getKeyStorePassword(JsonNode configFile) { + if (configFile.has("keyStorePassword")) { + return configFile.get("keyStorePassword").asText().toCharArray(); + } else { + throw new IllegalStateException("Invalid config.json: missing field keyStorePassword"); + } + } + + @NonNull + protected char[] getTrustStorePassword(JsonNode configFile) { + if (configFile.has("trustStorePassword")) { + return configFile.get("trustStorePassword").asText().toCharArray(); + } else { + throw new IllegalStateException("Invalid config.json: missing field 
trustStorePassword"); + } + } + + @NonNull + protected URL getMetadataServiceUrl(JsonNode configFile) throws MalformedURLException { + if (configFile.has("host")) { + String metadataServiceHost = configFile.get("host").asText(); + if (configFile.has("port")) { + int metadataServicePort = configFile.get("port").asInt(); + return new URL("https", metadataServiceHost, metadataServicePort, "/metadata"); + } else { + throw new IllegalStateException("Invalid config.json: missing field port"); + } + } else { + throw new IllegalStateException("Invalid config.json: missing field host"); + } + } + + protected void validateIfBundleContainsUsernamePassword(JsonNode configFile) { + if (configFile.has("username") || configFile.has("password")) { + LOG.info( + "The bundle contains config.json with username and/or password. Providing it in the bundle is deprecated and ignored."); + } + } + + @NonNull + protected SSLContext createSslContext( + @NonNull ByteArrayInputStream keyStoreInputStream, + @NonNull char[] keyStorePassword, + @NonNull ByteArrayInputStream trustStoreInputStream, + @NonNull char[] trustStorePassword) + throws IOException, GeneralSecurityException { + KeyManagerFactory kmf = createKeyManagerFactory(keyStoreInputStream, keyStorePassword); + TrustManagerFactory tmf = createTrustManagerFactory(trustStoreInputStream, trustStorePassword); + SSLContext sslContext = SSLContext.getInstance("SSL"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + return sslContext; + } + + @NonNull + protected KeyManagerFactory createKeyManagerFactory( + @NonNull InputStream keyStoreInputStream, @NonNull char[] keyStorePassword) + throws IOException, GeneralSecurityException { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load(keyStoreInputStream, keyStorePassword); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(ks, keyStorePassword); + Arrays.fill(keyStorePassword, (char) 0); + return kmf; + } + + @NonNull + protected TrustManagerFactory createTrustManagerFactory( + @NonNull InputStream trustStoreInputStream, @NonNull char[] trustStorePassword) + throws IOException, GeneralSecurityException { + KeyStore ts = KeyStore.getInstance("JKS"); + ts.load(trustStoreInputStream, trustStorePassword); + TrustManagerFactory tmf = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ts); + Arrays.fill(trustStorePassword, (char) 0); + return tmf; + } + + @NonNull + protected BufferedReader fetchProxyMetadata( + @NonNull URL metadataServiceUrl, @NonNull SSLContext sslContext) throws IOException { + try { + HttpsURLConnection connection = (HttpsURLConnection) metadataServiceUrl.openConnection(); + connection.setSSLSocketFactory(sslContext.getSocketFactory()); + connection.setRequestMethod("GET"); + return new BufferedReader( + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8)); + } catch (ConnectException e) { + throw new IllegalStateException( + "Unable to connect to cloud metadata service. Please make sure your cluster is not parked or terminated", + e); + } catch (UnknownHostException e) { + throw new IllegalStateException( + "Unable to resolve host for cloud metadata service. 
Please make sure your cluster is not terminated", + e); + } + } + + @NonNull + protected String getLocalDatacenter(@NonNull JsonNode proxyMetadata) { + JsonNode contactInfo = getContactInfo(proxyMetadata); + if (contactInfo.has("local_dc")) { + return contactInfo.get("local_dc").asText(); + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field local_dc"); + } + } + + @NonNull + protected InetSocketAddress getSniProxyAddress(@NonNull JsonNode proxyMetadata) { + JsonNode contactInfo = getContactInfo(proxyMetadata); + if (contactInfo.has("sni_proxy_address")) { + HostAndPort sniProxyHostAndPort = + HostAndPort.fromString(contactInfo.get("sni_proxy_address").asText()); + if (!sniProxyHostAndPort.hasPort()) { + throw new IllegalStateException( + "Invalid proxy metadata: missing port from field sni_proxy_address"); + } + return InetSocketAddress.createUnresolved( + sniProxyHostAndPort.getHost(), sniProxyHostAndPort.getPort()); + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field sni_proxy_address"); + } + } + + @NonNull + protected List getEndPoints( + @NonNull JsonNode proxyMetadata, @NonNull InetSocketAddress sniProxyAddress) { + JsonNode contactInfo = getContactInfo(proxyMetadata); + if (contactInfo.has("contact_points")) { + List endPoints = new ArrayList<>(); + JsonNode hostIdsJson = contactInfo.get("contact_points"); + for (int i = 0; i < hostIdsJson.size(); i++) { + endPoints.add(new SniEndPoint(sniProxyAddress, hostIdsJson.get(i).asText())); + } + return endPoints; + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field contact_points"); + } + } + + @NonNull + protected JsonNode getContactInfo(@NonNull JsonNode proxyMetadata) { + if (proxyMetadata.has("contact_info")) { + return proxyMetadata.get("contact_info"); + } else { + throw new IllegalStateException("Invalid proxy metadata: missing field contact_info"); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java new file mode 100644 index 00000000000..9a74d00df4f --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfig.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
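
For context on `createCloudConfig` above: the bundle is consumed in a single pass over a `ZipInputStream`, buffering each interesting entry into memory because the stream cannot be rewound. Here is a reduced, JDK-only sketch of that extraction loop; `BundleReader` is a hypothetical name.

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

// Sketch of single-pass ZIP extraction, as done in createCloudConfig above.
final class BundleReader {
  static Map<String, byte[]> readAll(InputStream in) throws IOException {
    Map<String, byte[]> entries = new HashMap<>();
    try (ZipInputStream zip = new ZipInputStream(in)) {
      ZipEntry entry;
      byte[] buffer = new byte[8192];
      while ((entry = zip.getNextEntry()) != null) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        int n;
        while ((n = zip.read(buffer)) != -1) {
          out.write(buffer, 0, n);
        }
        entries.put(entry.getName(), out.toByteArray());
      }
    }
    return entries;
  }
}
```

Buffering into byte arrays is what lets the factory later re-read `identity.jks` and `trustStore.jks` through `ByteArrayInputStream`s when building the `SSLContext`.
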
+ */ +package com.datastax.oss.driver.internal.core.config.composite; + +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.shaded.guava.common.collect.Sets; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; + +public class CompositeDriverConfig implements DriverConfig { + + private final DriverConfig primaryConfig; + private final DriverConfig fallbackConfig; + private final Map profiles = new ConcurrentHashMap<>(); + + public CompositeDriverConfig( + @NonNull DriverConfig primaryConfig, @NonNull DriverConfig fallbackConfig) { + this.primaryConfig = Objects.requireNonNull(primaryConfig); + this.fallbackConfig = Objects.requireNonNull(fallbackConfig); + } + + @NonNull + @Override + public DriverExecutionProfile getProfile(@NonNull String profileName) { + return profiles.compute( + profileName, + (k, v) -> + (v == null) + ? new CompositeDriverExecutionProfile(primaryConfig, fallbackConfig, profileName) + : v.refresh()); + } + + @NonNull + @Override + public Map getProfiles() { + // The map is updated lazily, if we want all the profiles we need to fetch them explicitly + for (String name : + Sets.union(primaryConfig.getProfiles().keySet(), fallbackConfig.getProfiles().keySet())) { + getProfile(name); + } + return Collections.unmodifiableMap(profiles); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java new file mode 100644 index 00000000000..23baf458c85 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigLoader.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
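
The `profiles.compute(...)` call in `CompositeDriverConfig.getProfile` above is a create-or-refresh cache: the mapping function runs under the map's per-key lock, creating the composite profile on first access and refreshing its backing profiles on every later access. A toy illustration of the idiom; `ProfileCache` and `Profile` are hypothetical stand-ins.

```java
import java.util.concurrent.ConcurrentHashMap;

// Illustrative sketch of the compute() create-or-refresh idiom.
final class ProfileCache {
  static final class Profile {
    int version;
    Profile refresh() { version++; return this; }
  }

  private final ConcurrentHashMap<String, Profile> cache = new ConcurrentHashMap<>();

  Profile get(String name) {
    // Created on first access, refreshed (same instance) on subsequent accesses.
    return cache.compute(name, (k, v) -> (v == null) ? new Profile() : v.refresh());
  }

  public static void main(String[] args) {
    ProfileCache cache = new ProfileCache();
    Profile p = cache.get("default"); // created, version 0
    cache.get("default");             // same instance, refreshed
    System.out.println(p.version);    // 1
  }
}
```
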
+ */ +package com.datastax.oss.driver.internal.core.config.composite; + +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; + +public class CompositeDriverConfigLoader implements DriverConfigLoader { + + private final DriverConfigLoader primaryConfigLoader; + private final DriverConfigLoader fallbackConfigLoader; + + public CompositeDriverConfigLoader( + @NonNull DriverConfigLoader primaryConfigLoader, + @NonNull DriverConfigLoader fallbackConfigLoader) { + this.primaryConfigLoader = Objects.requireNonNull(primaryConfigLoader); + this.fallbackConfigLoader = Objects.requireNonNull(fallbackConfigLoader); + } + + @NonNull + @Override + public DriverConfig getInitialConfig() { + DriverConfig primaryConfig = primaryConfigLoader.getInitialConfig(); + DriverConfig fallbackConfig = fallbackConfigLoader.getInitialConfig(); + return new CompositeDriverConfig(primaryConfig, fallbackConfig); + } + + @Override + public void onDriverInit(@NonNull DriverContext context) { + fallbackConfigLoader.onDriverInit(context); + primaryConfigLoader.onDriverInit(context); + } + + @NonNull + @Override + public CompletionStage reload() { + if (!primaryConfigLoader.supportsReloading() && !fallbackConfigLoader.supportsReloading()) { + return CompletableFutures.failedFuture( + new UnsupportedOperationException( + "Reloading is not supported (this is a composite config, " + + "and neither the primary nor the fallback are reloadable)")); + } else if (!primaryConfigLoader.supportsReloading()) { + return fallbackConfigLoader.reload(); + } else if (!fallbackConfigLoader.supportsReloading()) { + return primaryConfigLoader.reload(); + } else { + CompletionStage primaryFuture = primaryConfigLoader.reload(); + CompletionStage fallbackFuture = fallbackConfigLoader.reload(); + CompletableFuture compositeFuture = new CompletableFuture<>(); + primaryFuture.whenComplete( + (primaryChanged, primaryError) -> + fallbackFuture.whenComplete( + (fallbackChanged, fallbackError) -> { + if (primaryError == null && fallbackError == null) { + compositeFuture.complete(primaryChanged || fallbackChanged); + } else if (fallbackError == null) { + compositeFuture.completeExceptionally(primaryError); + } else if (primaryError == null) { + compositeFuture.completeExceptionally(fallbackError); + } else { + primaryError.addSuppressed(fallbackError); + compositeFuture.completeExceptionally(primaryError); + } + })); + return compositeFuture; + } + } + + @Override + public boolean supportsReloading() { + return primaryConfigLoader.supportsReloading() || fallbackConfigLoader.supportsReloading(); + } + + @Override + public void close() { + primaryConfigLoader.close(); + fallbackConfigLoader.close(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java new file mode 100644 index 00000000000..147d9e0bdb4 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverExecutionProfile.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.config.composite; + +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.function.BiFunction; + +public class CompositeDriverExecutionProfile implements DriverExecutionProfile { + + private final DriverConfig primaryConfig; + private final DriverConfig fallbackConfig; + private final String profileName; + + @Nullable private volatile DriverExecutionProfile primaryProfile; + @Nullable private volatile DriverExecutionProfile fallbackProfile; + + public CompositeDriverExecutionProfile( + @NonNull DriverConfig primaryConfig, + @NonNull DriverConfig fallbackConfig, + @NonNull String profileName) { + this.primaryConfig = Objects.requireNonNull(primaryConfig); + this.fallbackConfig = Objects.requireNonNull(fallbackConfig); + this.profileName = Objects.requireNonNull(profileName); + refreshInternal(); + } + + /** + * Fetches the underlying profiles again from the two backing configs. This is because some config + * implementations support adding/removing profiles at runtime. + * + *
<p>
      For efficiency reasons this is only done when the user fetches the profile again from the + * main config, not every time an option is fetched from the profile. + */ + public CompositeDriverExecutionProfile refresh() { + return refreshInternal(); + } + + // This method only exists to avoid calling its public, overridable variant from the constructor + private CompositeDriverExecutionProfile refreshInternal() { + // There's no `hasProfile()` in the public API because it didn't make sense until now. So + // unfortunately we have to catch the exception. + try { + primaryProfile = primaryConfig.getProfile(profileName); + } catch (IllegalArgumentException e) { + primaryProfile = null; + } + try { + fallbackProfile = fallbackConfig.getProfile(profileName); + } catch (IllegalArgumentException e) { + fallbackProfile = null; + } + + Preconditions.checkArgument( + primaryProfile != null || fallbackProfile != null, + "Unknown profile '%s'. Check your configuration.", + profileName); + return this; + } + + @NonNull + @Override + public String getName() { + return profileName; + } + + @Override + public boolean isDefined(@NonNull DriverOption option) { + DriverExecutionProfile primaryProfile = this.primaryProfile; + if (primaryProfile != null && primaryProfile.isDefined(option)) { + return true; + } else { + DriverExecutionProfile fallbackProfile = this.fallbackProfile; + return fallbackProfile != null && fallbackProfile.isDefined(option); + } + } + + @Override + public boolean getBoolean(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getBoolean); + } + + @NonNull + @Override + public List getBooleanList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getBooleanList); + } + + @Override + public int getInt(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getInt); + } + + @NonNull + @Override + public List getIntList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getIntList); + } + + @Override + public long getLong(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getLong); + } + + @NonNull + @Override + public List getLongList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getLongList); + } + + @Override + public double getDouble(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDouble); + } + + @NonNull + @Override + public List getDoubleList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDoubleList); + } + + @NonNull + @Override + public String getString(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getString); + } + + @NonNull + @Override + public List getStringList(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getStringList); + } + + @NonNull + @Override + public Map getStringMap(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getStringMap); + } + + @Override + public long getBytes(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getBytes); + } + + @NonNull + @Override + public List getBytesList(DriverOption option) { + return get(option, DriverExecutionProfile::getBytesList); + } + + @NonNull + @Override + public Duration getDuration(@NonNull DriverOption option) { + return get(option, DriverExecutionProfile::getDuration); + } + + @NonNull + @Override + public List getDurationList(@NonNull DriverOption option) { + return get(option, 
DriverExecutionProfile::getDurationList); + } + + private ValueT get( + @NonNull DriverOption option, + BiFunction getter) { + DriverExecutionProfile primaryProfile = this.primaryProfile; + if (primaryProfile != null && primaryProfile.isDefined(option)) { + return getter.apply(primaryProfile, option); + } else { + DriverExecutionProfile fallbackProfile = this.fallbackProfile; + if (fallbackProfile != null && fallbackProfile.isDefined(option)) { + return getter.apply(fallbackProfile, option); + } else { + throw new IllegalArgumentException("Unknown option: " + option); + } + } + } + + @NonNull + @Override + public SortedSet> entrySet() { + DriverExecutionProfile primaryProfile = this.primaryProfile; + DriverExecutionProfile fallbackProfile = this.fallbackProfile; + if (primaryProfile != null && fallbackProfile != null) { + SortedSet> result = new TreeSet<>(Map.Entry.comparingByKey()); + result.addAll(fallbackProfile.entrySet()); + result.addAll(primaryProfile.entrySet()); + return ImmutableSortedSet.copyOf(Map.Entry.comparingByKey(), result); + } else if (primaryProfile != null) { + return primaryProfile.entrySet(); + } else { + assert fallbackProfile != null; + return fallbackProfile.entrySet(); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java new file mode 100644 index 00000000000..74adbf120ca --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfig.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
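
The nested `whenComplete` calls in `CompositeDriverConfigLoader.reload` above implement a small join: wait for both reloads, OR the boolean outcomes together, and keep both failures by attaching one as a suppressed exception. Extracted as a standalone helper below; this is a sketch, `Futures.both` is not driver API.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

// Sketch of the future-combining logic in CompositeDriverConfigLoader.reload().
final class Futures {
  static CompletionStage<Boolean> both(
      CompletionStage<Boolean> primary, CompletionStage<Boolean> fallback) {
    CompletableFuture<Boolean> result = new CompletableFuture<>();
    primary.whenComplete(
        (pChanged, pError) ->
            fallback.whenComplete(
                (fChanged, fError) -> {
                  if (pError == null && fError == null) {
                    result.complete(pChanged || fChanged); // changed if either side changed
                  } else if (pError == null) {
                    result.completeExceptionally(fError);
                  } else {
                    if (fError != null) {
                      pError.addSuppressed(fError); // don't lose the second failure
                    }
                    result.completeExceptionally(pError);
                  }
                }));
    return result;
  }

  public static void main(String[] args) {
    both(CompletableFuture.completedFuture(false), CompletableFuture.completedFuture(true))
        .thenAccept(System.out::println); // true
  }
}
```

A plain `thenCombine` would surface only one of the two exceptions, which is why the hand-rolled version above is worth the extra nesting.
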
+ */ +package com.datastax.oss.driver.internal.core.config.map; + +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** @see MapBasedDriverConfigLoader */ +public class MapBasedDriverConfig implements DriverConfig { + + private final Map> optionsMap; + private final Map profiles = new ConcurrentHashMap<>(); + + public MapBasedDriverConfig(Map> optionsMap) { + this.optionsMap = optionsMap; + if (!optionsMap.containsKey(DriverExecutionProfile.DEFAULT_NAME)) { + throw new IllegalArgumentException( + "The options map must contain a profile named " + DriverExecutionProfile.DEFAULT_NAME); + } + createMissingProfiles(); + } + + @NonNull + @Override + public DriverExecutionProfile getProfile(@NonNull String profileName) { + return profiles.computeIfAbsent(profileName, this::newProfile); + } + + @NonNull + @Override + public Map getProfiles() { + // Refresh in case profiles were added to the backing map + createMissingProfiles(); + return Collections.unmodifiableMap(profiles); + } + + private void createMissingProfiles() { + for (Map.Entry> entry : optionsMap.entrySet()) { + String profileName = entry.getKey(); + if (!profiles.containsKey(profileName)) { + profiles.put(profileName, newProfile(profileName)); + } + } + } + + private MapBasedDriverExecutionProfile newProfile(String profileName) { + return new MapBasedDriverExecutionProfile(optionsMap, profileName); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java new file mode 100644 index 00000000000..14f959e5dc0 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoader.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
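
`MapBasedDriverConfig` above is the consumer side of the programmatic, map-based configuration introduced in this change set. A hedged usage sketch follows, assuming the accompanying `DriverConfigLoader.fromMap(OptionsMap)` factory and `OptionsMap.driverDefaults()`, which pre-populates the default profile and so satisfies the constructor check above.

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.config.OptionsMap;
import com.datastax.oss.driver.api.core.config.TypedDriverOption;
import java.time.Duration;

public class MapConfigExample {
  public static void main(String[] args) {
    // Start from the driver's built-in defaults so every required option exists.
    OptionsMap options = OptionsMap.driverDefaults();
    options.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5));

    try (CqlSession session =
        CqlSession.builder()
            .withConfigLoader(DriverConfigLoader.fromMap(options))
            .build()) {
      // Mutating the map at runtime notifies the loader's listener, which
      // fires a ConfigChangeEvent (see MapBasedDriverConfigLoader below).
      options.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(2));
    }
  }
}
```
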
+ */ +package com.datastax.oss.driver.internal.core.config.map; + +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; +import com.datastax.oss.driver.internal.core.context.EventBus; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.function.Consumer; + +public class MapBasedDriverConfigLoader implements DriverConfigLoader, Consumer { + + @NonNull private final OptionsMap source; + @NonNull private final Map> rawMap; + private volatile EventBus eventBus; + + public MapBasedDriverConfigLoader( + @NonNull OptionsMap source, @NonNull Map> rawMap) { + this.source = source; + this.rawMap = rawMap; + } + + @NonNull + @Override + public DriverConfig getInitialConfig() { + return new MapBasedDriverConfig(rawMap); + } + + @Override + public void onDriverInit(@NonNull DriverContext context) { + eventBus = ((InternalDriverContext) context).getEventBus(); + source.addChangeListener(this); + } + + @Override + public void accept(OptionsMap map) { + assert eventBus != null; // listener is registered after setting this field + eventBus.fire(ConfigChangeEvent.INSTANCE); + } + + @NonNull + @Override + public CompletionStage reload() { + return CompletableFuture.completedFuture(true); + } + + @Override + public boolean supportsReloading() { + return true; + } + + @Override + public void close() { + source.removeChangeListener(this); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java new file mode 100644 index 00000000000..4234befd94b --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverExecutionProfile.java @@ -0,0 +1,187 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
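
The loader above wires itself into the change-notification chain by implementing `Consumer<OptionsMap>`: it registers on driver init, forwards every mutation to the session's `EventBus`, and deregisters on close. The observer mechanics in miniature, as a JDK-only sketch with a hypothetical `Settings` class:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;

final class Settings {
  private final Map<String, Object> values = new HashMap<>();
  private final List<Consumer<Settings>> listeners = new CopyOnWriteArrayList<>();

  void addChangeListener(Consumer<Settings> listener) { listeners.add(listener); }

  void removeChangeListener(Consumer<Settings> listener) { listeners.remove(listener); }

  void put(String key, Object value) {
    values.put(key, value);
    for (Consumer<Settings> listener : listeners) {
      // In the driver, MapBasedDriverConfigLoader.accept fires ConfigChangeEvent here.
      listener.accept(this);
    }
  }

  public static void main(String[] args) {
    Settings settings = new Settings();
    Consumer<Settings> loader = s -> System.out.println("changed -> fire ConfigChangeEvent");
    settings.addChangeListener(loader);
    settings.put("request.timeout", "5s"); // prints once
    settings.removeChangeListener(loader); // mirrors close()
  }
}
```
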
+ */ +package com.datastax.oss.driver.internal.core.config.map; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.time.Duration; +import java.util.AbstractMap; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; + +/** @see MapBasedDriverConfigLoader */ +public class MapBasedDriverExecutionProfile implements DriverExecutionProfile { + + private final String profileName; + // The backing map for the current profile + private final Map profile; + // The backing map for the default profile (if the current one is not the default) + private final Map defaultProfile; + + public MapBasedDriverExecutionProfile( + Map> optionsMap, String profileName) { + this( + profileName, + optionsMap.get(profileName), + profileName.equals(DriverExecutionProfile.DEFAULT_NAME) + ? Collections.emptyMap() + : optionsMap.get(DriverExecutionProfile.DEFAULT_NAME)); + Preconditions.checkArgument( + optionsMap.containsKey(profileName), + "Unknown profile '%s'. Check your configuration.", + profileName); + } + + public MapBasedDriverExecutionProfile( + String profileName, + Map profile, + Map defaultProfile) { + this.profileName = profileName; + this.profile = profile; + this.defaultProfile = defaultProfile; + } + + @NonNull + @Override + public String getName() { + return profileName; + } + + @Override + public boolean isDefined(@NonNull DriverOption option) { + return profile.containsKey(option) || defaultProfile.containsKey(option); + } + + // Driver options don't encode the type, everything relies on the user putting the right types in + // the backing map, so no point in trying to type-check. 
+ @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) + @NonNull + private T get(@NonNull DriverOption option) { + Object value = profile.getOrDefault(option, defaultProfile.get(option)); + if (value == null) { + throw new IllegalArgumentException("Missing configuration option " + option.getPath()); + } + return (T) value; + } + + @Override + public boolean getBoolean(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getBooleanList(@NonNull DriverOption option) { + return get(option); + } + + @Override + public int getInt(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getIntList(@NonNull DriverOption option) { + return get(option); + } + + @Override + public long getLong(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getLongList(@NonNull DriverOption option) { + return get(option); + } + + @Override + public double getDouble(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getDoubleList(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public String getString(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getStringList(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public Map getStringMap(@NonNull DriverOption option) { + return get(option); + } + + @Override + public long getBytes(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getBytesList(DriverOption option) { + return get(option); + } + + @NonNull + @Override + public Duration getDuration(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public List getDurationList(@NonNull DriverOption option) { + return get(option); + } + + @NonNull + @Override + public SortedSet> entrySet() { + ImmutableSortedSet.Builder> builder = + ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); + for (Map backingMap : + // builder.add() ignores duplicates, so process higher precedence backing maps first + ImmutableList.of(profile, defaultProfile)) { + for (Map.Entry entry : backingMap.entrySet()) { + builder.add(new AbstractMap.SimpleEntry<>(entry.getKey().getPath(), entry.getValue())); + } + } + return builder.build(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java index 93a1b0b8316..f1bfbea8249 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoader.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
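
One subtlety in `MapBasedDriverExecutionProfile` (and the same trick appears in `DerivedExecutionProfile.entrySet` above): when merging the profile layer with the default-profile layer, the higher-precedence layer must be processed first, because the set builder silently ignores keys it has already seen. The precedence rule in isolation, as a Java 8 compatible sketch with a hypothetical `LayerMerge` class:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

final class LayerMerge {
  static Map<String, Object> merge(Map<String, Object> profile, Map<String, Object> defaults) {
    Map<String, Object> merged = new TreeMap<>();
    // Higher-precedence layer first: putIfAbsent keeps the first value it sees.
    for (Map<String, Object> layer : Arrays.asList(profile, defaults)) {
      for (Map.Entry<String, Object> entry : layer.entrySet()) {
        merged.putIfAbsent(entry.getKey(), entry.getValue());
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    Map<String, Object> defaults = new HashMap<>();
    defaults.put("timeout", "2s");
    defaults.put("consistency", "LOCAL_ONE");
    Map<String, Object> profile = new HashMap<>();
    profile.put("timeout", "5s");
    // {consistency=LOCAL_ONE, timeout=5s}: the profile's timeout wins.
    System.out.println(merge(profile, defaults));
  }
}
```
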
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,7 @@ */ package com.datastax.oss.driver.internal.core.config.typesafe; +import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; @@ -25,22 +28,30 @@ import com.datastax.oss.driver.internal.core.context.EventBus; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.util.Loggers; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; +import com.typesafe.config.ConfigParseOptions; import edu.umd.cs.findbugs.annotations.NonNull; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.ScheduledFuture; +import java.io.File; +import java.net.URL; import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** The default loader; it is based on Typesafe Config and reloads at a configurable interval. */ +/** + * The default loader; it is based on Typesafe Config and optionally reloads at a configurable + * interval. + */ @ThreadSafe public class DefaultDriverConfigLoader implements DriverConfigLoader { @@ -51,29 +62,141 @@ public class DefaultDriverConfigLoader implements DriverConfigLoader { public static final Supplier DEFAULT_CONFIG_SUPPLIER = () -> { ConfigFactory.invalidateCaches(); - return ConfigFactory.load().getConfig(DEFAULT_ROOT_PATH); + // The thread's context class loader will be used for application classpath resources, + // while the driver class loader will be used for reference classpath resources. 
+ return ConfigFactory.defaultOverrides() + .withFallback(ConfigFactory.defaultApplication()) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) + .resolve() + .getConfig(DEFAULT_ROOT_PATH); }; + @NonNull + public static DefaultDriverConfigLoader fromClasspath( + @NonNull String resourceBaseName, @NonNull ClassLoader appClassLoader) { + return new DefaultDriverConfigLoader( + () -> { + ConfigFactory.invalidateCaches(); + Config config = + ConfigFactory.defaultOverrides() + .withFallback( + ConfigFactory.parseResourcesAnySyntax( + resourceBaseName, + ConfigParseOptions.defaults().setClassLoader(appClassLoader))) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) + .resolve(); + return config.getConfig(DEFAULT_ROOT_PATH); + }); + } + + @NonNull + public static DriverConfigLoader fromFile(@NonNull File file) { + return new DefaultDriverConfigLoader( + () -> { + ConfigFactory.invalidateCaches(); + Config config = + ConfigFactory.defaultOverrides() + .withFallback(ConfigFactory.parseFileAnySyntax(file)) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) + .resolve(); + return config.getConfig(DEFAULT_ROOT_PATH); + }); + } + + @NonNull + public static DriverConfigLoader fromUrl(@NonNull URL url) { + return new DefaultDriverConfigLoader( + () -> { + ConfigFactory.invalidateCaches(); + Config config = + ConfigFactory.defaultOverrides() + .withFallback(ConfigFactory.parseURL(url)) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) + .resolve(); + return config.getConfig(DEFAULT_ROOT_PATH); + }); + } + + @NonNull + public static DefaultDriverConfigLoader fromString(@NonNull String contents) { + return new DefaultDriverConfigLoader( + () -> { + ConfigFactory.invalidateCaches(); + Config config = + ConfigFactory.defaultOverrides() + .withFallback(ConfigFactory.parseString(contents)) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) + .resolve(); + return config.getConfig(DEFAULT_ROOT_PATH); + }, + false); + } + private final Supplier configSupplier; private final TypesafeDriverConfig driverConfig; + private final boolean supportsReloading; private volatile SingleThreaded singleThreaded; /** * Builds a new instance with the default Typesafe config loading rules (documented in {@link - * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options. + * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options. This + * constructor enables config reloading (that is, {@link #supportsReloading} will return true). + * + *
<p>
      Application-specific classpath resources will be located using the {@linkplain + * Thread#getContextClassLoader() the current thread's context class loader}. This might not be + * suitable for OSGi deployments, which should use {@link #DefaultDriverConfigLoader(ClassLoader)} + * instead. */ public DefaultDriverConfigLoader() { this(DEFAULT_CONFIG_SUPPLIER); } + /** + * Builds a new instance with the default Typesafe config loading rules (documented in {@link + * SessionBuilder#withConfigLoader(DriverConfigLoader)}) and the core driver options, except that + * application-specific classpath resources will be located using the provided {@link ClassLoader} + * instead of {@linkplain Thread#getContextClassLoader() the current thread's context class + * loader}. This constructor enables config reloading (that is, {@link #supportsReloading} will + * return true). + */ + public DefaultDriverConfigLoader(@NonNull ClassLoader appClassLoader) { + this( + () -> { + ConfigFactory.invalidateCaches(); + return ConfigFactory.defaultOverrides() + .withFallback(ConfigFactory.defaultApplication(appClassLoader)) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())) + .resolve() + .getConfig(DEFAULT_ROOT_PATH); + }); + } + /** * Builds an instance with custom arguments, if you want to load the configuration from somewhere - * else. + * else. This constructor enables config reloading (that is, {@link #supportsReloading} will + * return true). + * + * @param configSupplier A supplier for the Typesafe {@link Config}; it will be invoked once when + * this object is instantiated, and at each reload attempt, if reloading is enabled. */ - public DefaultDriverConfigLoader(Supplier configSupplier) { + public DefaultDriverConfigLoader(@NonNull Supplier configSupplier) { + this(configSupplier, true); + } + + /** + * Builds an instance with custom arguments, if you want to load the configuration from somewhere + * else and/or modify config reload behavior. + * + * @param configSupplier A supplier for the Typesafe {@link Config}; it will be invoked once when + * this object is instantiated, and at each reload attempt, if reloading is enabled. + * @param supportsReloading Whether config reloading should be enabled or not. + */ + public DefaultDriverConfigLoader( + @NonNull Supplier configSupplier, boolean supportsReloading) { this.configSupplier = configSupplier; this.driverConfig = new TypesafeDriverConfig(configSupplier.get()); + this.supportsReloading = supportsReloading; } @NonNull @@ -89,15 +212,21 @@ public void onDriverInit(@NonNull DriverContext driverContext) { @NonNull @Override - public CompletionStage reload() { - CompletableFuture result = new CompletableFuture<>(); - RunOrSchedule.on(singleThreaded.adminExecutor, () -> singleThreaded.reload(result)); - return result; + public final CompletionStage reload() { + if (supportsReloading) { + CompletableFuture result = new CompletableFuture<>(); + RunOrSchedule.on(singleThreaded.adminExecutor, () -> singleThreaded.reload(result)); + return result; + } else { + return CompletableFutures.failedFuture( + new UnsupportedOperationException( + "This instance of DefaultDriverConfigLoader does not support reloading")); + } } @Override - public boolean supportsReloading() { - return true; + public final boolean supportsReloading() { + return supportsReloading; } /** For internal use only, this leaks a Typesafe config type. 
*/
@@ -109,8 +238,14 @@ public Supplier<Config> getConfigSupplier() {
 @Override
 public void close() {
   SingleThreaded singleThreaded = this.singleThreaded;
-   if (singleThreaded != null) {
-     RunOrSchedule.on(singleThreaded.adminExecutor, singleThreaded::close);
+   if (singleThreaded != null && !singleThreaded.adminExecutor.terminationFuture().isDone()) {
+     try {
+       RunOrSchedule.on(singleThreaded.adminExecutor, singleThreaded::close);
+     } catch (RejectedExecutionException e) {
+       // Checking the future is racy; there is still a tiny window that could get us here.
+       // We can safely ignore this error because, if the execution is rejected, the periodic
+       // reload task, if any, has already been cancelled.
+     }
   }
 }
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java
index 233df8e31ab..3096fd85ffb 100644
--- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java
+++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderBuilder.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,7 +18,6 @@
 package com.datastax.oss.driver.internal.core.config.typesafe;

 import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
-import com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder;
 import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap;
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
@@ -33,7 +34,8 @@
 @NotThreadSafe
 @Deprecated
 public class DefaultDriverConfigLoaderBuilder
-    implements DriverOptionConfigBuilder<DefaultDriverConfigLoaderBuilder> {
+    implements com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder<
+        DefaultDriverConfigLoaderBuilder> {

 private NullAllowingImmutableMap.Builder<String, Object> values =
     NullAllowingImmutableMap.builder();
@@ -86,7 +88,10 @@ public DefaultDriverConfigLoaderBuilder with(@NonNull String path, @Nullable Obj
 }

 /** A builder for specifying options at a profile level using {@code withXXX} methods.
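Taken together, the `DefaultDriverConfigLoader` changes above add static factories (`fromClasspath`, `fromFile`, `fromUrl`, `fromString`) and an opt-out of reloading. A minimal usage sketch, not part of this diff (the config file path and the session setup around it are assumptions):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader;
import java.io.File;

public class ConfigLoaderSketch {
  public static void main(String[] args) {
    // File-based loader; reloading stays enabled for this factory.
    DriverConfigLoader loader =
        DefaultDriverConfigLoader.fromFile(new File("/etc/myapp/app.conf"));
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // fromString(...) is the one factory that passes supportsReloading=false,
      // so defensive code should check before forcing a reload:
      if (loader.supportsReloading()) {
        loader.reload().toCompletableFuture().join(); // completes with true if the config changed
      }
    }
  }
}
```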
*/ - public static final class ProfileBuilder implements DriverOptionConfigBuilder { + @Deprecated + public static final class ProfileBuilder + implements com.datastax.oss.driver.internal.core.config.DriverOptionConfigBuilder< + ProfileBuilder> { final NullAllowingImmutableMap.Builder values = NullAllowingImmutableMap.builder(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java index 798566f97a9..2a7f6379362 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +17,18 @@ */ package com.datastax.oss.driver.internal.core.config.typesafe; +import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.config.DriverOption; import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.time.Duration; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -37,31 +40,65 @@ public class DefaultProgrammaticDriverConfigLoaderBuilder implements ProgrammaticDriverConfigLoaderBuilder { public static final Supplier DEFAULT_FALLBACK_SUPPLIER = - () -> ConfigFactory.defaultApplication().withFallback(ConfigFactory.defaultReference()); + () -> + ConfigFactory.defaultApplication() + // Do not remove root path here, it must be done after merging configs + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())); + + private final Map values = new HashMap<>(); - private final NullAllowingImmutableMap.Builder values = - NullAllowingImmutableMap.builder(); private final Supplier fallbackSupplier; private final String rootPath; private String currentProfileName = DriverExecutionProfile.DEFAULT_NAME; /** + * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} with default + * settings. + * + *

Fallback configuration for options that haven't been specified programmatically will be
+ * obtained from standard classpath resources. Application-specific classpath resources will be
+ * located using {@linkplain Thread#getContextClassLoader() the current thread's context class
+ * loader}. This might not be suitable for OSGi deployments, which should use {@link
+ * #DefaultProgrammaticDriverConfigLoaderBuilder(ClassLoader)} instead.
+ */
+ public DefaultProgrammaticDriverConfigLoaderBuilder() {
+   this(DEFAULT_FALLBACK_SUPPLIER, DefaultDriverConfigLoader.DEFAULT_ROOT_PATH);
+ }
+
+ /**
+ * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} with default
+ * settings but a custom class loader.
+ *
+ *

      Fallback configuration for options that haven't been specified programmatically will be + * obtained from standard classpath resources. Application-specific classpath resources will be + * located using the provided {@link ClassLoader} instead of {@linkplain + * Thread#getContextClassLoader() the current thread's context class loader}. + */ + public DefaultProgrammaticDriverConfigLoaderBuilder(@NonNull ClassLoader appClassLoader) { + this( + () -> + ConfigFactory.defaultApplication(appClassLoader) + .withFallback(ConfigFactory.defaultReference(CqlSession.class.getClassLoader())), + DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); + } + + /** + * Creates an instance of {@link DefaultProgrammaticDriverConfigLoaderBuilder} using a custom + * fallback config supplier. + * * @param fallbackSupplier the supplier that will provide fallback configuration for options that * haven't been specified programmatically. * @param rootPath the root path used in non-programmatic sources (fallback reference.conf and - * system properties). + * system properties). In most cases it should be {@link + * DefaultDriverConfigLoader#DEFAULT_ROOT_PATH}. Cannot be null but can be empty. */ public DefaultProgrammaticDriverConfigLoaderBuilder( - Supplier fallbackSupplier, String rootPath) { + @NonNull Supplier fallbackSupplier, @NonNull String rootPath) { this.fallbackSupplier = fallbackSupplier; this.rootPath = rootPath; } - public DefaultProgrammaticDriverConfigLoaderBuilder() { - this(DEFAULT_FALLBACK_SUPPLIER, DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); - } - private ProgrammaticDriverConfigLoaderBuilder with( @NonNull DriverOption option, @Nullable Object value) { return with(option.getPath(), value); @@ -215,13 +252,16 @@ public DriverConfigLoader build() { .withFallback(programmaticConfig) .withFallback(fallbackSupplier.get()) .resolve(); + // Only remove rootPath after the merge between system properties + // and fallback configuration, since both are supposed to + // contain the same rootPath prefix. return rootPath.isEmpty() ? config : config.getConfig(rootPath); }); } private Config buildConfig() { Config config = ConfigFactory.empty(); - for (Map.Entry entry : values.build().entrySet()) { + for (Map.Entry entry : values.entrySet()) { config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(entry.getValue())); } return config; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java index 8ed6b80dfd2..e1d8c779f2c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfig.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
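For context, the builder above is what backs the public `DriverConfigLoader.programmaticBuilder()` entry point. A sketch of driving it, including an execution profile (the option values are arbitrary examples):

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.time.Duration;

public class ProgrammaticConfigSketch {
  public static DriverConfigLoader buildLoader() {
    // Options not set here fall back to application.conf / reference.conf,
    // which is exactly what DEFAULT_FALLBACK_SUPPLIER above wires in.
    return DriverConfigLoader.programmaticBuilder()
        .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5))
        .startProfile("slow") // options until endProfile() apply only to profile "slow"
        .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
        .endProfile()
        .build();
  }
}
```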
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,14 +21,20 @@ import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.typesafe.config.Config; import com.typesafe.config.ConfigObject; +import com.typesafe.config.ConfigOrigin; +import com.typesafe.config.ConfigOriginFactory; import com.typesafe.config.ConfigValue; +import com.typesafe.config.ConfigValueFactory; import edu.umd.cs.findbugs.annotations.NonNull; +import java.net.URL; import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,14 +43,19 @@ public class TypesafeDriverConfig implements DriverConfig { private static final Logger LOG = LoggerFactory.getLogger(TypesafeDriverConfig.class); + private static final ConfigOrigin DEFAULT_OVERRIDES_ORIGIN = + ConfigOriginFactory.newSimple("default was overridden programmatically"); private final ImmutableMap profiles; // Only used to detect if reload saw any change private volatile Config lastLoadedConfig; + private final Map defaultOverrides = new ConcurrentHashMap<>(); + + private final TypesafeDriverExecutionProfile.Base defaultProfile; + public TypesafeDriverConfig(Config config) { this.lastLoadedConfig = config; - Map profileConfigs = extractProfiles(config); ImmutableMap.Builder builder = @@ -53,10 +66,12 @@ public TypesafeDriverConfig(Config config) { new TypesafeDriverExecutionProfile.Base(entry.getKey(), entry.getValue())); } this.profiles = builder.build(); + this.defaultProfile = profiles.get(DriverExecutionProfile.DEFAULT_NAME); } /** @return whether the configuration changed */ public boolean reload(Config config) { + config = applyDefaultOverrides(config); if (config.equals(lastLoadedConfig)) { return false; } else { @@ -126,14 +141,22 @@ private Map extractProfiles(Config sourceConfig) { return result.build(); } + @Override + public DriverExecutionProfile getDefaultProfile() { + return defaultProfile; + } + @NonNull @Override public DriverExecutionProfile getProfile(@NonNull String profileName) { - Preconditions.checkArgument( - profiles.containsKey(profileName), - "Unknown profile '%s'. Check your configuration.", - profileName); - return profiles.get(profileName); + if (profileName.equals(DriverExecutionProfile.DEFAULT_NAME)) { + return defaultProfile; + } + return Optional.ofNullable(profiles.get(profileName)) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format("Unknown profile '%s'. Check your configuration.", profileName))); } @NonNull @@ -141,4 +164,50 @@ public DriverExecutionProfile getProfile(@NonNull String profileName) { public Map getProfiles() { return profiles; } + + /** + * Replace the given options, only if the original values came from {@code + * reference.conf}: if the option was set explicitly in {@code application.conf}, then the + * override is ignored. + * + *

      The overrides are also taken into account in profiles, and survive reloads. If this method + * is invoked multiple times, the last value for each option will be used. Note that it is + * currently not possible to use {@code null} as a value. + */ + public void overrideDefaults(@NonNull Map overrides) { + defaultOverrides.putAll(overrides); + reload(lastLoadedConfig); + } + + private Config applyDefaultOverrides(Config source) { + Config result = source; + for (Map.Entry entry : defaultOverrides.entrySet()) { + String path = entry.getKey().getPath(); + Object value = entry.getValue(); + if (isDefault(source, path)) { + LOG.debug("Replacing default value for {} by {}", path, value); + result = + result.withValue( + path, ConfigValueFactory.fromAnyRef(value).withOrigin(DEFAULT_OVERRIDES_ORIGIN)); + } else { + LOG.debug( + "Ignoring default override for {} because the user has overridden the value", path); + } + } + return result; + } + + // Whether the value in the given path comes from the reference.conf in the driver JAR. + private static boolean isDefault(Config config, String path) { + if (!config.hasPath(path)) { + return false; + } + ConfigOrigin origin = config.getValue(path).origin(); + if (origin.equals(DEFAULT_OVERRIDES_ORIGIN)) { + // Same default was overridden twice, should use the last value + return true; + } + URL url = origin.url(); + return url != null && url.toString().endsWith("reference.conf"); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java index 31275a4acce..b7dd5abe42e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverExecutionProfile.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
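To make the `overrideDefaults` semantics above concrete, a hypothetical sketch (the chosen option and value are assumptions): the override only wins where the effective value still originates from `reference.conf`.

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverOption;
import com.datastax.oss.driver.internal.core.config.typesafe.TypesafeDriverConfig;
import java.util.Collections;
import java.util.Map;

public class OverrideDefaultsSketch {
  public static void raiseDefaultPageSize(TypesafeDriverConfig config) {
    Map<DriverOption, Object> overrides =
        Collections.singletonMap(DefaultDriverOption.REQUEST_PAGE_SIZE, 10_000);
    // No-op for any option the user already set in application.conf; otherwise the
    // new default applies, including in profiles and across config reloads.
    config.overrideDefaults(overrides);
  }
}
```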
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,7 +32,6 @@ import java.time.Duration; import java.util.AbstractMap; import java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Set; @@ -64,6 +65,7 @@ public boolean getBoolean(@NonNull DriverOption option) { return getCached(option.getPath(), getEffectiveOptions()::getBoolean); } + // We override `with*` methods because they can be implemented a bit better with Typesafe config @NonNull @Override public DriverExecutionProfile withBoolean(@NonNull DriverOption option, boolean value) { @@ -269,7 +271,7 @@ public DriverExecutionProfile without(@NonNull DriverOption option) { @NonNull @Override public Object getComparisonKey(@NonNull DriverOption option) { - // No need to cache this, it's only used for policy initialization + // This method has a default implementation in the interface, but here we can do it in one line: return getEffectiveOptions().getConfig(option.getPath()); } @@ -277,7 +279,7 @@ public Object getComparisonKey(@NonNull DriverOption option) { @Override public SortedSet> entrySet() { ImmutableSortedSet.Builder> builder = - ImmutableSortedSet.orderedBy(Comparator.comparing(Map.Entry::getKey)); + ImmutableSortedSet.orderedBy(Map.Entry.comparingByKey()); for (Map.Entry entry : getEffectiveOptions().entrySet()) { builder.add(new AbstractMap.SimpleEntry<>(entry.getKey(), entry.getValue().unwrapped())); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java index f59350afff5..72e0ba5ae3d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/config/typesafe/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java index ccead526906..03edb38f8d4 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
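As an aside, the `entrySet()` change above is what makes it cheap to dump a profile's effective options in a stable order. A small sketch (the `session` argument is assumed to be an open session):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
import java.util.Map;

public class DumpConfigSketch {
  public static void dump(CqlSession session) {
    DriverExecutionProfile profile = session.getContext().getConfig().getDefaultProfile();
    // Entries come back sorted by key, courtesy of the ImmutableSortedSet above.
    for (Map.Entry<String, Object> entry : profile.entrySet()) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }
  }
}
```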
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java index 2320dee255a..5fa04cb63d6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java index da99c1d9a87..3074bda2398 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContext.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,14 @@ */ package com.datastax.oss.driver.internal.core.context; +import static com.datastax.oss.driver.internal.core.util.Dependency.JACKSON; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.internal.core.InsightsClientLifecycleListener; +import com.datastax.dse.driver.internal.core.type.codec.DseTypeCodecsRegistrar; +import com.datastax.dse.protocol.internal.DseProtocolV1ClientCodecs; +import com.datastax.dse.protocol.internal.DseProtocolV2ClientCodecs; +import com.datastax.dse.protocol.internal.ProtocolV4ClientCodecsForDse; import com.datastax.oss.driver.api.core.ProtocolVersion; import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; import com.datastax.oss.driver.api.core.auth.AuthProvider; @@ -24,6 +34,7 @@ import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeStateListener; import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; @@ -33,21 +44,28 @@ import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; import com.datastax.oss.driver.api.core.time.TimestampGenerator; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; import com.datastax.oss.driver.api.core.tracker.RequestTracker; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.CassandraProtocolVersionRegistry; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; +import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.channel.ChannelFactory; import com.datastax.oss.driver.internal.core.channel.DefaultWriteCoalescer; import com.datastax.oss.driver.internal.core.channel.WriteCoalescer; import com.datastax.oss.driver.internal.core.control.ControlConnection; +import com.datastax.oss.driver.internal.core.metadata.CloudTopologyMonitor; import com.datastax.oss.driver.internal.core.metadata.DefaultTopologyMonitor; import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper; import com.datastax.oss.driver.internal.core.metadata.MetadataManager; +import com.datastax.oss.driver.internal.core.metadata.MultiplexingNodeStateListener; +import com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener; import com.datastax.oss.driver.internal.core.metadata.TopologyMonitor; +import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener; +import com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener; 
import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DefaultSchemaParserFactory; import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; import com.datastax.oss.driver.internal.core.metadata.schema.queries.DefaultSchemaQueriesFactory; @@ -56,34 +74,49 @@ import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenFactoryRegistry; import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; -import com.datastax.oss.driver.internal.core.metrics.DropwizardMetricsFactory; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; +import com.datastax.oss.driver.internal.core.protocol.BuiltInCompressors; import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodec; -import com.datastax.oss.driver.internal.core.protocol.Lz4Compressor; -import com.datastax.oss.driver.internal.core.protocol.SnappyCompressor; import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; import com.datastax.oss.driver.internal.core.servererrors.WriteTypeRegistry; +import com.datastax.oss.driver.internal.core.session.BuiltInRequestProcessors; import com.datastax.oss.driver.internal.core.session.PoolManager; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; import com.datastax.oss.driver.internal.core.ssl.JdkSslHandlerFactory; import com.datastax.oss.driver.internal.core.ssl.SslHandlerFactory; +import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker; +import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; +import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; import com.datastax.oss.driver.internal.core.util.Reflection; import com.datastax.oss.driver.internal.core.util.concurrent.CycleDetector; import com.datastax.oss.driver.internal.core.util.concurrent.LazyReference; import com.datastax.oss.protocol.internal.Compressor; import com.datastax.oss.protocol.internal.FrameCodec; +import com.datastax.oss.protocol.internal.PrimitiveCodec; +import com.datastax.oss.protocol.internal.ProtocolV3ClientCodecs; +import com.datastax.oss.protocol.internal.ProtocolV5ClientCodecs; +import com.datastax.oss.protocol.internal.ProtocolV6ClientCodecs; +import com.datastax.oss.protocol.internal.SegmentCodec; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import io.netty.buffer.ByteBuf; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Default implementation of the driver context. 
@@ -105,6 +138,7 @@ @ThreadSafe public class DefaultDriverContext implements InternalDriverContext { + private static final Logger LOG = LoggerFactory.getLogger(InternalDriverContext.class); private static final AtomicInteger SESSION_NAME_COUNTER = new AtomicInteger(); protected final CycleDetector cycleDetector = @@ -126,17 +160,18 @@ public class DefaultDriverContext implements InternalDriverContext { new LazyReference<>("timestampGenerator", this::buildTimestampGenerator, cycleDetector); private final LazyReference addressTranslatorRef = new LazyReference<>("addressTranslator", this::buildAddressTranslator, cycleDetector); - private final LazyReference> authProviderRef = - new LazyReference<>("authProvider", this::buildAuthProvider, cycleDetector); - private final LazyReference> sslEngineFactoryRef = - new LazyReference<>("sslEngineFactory", this::buildSslEngineFactory, cycleDetector); + private final LazyReference> sslEngineFactoryRef; private final LazyReference eventBusRef = new LazyReference<>("eventBus", this::buildEventBus, cycleDetector); private final LazyReference> compressorRef = new LazyReference<>("compressor", this::buildCompressor, cycleDetector); + private final LazyReference> primitiveCodecRef = + new LazyReference<>("primitiveCodec", this::buildPrimitiveCodec, cycleDetector); private final LazyReference> frameCodecRef = new LazyReference<>("frameCodec", this::buildFrameCodec, cycleDetector); + private final LazyReference> segmentCodecRef = + new LazyReference<>("segmentCodec", this::buildSegmentCodec, cycleDetector); private final LazyReference protocolVersionRegistryRef = new LazyReference<>( "protocolVersionRegistry", this::buildProtocolVersionRegistry, cycleDetector); @@ -178,13 +213,19 @@ public class DefaultDriverContext implements InternalDriverContext { new LazyReference<>("poolManager", this::buildPoolManager, cycleDetector); private final LazyReference metricsFactoryRef = new LazyReference<>("metricsFactory", this::buildMetricsFactory, cycleDetector); + private final LazyReference metricIdGeneratorRef = + new LazyReference<>("metricIdGenerator", this::buildMetricIdGenerator, cycleDetector); private final LazyReference requestThrottlerRef = new LazyReference<>("requestThrottler", this::buildRequestThrottler, cycleDetector); - private final LazyReference> startupOptionsRef = - new LazyReference<>("startupOptions", this::buildStartupOptions, cycleDetector); + private final LazyReference startupOptionsRef = + new LazyReference<>("startupOptionsFactory", this::buildStartupOptionsFactory, cycleDetector); private final LazyReference nodeStateListenerRef; private final LazyReference schemaChangeListenerRef; private final LazyReference requestTrackerRef; + private final LazyReference> requestIdGeneratorRef; + private final LazyReference> authProviderRef; + private final LazyReference> lifecycleListenersRef = + new LazyReference<>("lifecycleListeners", this::buildLifecycleListeners, cycleDetector); private final DriverConfig config; private final DriverConfigLoader configLoader; @@ -196,9 +237,18 @@ public class DefaultDriverContext implements InternalDriverContext { private final RequestTracker requestTrackerFromBuilder; private final Map localDatacentersFromBuilder; private final Map> nodeFiltersFromBuilder; + private final Map nodeDistanceEvaluatorsFromBuilder; private final ClassLoader classLoader; + private final InetSocketAddress cloudProxyAddress; private final LazyReference requestLogFormatterRef = new LazyReference<>("requestLogFormatter", 
this::buildRequestLogFormatter, cycleDetector); + private final UUID startupClientId; + private final String startupApplicationName; + private final String startupApplicationVersion; + private final Object metricRegistry; + // A stack trace captured in the constructor. Used to extract information about the client + // application. + private final StackTraceElement[] initStackTrace; public DefaultDriverContext( DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { @@ -211,8 +261,7 @@ public DefaultDriverContext( this.sessionName = "s" + SESSION_NAME_COUNTER.getAndIncrement(); } this.localDatacentersFromBuilder = programmaticArguments.getLocalDatacenters(); - this.codecRegistry = - buildCodecRegistry(this.sessionName, programmaticArguments.getTypeCodecs()); + this.codecRegistry = buildCodecRegistry(programmaticArguments); this.nodeStateListenerFromBuilder = programmaticArguments.getNodeStateListener(); this.nodeStateListenerRef = new LazyReference<>( @@ -226,11 +275,43 @@ public DefaultDriverContext( () -> buildSchemaChangeListener(schemaChangeListenerFromBuilder), cycleDetector); this.requestTrackerFromBuilder = programmaticArguments.getRequestTracker(); + + this.authProviderRef = + new LazyReference<>( + "authProvider", + () -> buildAuthProvider(programmaticArguments.getAuthProvider()), + cycleDetector); this.requestTrackerRef = new LazyReference<>( "requestTracker", () -> buildRequestTracker(requestTrackerFromBuilder), cycleDetector); - this.nodeFiltersFromBuilder = programmaticArguments.getNodeFilters(); + this.requestIdGeneratorRef = + new LazyReference<>( + "requestIdGenerator", + () -> buildRequestIdGenerator(programmaticArguments.getRequestIdGenerator()), + cycleDetector); + this.sslEngineFactoryRef = + new LazyReference<>( + "sslEngineFactory", + () -> buildSslEngineFactory(programmaticArguments.getSslEngineFactory()), + cycleDetector); + @SuppressWarnings("deprecation") + Map> nodeFilters = programmaticArguments.getNodeFilters(); + this.nodeFiltersFromBuilder = nodeFilters; + this.nodeDistanceEvaluatorsFromBuilder = programmaticArguments.getNodeDistanceEvaluators(); this.classLoader = programmaticArguments.getClassLoader(); + this.cloudProxyAddress = programmaticArguments.getCloudProxyAddress(); + this.startupClientId = programmaticArguments.getStartupClientId(); + this.startupApplicationName = programmaticArguments.getStartupApplicationName(); + this.startupApplicationVersion = programmaticArguments.getStartupApplicationVersion(); + StackTraceElement[] stackTrace; + try { + stackTrace = Thread.currentThread().getStackTrace(); + } catch (Exception ex) { + // ignore and use empty + stackTrace = new StackTraceElement[] {}; + } + this.initStackTrace = stackTrace; + this.metricRegistry = programmaticArguments.getMetricRegistry(); } /** @@ -261,25 +342,31 @@ public DefaultDriverContext( } /** - * Builds a map of options to send in a Startup message. + * Returns builder of options to send in a Startup message. 
* * @see #getStartupOptions() */ - protected Map buildStartupOptions() { - return new StartupOptionsBuilder(this).build(); + protected StartupOptionsBuilder buildStartupOptionsFactory() { + return new StartupOptionsBuilder(this) + .withClientId(startupClientId) + .withApplicationName(startupApplicationName) + .withApplicationVersion(startupApplicationVersion); } protected Map buildLoadBalancingPolicies() { return Reflection.buildFromConfigProfiles( this, + DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, DefaultDriverOption.LOAD_BALANCING_POLICY, LoadBalancingPolicy.class, - "com.datastax.oss.driver.internal.core.loadbalancing"); + "com.datastax.oss.driver.internal.core.loadbalancing", + "com.datastax.dse.driver.internal.core.loadbalancing"); } protected Map buildRetryPolicies() { return Reflection.buildFromConfigProfiles( this, + DefaultDriverOption.RETRY_POLICY_CLASS, DefaultDriverOption.RETRY_POLICY, RetryPolicy.class, "com.datastax.oss.driver.internal.core.retry"); @@ -288,6 +375,7 @@ protected Map buildRetryPolicies() { protected Map buildSpeculativeExecutionPolicies() { return Reflection.buildFromConfigProfiles( this, + DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY, SpeculativeExecutionPolicy.class, "com.datastax.oss.driver.internal.core.specex"); @@ -335,53 +423,49 @@ protected AddressTranslator buildAddressTranslator() { DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS))); } - protected Optional buildAuthProvider() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.AUTH_PROVIDER_CLASS, - AuthProvider.class, - "com.datastax.oss.driver.internal.core.auth"); - } - - protected Optional buildSslEngineFactory() { - return Reflection.buildFromConfig( - this, - DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, - SslEngineFactory.class, - "com.datastax.oss.driver.internal.core.ssl"); + protected Optional buildSslEngineFactory(SslEngineFactory factoryFromBuilder) { + return (factoryFromBuilder != null) + ? 
Optional.of(factoryFromBuilder) + : Reflection.buildFromConfig( + this, + DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, + SslEngineFactory.class, + "com.datastax.oss.driver.internal.core.ssl"); } protected EventBus buildEventBus() { return new EventBus(getSessionName()); } - @SuppressWarnings("unchecked") protected Compressor buildCompressor() { DriverExecutionProfile defaultProfile = getConfig().getDefaultProfile(); - if (defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_COMPRESSION)) { - String name = defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION); - if (name.equalsIgnoreCase("lz4")) { - return new Lz4Compressor(this); - } else if (name.equalsIgnoreCase("snappy")) { - return new SnappyCompressor(this); - } else { - throw new IllegalArgumentException( - String.format( - "Unsupported compression algorithm '%s' (from configuration option %s)", - name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); - } - } else { - return Compressor.none(); - } + String name = defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none"); + assert name != null : "should use default value"; + return BuiltInCompressors.newInstance(name, this); + } + + protected PrimitiveCodec buildPrimitiveCodec() { + return new ByteBufPrimitiveCodec(getNettyOptions().allocator()); } protected FrameCodec buildFrameCodec() { - return FrameCodec.defaultClient( - new ByteBufPrimitiveCodec(getNettyOptions().allocator()), getCompressor()); + return new FrameCodec<>( + getPrimitiveCodec(), + getCompressor(), + new ProtocolV3ClientCodecs(), + new ProtocolV4ClientCodecsForDse(), + new ProtocolV5ClientCodecs(), + new ProtocolV6ClientCodecs(), + new DseProtocolV1ClientCodecs(), + new DseProtocolV2ClientCodecs()); + } + + protected SegmentCodec buildSegmentCodec() { + return new SegmentCodec<>(getPrimitiveCodec(), getCompressor()); } protected ProtocolVersionRegistry buildProtocolVersionRegistry() { - return new CassandraProtocolVersionRegistry(getSessionName()); + return new DefaultProtocolVersionRegistry(getSessionName()); } protected ConsistencyLevelRegistry buildConsistencyLevelRegistry() { @@ -397,8 +481,8 @@ protected NettyOptions buildNettyOptions() { } protected Optional buildSslHandlerFactory() { - // If a JDK-based factory was provided through the public API, syncWrapper it - return buildSslEngineFactory().map(JdkSslHandlerFactory::new); + // If a JDK-based factory was provided through the public API, wrap it + return getSslEngineFactory().map(JdkSslHandlerFactory::new); // For more advanced options (like using Netty's native OpenSSL support instead of the JDK), // extend DefaultDriverContext and override this method @@ -413,7 +497,10 @@ protected ChannelFactory buildChannelFactory() { } protected TopologyMonitor buildTopologyMonitor() { - return new DefaultTopologyMonitor(this); + if (cloudProxyAddress == null) { + return new DefaultTopologyMonitor(this); + } + return new CloudTopologyMonitor(this, cloudProxyAddress); } protected MetadataManager buildMetadataManager() { @@ -429,12 +516,20 @@ protected ControlConnection buildControlConnection() { } protected RequestProcessorRegistry buildRequestProcessorRegistry() { - return RequestProcessorRegistry.defaultCqlProcessors(getSessionName()); + List> processors = + BuiltInRequestProcessors.createDefaultProcessors(this); + return new RequestProcessorRegistry( + getSessionName(), processors.toArray(new RequestProcessor[0])); } - protected CodecRegistry buildCodecRegistry(String logPrefix, List> codecs) { - TypeCodec[] array = new 
TypeCodec[codecs.size()]; - return new DefaultCodecRegistry(logPrefix, codecs.toArray(array)); + protected CodecRegistry buildCodecRegistry(ProgrammaticArguments arguments) { + MutableCodecRegistry registry = arguments.getCodecRegistry(); + if (registry == null) { + registry = new DefaultCodecRegistry(this.sessionName); + } + registry.register(arguments.getTypeCodecs()); + DseTypeCodecsRegistrar.registerDseCodecs(registry); + return registry; } protected SchemaQueriesFactory buildSchemaQueriesFactory() { @@ -458,7 +553,33 @@ protected PoolManager buildPoolManager() { } protected MetricsFactory buildMetricsFactory() { - return new DropwizardMetricsFactory(this); + return Reflection.buildFromConfig( + this, + DefaultDriverOption.METRICS_FACTORY_CLASS, + MetricsFactory.class, + "com.datastax.oss.driver.internal.core.metrics", + "com.datastax.oss.driver.internal.metrics.microprofile", + "com.datastax.oss.driver.internal.metrics.micrometer") + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + "Missing metrics factory, check your config (%s)", + DefaultDriverOption.METRICS_FACTORY_CLASS))); + } + + protected MetricIdGenerator buildMetricIdGenerator() { + return Reflection.buildFromConfig( + this, + DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, + MetricIdGenerator.class, + "com.datastax.oss.driver.internal.core.metrics") + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + "Missing metric descriptor, check your config (%s)", + DefaultDriverOption.METRICS_ID_GENERATOR_CLASS))); } protected RequestThrottler buildRequestThrottler() { @@ -477,52 +598,156 @@ protected RequestThrottler buildRequestThrottler() { protected NodeStateListener buildNodeStateListener( NodeStateListener nodeStateListenerFromBuilder) { - return (nodeStateListenerFromBuilder != null) - ? 
nodeStateListenerFromBuilder - : Reflection.buildFromConfig( - this, - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS, - NodeStateListener.class, - "com.datastax.oss.driver.internal.core.metadata") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing node state listener, check your configuration (%s)", - DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS))); + List listeners = new ArrayList<>(); + if (nodeStateListenerFromBuilder != null) { + listeners.add(nodeStateListenerFromBuilder); + } + DefaultDriverOption newOption = DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES; + @SuppressWarnings("deprecation") + DefaultDriverOption legacyOption = DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS; + DriverExecutionProfile profile = config.getDefaultProfile(); + if (profile.isDefined(newOption)) { + listeners.addAll( + Reflection.buildFromConfigList( + this, + newOption, + NodeStateListener.class, + "com.datastax.oss.driver.internal.core.metadata")); + } + if (profile.isDefined(legacyOption)) { + LOG.warn( + "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", + legacyOption, + newOption); + Reflection.buildFromConfig( + this, + legacyOption, + NodeStateListener.class, + "com.datastax.oss.driver.internal.core.metadata") + .ifPresent(listeners::add); + } + if (listeners.isEmpty()) { + return new NoopNodeStateListener(this); + } else if (listeners.size() == 1) { + return listeners.get(0); + } else { + return new MultiplexingNodeStateListener(listeners); + } } protected SchemaChangeListener buildSchemaChangeListener( SchemaChangeListener schemaChangeListenerFromBuilder) { - return (schemaChangeListenerFromBuilder != null) - ? schemaChangeListenerFromBuilder - : Reflection.buildFromConfig( - this, - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS, - SchemaChangeListener.class, - "com.datastax.oss.driver.internal.core.metadata.schema") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing schema change listener, check your configuration (%s)", - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS))); + List listeners = new ArrayList<>(); + if (schemaChangeListenerFromBuilder != null) { + listeners.add(schemaChangeListenerFromBuilder); + } + DefaultDriverOption newOption = DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES; + @SuppressWarnings("deprecation") + DefaultDriverOption legacyOption = DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS; + DriverExecutionProfile profile = config.getDefaultProfile(); + if (profile.isDefined(newOption)) { + listeners.addAll( + Reflection.buildFromConfigList( + this, + newOption, + SchemaChangeListener.class, + "com.datastax.oss.driver.internal.core.metadata.schema")); + } + if (profile.isDefined(legacyOption)) { + LOG.warn( + "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", + legacyOption, + newOption); + Reflection.buildFromConfig( + this, + legacyOption, + SchemaChangeListener.class, + "com.datastax.oss.driver.internal.core.metadata.schema") + .ifPresent(listeners::add); + } + if (listeners.isEmpty()) { + return new NoopSchemaChangeListener(this); + } else if (listeners.size() == 1) { + return listeners.get(0); + } else { + return new MultiplexingSchemaChangeListener(listeners); + } } protected RequestTracker buildRequestTracker(RequestTracker requestTrackerFromBuilder) { - return (requestTrackerFromBuilder != null) - ? 
requestTrackerFromBuilder + List trackers = new ArrayList<>(); + if (requestTrackerFromBuilder != null) { + trackers.add(requestTrackerFromBuilder); + } + for (LoadBalancingPolicy lbp : this.getLoadBalancingPolicies().values()) { + lbp.getRequestTracker().ifPresent(trackers::add); + } + DefaultDriverOption newOption = DefaultDriverOption.REQUEST_TRACKER_CLASSES; + @SuppressWarnings("deprecation") + DefaultDriverOption legacyOption = DefaultDriverOption.REQUEST_TRACKER_CLASS; + DriverExecutionProfile profile = config.getDefaultProfile(); + if (profile.isDefined(newOption)) { + trackers.addAll( + Reflection.buildFromConfigList( + this, + newOption, + RequestTracker.class, + "com.datastax.oss.driver.internal.core.tracker")); + } + if (profile.isDefined(legacyOption)) { + LOG.warn( + "Option {} has been deprecated and will be removed in a future release; please use option {} instead.", + legacyOption, + newOption); + Reflection.buildFromConfig( + this, + legacyOption, + RequestTracker.class, + "com.datastax.oss.driver.internal.core.tracker") + .ifPresent(trackers::add); + } + if (trackers.isEmpty()) { + return new NoopRequestTracker(this); + } else if (trackers.size() == 1) { + return trackers.get(0); + } else { + return new MultiplexingRequestTracker(trackers); + } + } + + protected Optional buildRequestIdGenerator( + RequestIdGenerator requestIdGenerator) { + return (requestIdGenerator != null) + ? Optional.of(requestIdGenerator) : Reflection.buildFromConfig( - this, - DefaultDriverOption.REQUEST_TRACKER_CLASS, - RequestTracker.class, - "com.datastax.oss.driver.internal.core.tracker") - .orElseThrow( - () -> - new IllegalArgumentException( - String.format( - "Missing request tracker, check your configuration (%s)", - DefaultDriverOption.REQUEST_TRACKER_CLASS))); + this, + DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, + RequestIdGenerator.class, + "com.datastax.oss.driver.internal.core.tracker"); + } + + protected Optional buildAuthProvider(AuthProvider authProviderFromBuilder) { + return (authProviderFromBuilder != null) + ? 
Optional.of(authProviderFromBuilder) + : Reflection.buildFromConfig( + this, + DefaultDriverOption.AUTH_PROVIDER_CLASS, + AuthProvider.class, + "com.datastax.oss.driver.internal.core.auth", + "com.datastax.dse.driver.internal.core.auth"); + } + + protected List buildLifecycleListeners() { + if (DefaultDependencyChecker.isPresent(JACKSON)) { + return Collections.singletonList(new InsightsClientLifecycleListener(this, initStackTrace)); + } else { + if (config.getDefaultProfile().getBoolean(DseDriverOption.MONITOR_REPORTING_ENABLED)) { + LOG.info( + "Could not initialize Insights monitoring; " + + "this is normal if Jackson was explicitly excluded from classpath"); + } + return Collections.emptyList(); + } } @NonNull @@ -603,12 +828,24 @@ public Compressor getCompressor() { return compressorRef.get(); } + @NonNull + @Override + public PrimitiveCodec getPrimitiveCodec() { + return primitiveCodecRef.get(); + } + @NonNull @Override public FrameCodec getFrameCodec() { return frameCodecRef.get(); } + @NonNull + @Override + public SegmentCodec getSegmentCodec() { + return segmentCodecRef.get(); + } + @NonNull @Override public ProtocolVersionRegistry getProtocolVersionRegistry() { @@ -723,6 +960,12 @@ public MetricsFactory getMetricsFactory() { return metricsFactoryRef.get(); } + @NonNull + @Override + public MetricIdGenerator getMetricIdGenerator() { + return metricIdGeneratorRef.get(); + } + @NonNull @Override public RequestThrottler getRequestThrottler() { @@ -747,6 +990,12 @@ public RequestTracker getRequestTracker() { return requestTrackerRef.get(); } + @NonNull + @Override + public Optional getRequestIdGenerator() { + return requestIdGeneratorRef.get(); + } + @Nullable @Override public String getLocalDatacenter(@NonNull String profileName) { @@ -755,10 +1004,17 @@ public String getLocalDatacenter(@NonNull String profileName) { @Nullable @Override + @Deprecated public Predicate getNodeFilter(@NonNull String profileName) { return nodeFiltersFromBuilder.get(profileName); } + @Nullable + @Override + public NodeDistanceEvaluator getNodeDistanceEvaluator(@NonNull String profileName) { + return nodeDistanceEvaluatorsFromBuilder.get(profileName); + } + @Nullable @Override public ClassLoader getClassLoader() { @@ -780,7 +1036,8 @@ public ProtocolVersion getProtocolVersion() { @NonNull @Override public Map getStartupOptions() { - return startupOptionsRef.get(); + // startup options are calculated dynamically and may vary per connection + return startupOptionsRef.get().build(); } protected RequestLogFormatter buildRequestLogFormatter() { @@ -792,4 +1049,16 @@ protected RequestLogFormatter buildRequestLogFormatter() { public RequestLogFormatter getRequestLogFormatter() { return requestLogFormatterRef.get(); } + + @NonNull + @Override + public List getLifecycleListeners() { + return lifecycleListenersRef.get(); + } + + @Nullable + @Override + public Object getMetricRegistry() { + return metricRegistry; + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java index c4badab96a3..763a71f8b12 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/DefaultNettyOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
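The listener and tracker hunks above all follow one pattern: gather instances from the builder, from the new list-valued option, and (with a deprecation warning) from the legacy single-class option, then wrap them in a multiplexing implementation when there is more than one. A sketch of what that enables (`MyTracker` is a hypothetical no-op tracker; `RequestLogger` is the driver's built-in tracker, resolvable by simple name thanks to the default package passed to `Reflection.buildFromConfigList`):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.tracker.RequestTracker;
import java.util.Collections;

public class MultipleTrackersSketch {
  static class MyTracker implements RequestTracker {
    @Override
    public void close() {} // all other callbacks have default no-op implementations
  }

  public static CqlSession connect() {
    // One tracker from the builder, one from configuration: both stay active,
    // wrapped together by buildRequestTracker(...) above.
    return CqlSession.builder()
        .withRequestTracker(new MyTracker())
        .withConfigLoader(
            DriverConfigLoader.programmaticBuilder()
                .withStringList(
                    DefaultDriverOption.REQUEST_TRACKER_CLASSES,
                    Collections.singletonList("RequestLogger"))
                .build())
        .build();
  }
}
```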
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +20,7 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation; +import com.datastax.oss.driver.internal.core.util.concurrent.PromiseCombiner; import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ThreadFactoryBuilder; import io.netty.bootstrap.Bootstrap; import io.netty.buffer.ByteBufAllocator; @@ -34,7 +37,6 @@ import io.netty.util.concurrent.EventExecutorGroup; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GlobalEventExecutor; -import io.netty.util.concurrent.PromiseCombiner; import io.netty.util.internal.PlatformDependent; import java.time.Duration; import java.util.concurrent.ThreadFactory; @@ -61,6 +63,7 @@ public class DefaultNettyOptions implements NettyOptions { public DefaultNettyOptions(InternalDriverContext context) { this.config = context.getConfig().getDefaultProfile(); + boolean daemon = config.getBoolean(DefaultDriverOption.NETTY_DAEMON); int ioGroupSize = config.getInt(DefaultDriverOption.NETTY_IO_SIZE); this.ioShutdownQuietPeriod = config.getInt(DefaultDriverOption.NETTY_IO_SHUTDOWN_QUIET_PERIOD); this.ioShutdownTimeout = config.getInt(DefaultDriverOption.NETTY_IO_SHUTDOWN_TIMEOUT); @@ -78,6 +81,7 @@ public DefaultNettyOptions(InternalDriverContext context) { new ThreadFactoryBuilder() .setThreadFactory(safeFactory) .setNameFormat(context.getSessionName() + "-io-%d") + .setDaemon(daemon) .build(); this.ioEventLoopGroup = new NioEventLoopGroup(ioGroupSize, ioThreadFactory); @@ -85,6 +89,7 @@ public DefaultNettyOptions(InternalDriverContext context) { new ThreadFactoryBuilder() .setThreadFactory(safeFactory) .setNameFormat(context.getSessionName() + "-admin-%d") + .setDaemon(daemon) .build(); this.adminEventLoopGroup = new DefaultEventLoopGroup(adminGroupSize, adminThreadFactory); // setup the Timer @@ -92,6 +97,7 @@ public DefaultNettyOptions(InternalDriverContext context) { new ThreadFactoryBuilder() .setThreadFactory(safeFactory) .setNameFormat(context.getSessionName() + "-timer-%d") + .setDaemon(daemon) .build(); Duration tickDuration = config.getDuration(DefaultDriverOption.NETTY_TIMER_TICK_DURATION); @@ -104,12 +110,20 @@ public DefaultNettyOptions(InternalDriverContext context) { + "Please set advanced.netty.timer.tick-duration to 100 ms or higher.", tickDuration.toMillis()); } - timer = + this.timer = createTimer(timerThreadFactory, tickDuration); + } + + private HashedWheelTimer createTimer(ThreadFactory timerThreadFactory, Duration tickDuration) { + HashedWheelTimer timer = new HashedWheelTimer( timerThreadFactory, tickDuration.toNanos(), TimeUnit.NANOSECONDS, config.getInt(DefaultDriverOption.NETTY_TIMER_TICKS_PER_WHEEL)); + // 
Start the background thread eagerly during session initialization because + // it is a blocking operation. + timer.start(); + return timer; } @Override @@ -158,6 +172,11 @@ public void afterBootstrapInitialized(Bootstrap bootstrap) { int sendBufferSize = config.getInt(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE); bootstrap.option(ChannelOption.SO_SNDBUF, sendBufferSize); } + if (config.isDefined(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT)) { + Duration connectTimeout = config.getDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT); + bootstrap.option( + ChannelOption.CONNECT_TIMEOUT_MILLIS, Long.valueOf(connectTimeout.toMillis()).intValue()); + } } @Override @@ -169,22 +188,19 @@ public void afterChannelInitialized(Channel channel) { public Future onClose() { DefaultPromise closeFuture = new DefaultPromise<>(GlobalEventExecutor.INSTANCE); GlobalEventExecutor.INSTANCE.execute( - () -> { - PromiseCombiner combiner = new PromiseCombiner(GlobalEventExecutor.INSTANCE); - combiner.add( - adminEventLoopGroup.shutdownGracefully( - adminShutdownQuietPeriod, adminShutdownTimeout, adminShutdownUnit)); - combiner.add( - ioEventLoopGroup.shutdownGracefully( - ioShutdownQuietPeriod, ioShutdownTimeout, ioShutdownUnit)); - combiner.finish(closeFuture); - }); + () -> + PromiseCombiner.combine( + closeFuture, + adminEventLoopGroup.shutdownGracefully( + adminShutdownQuietPeriod, adminShutdownTimeout, adminShutdownUnit), + ioEventLoopGroup.shutdownGracefully( + ioShutdownQuietPeriod, ioShutdownTimeout, ioShutdownUnit))); closeFuture.addListener(f -> timer.stop()); return closeFuture; } @Override - public synchronized Timer getTimer() { + public Timer getTimer() { return timer; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java index b61e1cf8149..dd9ccaa9979 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/EventBus.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java index afc5dbce92e..81349b0c665 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/InternalDriverContext.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.context; import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; @@ -30,6 +33,7 @@ import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; import com.datastax.oss.driver.internal.core.metadata.token.ReplicationStrategyFactory; import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; import com.datastax.oss.driver.internal.core.servererrors.WriteTypeRegistry; @@ -39,6 +43,8 @@ import com.datastax.oss.driver.internal.core.tracker.RequestLogFormatter; import com.datastax.oss.protocol.internal.Compressor; import com.datastax.oss.protocol.internal.FrameCodec; +import com.datastax.oss.protocol.internal.PrimitiveCodec; +import com.datastax.oss.protocol.internal.SegmentCodec; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import io.netty.buffer.ByteBuf; @@ -57,9 +63,15 @@ public interface InternalDriverContext extends DriverContext { @NonNull Compressor getCompressor(); + @NonNull + PrimitiveCodec getPrimitiveCodec(); + @NonNull FrameCodec getFrameCodec(); + @NonNull + SegmentCodec getSegmentCodec(); + @NonNull ProtocolVersionRegistry getProtocolVersionRegistry(); @@ -117,6 +129,9 @@ public interface InternalDriverContext extends DriverContext { @NonNull MetricsFactory getMetricsFactory(); + @NonNull + MetricIdGenerator getMetricIdGenerator(); + /** * The value that was passed to {@link SessionBuilder#withLocalDatacenter(String,String)} for this * particular profile. If it was specified through the configuration instead, this method will @@ -129,14 +144,26 @@ public interface InternalDriverContext extends DriverContext { * This is the filter from {@link SessionBuilder#withNodeFilter(String, Predicate)}. If the filter * for this profile was specified through the configuration instead, this method will return * {@code null}. + * + * @deprecated Use {@link #getNodeDistanceEvaluator(String)} instead. */ @Nullable + @Deprecated Predicate getNodeFilter(@NonNull String profileName); + /** + * This is the node distance evaluator from {@link + * SessionBuilder#withNodeDistanceEvaluator(String, NodeDistanceEvaluator)}. 
If the evaluator for + * this profile was specified through the configuration instead, this method will return {@code + * null}. + */ + @Nullable + NodeDistanceEvaluator getNodeDistanceEvaluator(@NonNull String profileName); + /** * The {@link ClassLoader} to use to reflectively load class names defined in configuration. If - * null, the driver attempts to use {@link Thread#getContextClassLoader()} of the current thread - * or {@link com.datastax.oss.driver.internal.core.util.Reflection}'s {@link ClassLoader}. + * null, the driver attempts to use the same {@link ClassLoader} that loaded the core driver + * classes. */ @Nullable ClassLoader getClassLoader(); @@ -152,8 +179,9 @@ public interface InternalDriverContext extends DriverContext { /** * A list of additional components to notify of session lifecycle events. * - *

      The default implementation returns an empty list. Custom driver extensions might override - * this method to add their own components. + *

      For historical reasons, this method has a default implementation that returns an empty list. + * The built-in {@link DefaultDriverContext} overrides it to plug in the Insights monitoring + * listener. Custom driver extensions might override this method to add their own components. * *

      Note that the driver assumes that the returned list is constant; there is no way to add * listeners dynamically. @@ -171,4 +199,20 @@ default List getLifecycleListeners() { */ @NonNull RequestLogFormatter getRequestLogFormatter(); + + /** + * A metric registry for storing metrics. + * + *

      This will return the object from {@link + * SessionBuilder#withMetricRegistry(java.lang.Object)}. Access to this registry object is only + * intended for {@link MetricsFactory} implementations that need to expose a way to specify the + * registry external to the Factory implementation itself. + * + *

      The default metrics framework used by the Driver is DropWizard and does not need an external + * metrics registry object. + */ + @Nullable + default Object getMetricRegistry() { + return null; + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java index 31fcacfdcf1..39993e7094f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/LifecycleListener.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +19,7 @@ import com.datastax.oss.driver.api.core.session.SessionBuilder; -/** - * A component that gets notified of certain events in the session's lifecycle. - * - *

      This is intended for third-party extensions, no built-in components implement this. - */ +/** A component that gets notified of certain events in the session's lifecycle. */ public interface LifecycleListener extends AutoCloseable { /** diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java index 12f8506883a..5b4ff4dcec8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/NettyOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java index 49718b7df97..89a9266b3ac 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,24 +17,78 @@ */ package com.datastax.oss.driver.internal.core.context; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.protocol.internal.request.Startup; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import com.fasterxml.jackson.databind.ObjectMapper; +import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Map; +import java.util.Optional; +import java.util.UUID; import net.jcip.annotations.Immutable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Immutable public class StartupOptionsBuilder { public static final String DRIVER_NAME_KEY = "DRIVER_NAME"; public static final String DRIVER_VERSION_KEY = "DRIVER_VERSION"; + public static final String DRIVER_BAGGAGE = "DRIVER_BAGGAGE"; + public static final String APPLICATION_NAME_KEY = "APPLICATION_NAME"; + public static final String APPLICATION_VERSION_KEY = "APPLICATION_VERSION"; + public static final String CLIENT_ID_KEY = "CLIENT_ID"; + + private static final Logger LOG = LoggerFactory.getLogger(StartupOptionsBuilder.class); + private static final ObjectMapper mapper = new ObjectMapper(); protected final InternalDriverContext context; + private UUID clientId; + private String applicationName; + private String applicationVersion; public StartupOptionsBuilder(InternalDriverContext context) { this.context = context; } + /** + * Sets the client ID to be sent in the Startup message options. + * + *

      If this method is not invoked, or the id passed in is null, a random {@link UUID} will be + * generated and used by default. + */ + public StartupOptionsBuilder withClientId(@Nullable UUID clientId) { + this.clientId = clientId; + return this; + } + + /** + * Sets the client application name to be sent in the Startup message options. + * + *

      If this method is not invoked, or the name passed in is null, no application name option + * will be sent in the startup message options. + */ + public StartupOptionsBuilder withApplicationName(@Nullable String applicationName) { + this.applicationName = applicationName; + return this; + } + + /** + * Sets the client application version to be sent in the Startup message options. + * + *

If this method is not invoked, or the version passed in is null, no application version option
+   * will be sent in the startup message options.
+   */
+  public StartupOptionsBuilder withApplicationVersion(@Nullable String applicationVersion) {
+    this.applicationVersion = applicationVersion;
+    return this;
+  }
+
   /**
    * Builds a map of options to send in a Startup message.
    *
@@ -46,16 +102,36 @@ public StartupOptionsBuilder(InternalDriverContext context) {
    * @return Map of Startup Options.
    */
   public Map<String, String> build() {
+    DriverExecutionProfile config = context.getConfig().getDefaultProfile();
+    NullAllowingImmutableMap.Builder<String, String> builder = NullAllowingImmutableMap.builder(3);
     // add compression (if configured) and driver name and version
     String compressionAlgorithm = context.getCompressor().algorithm();
     if (compressionAlgorithm != null && !compressionAlgorithm.trim().isEmpty()) {
       builder.put(Startup.COMPRESSION_KEY, compressionAlgorithm.trim());
     }
-    return builder
-        .put(DRIVER_NAME_KEY, getDriverName())
-        .put(DRIVER_VERSION_KEY, getDriverVersion())
-        .build();
+    builder.put(DRIVER_NAME_KEY, getDriverName()).put(DRIVER_VERSION_KEY, getDriverVersion());
+
+    // Add Insights entries, falling back to generation / config if no programmatic values provided:
+    if (clientId == null) {
+      clientId = Uuids.random();
+    }
+    builder.put(CLIENT_ID_KEY, clientId.toString());
+    if (applicationName == null) {
+      applicationName = config.getString(DseDriverOption.APPLICATION_NAME, null);
+    }
+    if (applicationName != null) {
+      builder.put(APPLICATION_NAME_KEY, applicationName);
+    }
+    if (applicationVersion == null) {
+      applicationVersion = config.getString(DseDriverOption.APPLICATION_VERSION, null);
+    }
+    if (applicationVersion != null) {
+      builder.put(APPLICATION_VERSION_KEY, applicationVersion);
+    }
+    driverBaggage().ifPresent(s -> builder.put(DRIVER_BAGGAGE, s));
+
+    return builder.build();
   }
   /**
@@ -77,4 +153,21 @@ protected String getDriverName() {
   protected String getDriverVersion() {
     return Session.OSS_DRIVER_COORDINATES.getVersion().toString();
   }
+
+  private Optional<String> driverBaggage() {
+    ImmutableMap.Builder<String, Map<String, String>> builder = new ImmutableMap.Builder<>();
+    for (Map.Entry<String, LoadBalancingPolicy> entry :
+        context.getLoadBalancingPolicies().entrySet()) {
+      Map<String, String> config = entry.getValue().getStartupConfiguration();
+      if (!config.isEmpty()) {
+        builder.put(entry.getKey(), config);
+      }
+    }
+    try {
+      return Optional.of(mapper.writeValueAsString(builder.build()));
+    } catch (Exception e) {
+      LOG.warn("Failed to construct startup driver baggage", e);
+      return Optional.empty();
+    }
+  }
 }
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java
index 50b6ffe90f0..5c29a9b704b 100644
--- a/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java
+++ b/core/src/main/java/com/datastax/oss/driver/internal/core/control/ControlConnection.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,9 +50,12 @@ import com.datastax.oss.protocol.internal.response.event.TopologyChangeEvent; import edu.umd.cs.findbugs.annotations.NonNull; import io.netty.util.concurrent.EventExecutor; +import java.util.AbstractMap.SimpleEntry; +import java.util.ArrayList; import java.util.Collection; -import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Queue; import java.util.WeakHashMap; import java.util.concurrent.CompletableFuture; @@ -102,9 +107,8 @@ public ControlConnection(InternalDriverContext context) { * @param listenToClusterEvents whether to register for TOPOLOGY_CHANGE and STATUS_CHANGE events. * If the control connection has already initialized with another value, this is ignored. * SCHEMA_CHANGE events are always registered. - * @param reconnectOnFailure whether to schedule a reconnection if the initial attempt fails (this - * does not affect the returned future, which always represent the outcome of the initial - * attempt only). + * @param reconnectOnFailure whether to schedule a reconnection if the initial attempt fails (if + * true, the returned future will only complete once the reconnection has succeeded). * @param useInitialReconnectionSchedule if no node can be reached, the type of reconnection * schedule to use. In other words, the value that will be passed to {@link * ReconnectionPolicy#newControlConnectionSchedule(boolean)}. Note that this parameter is only @@ -130,10 +134,6 @@ public boolean isInit() { return singleThreaded.initFuture.isDone(); } - public CompletionStage firstConnectionAttemptFuture() { - return singleThreaded.firstConnectionAttemptFuture; - } - /** * The channel currently used by this control connection. 
This is modified concurrently in the * event of a reconnection, so it may occasionally return a closed channel (clients should be @@ -226,14 +226,26 @@ private void processStatusChange(Event event) { private void processSchemaChange(Event event) { SchemaChangeEvent sce = (SchemaChangeEvent) event; - context.getMetadataManager().refreshSchema(sce.keyspace, false, false); + context + .getMetadataManager() + .refreshSchema(sce.keyspace, false, false) + .whenComplete( + (metadata, error) -> { + if (error != null) { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error while refreshing schema for a SCHEMA_CHANGE event, " + + "keeping previous version", + logPrefix, + error); + } + }); } private class SingleThreaded { private final InternalDriverContext context; private final DriverConfig config; private final CompletableFuture initFuture = new CompletableFuture<>(); - private final CompletableFuture firstConnectionAttemptFuture = new CompletableFuture<>(); private boolean initWasCalled; private final CompletableFuture closeFuture = new CompletableFuture<>(); private boolean closeWasCalled; @@ -241,8 +253,8 @@ private class SingleThreaded { private final Reconnection reconnection; private DriverChannelOptions channelOptions; // The last events received for each node - private final Map lastDistanceEvents = new WeakHashMap<>(); - private final Map lastStateEvents = new WeakHashMap<>(); + private final Map lastNodeDistance = new WeakHashMap<>(); + private final Map lastNodeState = new WeakHashMap<>(); private SingleThreaded(InternalDriverContext context) { this.context = context; @@ -293,10 +305,7 @@ private void init( connect( nodes, null, - () -> { - initFuture.complete(null); - firstConnectionAttemptFuture.complete(null); - }, + () -> initFuture.complete(null), error -> { if (isAuthFailure(error)) { LOG.warn( @@ -319,7 +328,6 @@ private void init( } initFuture.completeExceptionally(error); } - firstConnectionAttemptFuture.completeExceptionally(error); }); } catch (Throwable t) { initFuture.completeExceptionally(t); @@ -343,7 +351,7 @@ private CompletionStage reconnect() { private void connect( Queue nodes, - Map errors, + List> errors, Runnable onSuccess, Consumer onFailure) { assert adminExecutor.inEventLoop(); @@ -358,8 +366,8 @@ private void connect( .whenCompleteAsync( (channel, error) -> { try { - DistanceEvent lastDistanceEvent = lastDistanceEvents.get(node); - NodeStateEvent lastStateEvent = lastStateEvents.get(node); + NodeDistance lastDistance = lastNodeDistance.get(node); + NodeState lastState = lastNodeState.get(node); if (error != null) { if (closeWasCalled || initFuture.isCancelled()) { onSuccess.run(); // abort, we don't really care about the result @@ -385,9 +393,9 @@ private void connect( error); } } - Map newErrors = - (errors == null) ? new LinkedHashMap<>() : errors; - newErrors.put(node, error); + List> newErrors = + (errors == null) ? 
new ArrayList<>() : errors;
+                        newErrors.add(new SimpleEntry<>(node, error));
                         context.getEventBus().fire(ChannelEvent.controlConnectionFailed(node));
                         connect(nodes, newErrors, onSuccess, onFailure);
                       }
@@ -398,8 +406,7 @@
                           channel);
                       channel.forceClose();
                       onSuccess.run();
-                    } else if (lastDistanceEvent != null
-                        && lastDistanceEvent.distance == NodeDistance.IGNORED) {
+                    } else if (lastDistance == NodeDistance.IGNORED) {
                       LOG.debug(
                           "[{}] New channel opened ({}) but node became ignored, "
                               + "closing and trying next node",
@@ -407,9 +414,9 @@
                           channel);
                       channel.forceClose();
                       connect(nodes, errors, onSuccess, onFailure);
-                    } else if (lastStateEvent != null
-                        && (lastStateEvent.newState == null /*(removed)*/
-                            || lastStateEvent.newState == NodeState.FORCED_DOWN)) {
+                    } else if (lastNodeState.containsKey(node)
+                        && (lastState == null /*(removed)*/
+                            || lastState == NodeState.FORCED_DOWN)) {
                       LOG.debug(
                           "[{}] New channel opened ({}) but node was removed or forced down, "
                               + "closing and trying next node",
@@ -418,14 +425,16 @@
                       channel.forceClose();
                       connect(nodes, errors, onSuccess, onFailure);
                     } else {
-                      LOG.debug("[{}] Connection established to {}", logPrefix, node);
-                      // Make sure previous channel gets closed (it may still be open if
-                      // reconnection was forced)
+                      LOG.debug("[{}] New channel opened {}", logPrefix, channel);
                       DriverChannel previousChannel = ControlConnection.this.channel;
+                      ControlConnection.this.channel = channel;
                       if (previousChannel != null) {
+                        // We were reconnecting: make sure previous channel gets closed (it may
+                        // still be open if reconnection was forced)
+                        LOG.debug(
+                            "[{}] Forcefully closing previous channel {}",
+                            logPrefix,
+                            previousChannel);
                         previousChannel.forceClose();
                       }
-                      ControlConnection.this.channel = channel;
                       context.getEventBus().fire(ChannelEvent.channelOpened(node));
                       channel
                           .closeFuture()
@@ -449,39 +458,69 @@
     }

     private void onSuccessfulReconnect() {
-      // If reconnectOnFailure was true and we've never connected before, complete the future now,
-      // otherwise it's already complete and this is a no-op.
-      initFuture.complete(null);
+      // If reconnectOnFailure was true and we've never connected before, complete the future now to
+      // signal that the initialization is complete.
+      boolean isFirstConnection = initFuture.complete(null);

-      // Always perform a full refresh (we don't know how long we were disconnected)
-      context
-          .getMetadataManager()
-          .refreshNodes()
-          .whenComplete(
-              (result, error) -> {
-                if (error != null) {
-                  LOG.debug("[{}] Error while refreshing node list", logPrefix, error);
-                } else {
-                  try {
-                    // A failed node list refresh at startup is not fatal, so this might be the
-                    // first successful refresh; make sure the LBP gets initialized (this is a no-op
-                    // if it was initialized already).
- context.getLoadBalancingPolicyWrapper().init(); - context.getMetadataManager().refreshSchema(null, false, true); - } catch (Throwable t) { - Loggers.warnWithException( - LOG, "[{}] Unexpected error on control connection reconnect", logPrefix, t); + // Otherwise, perform a full refresh (we don't know how long we were disconnected) + if (!isFirstConnection) { + context + .getMetadataManager() + .refreshNodes() + .whenComplete( + (result, error) -> { + if (error != null) { + LOG.debug("[{}] Error while refreshing node list", logPrefix, error); + } else { + try { + // A failed node list refresh at startup is not fatal, so this might be the + // first successful refresh; make sure the LBP gets initialized (this is a + // no-op if it was initialized already). + context.getLoadBalancingPolicyWrapper().init(); + context + .getMetadataManager() + .refreshSchema(null, false, true) + .whenComplete( + (metadata, schemaError) -> { + if (schemaError != null) { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error while refreshing schema after a " + + "successful reconnection, keeping previous version", + logPrefix, + schemaError); + } + }); + } catch (Throwable t) { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error on control connection reconnect", + logPrefix, + t); + } } - } - }); + }); + } } private void onChannelClosed(DriverChannel channel, Node node) { assert adminExecutor.inEventLoop(); if (!closeWasCalled) { - LOG.debug("[{}] Lost channel {}", logPrefix, channel); context.getEventBus().fire(ChannelEvent.channelClosed(node)); - reconnection.start(); + // If this channel is the current control channel, we must start a + // reconnection attempt to get a new control channel. + if (channel == ControlConnection.this.channel) { + LOG.debug( + "[{}] The current control channel {} was closed, scheduling reconnection", + logPrefix, + channel); + reconnection.start(); + } else { + LOG.trace( + "[{}] A previous control channel {} was closed, reconnection not required", + logPrefix, + channel); + } } } @@ -494,7 +533,7 @@ private void reconnectNow() { private void onDistanceEvent(DistanceEvent event) { assert adminExecutor.inEventLoop(); - this.lastDistanceEvents.put(event.node, event); + this.lastNodeDistance.put(event.node, event.distance); if (event.distance == NodeDistance.IGNORED && channel != null && !channel.closeFuture().isDone() @@ -509,7 +548,7 @@ private void onDistanceEvent(DistanceEvent event) { private void onStateEvent(NodeStateEvent event) { assert adminExecutor.inEventLoop(); - this.lastStateEvents.put(event.node, event); + this.lastNodeState.put(event.node, event.newState); if ((event.newState == null /*(removed)*/ || event.newState == NodeState.FORCED_DOWN) && channel != null && !channel.closeFuture().isDone() @@ -550,20 +589,21 @@ private void forceClose() { } private boolean isAuthFailure(Throwable error) { - boolean authFailure = true; if (error instanceof AllNodesFailedException) { - Collection errors = ((AllNodesFailedException) error).getErrors().values(); + Collection> errors = + ((AllNodesFailedException) error).getAllErrors().values(); if (errors.size() == 0) { return false; } - for (Throwable nodeError : errors) { - if (!(nodeError instanceof AuthenticationException)) { - authFailure = false; - break; + for (List nodeErrors : errors) { + for (Throwable nodeError : nodeErrors) { + if (!(nodeError instanceof AuthenticationException)) { + return false; + } } } } - return authFailure; + return true; } private static ImmutableList buildEventTypes(boolean 
listenClusterEvents) { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java index 652092e0cff..ff9384b3e24 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/Conversions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,9 +37,13 @@ import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; import com.datastax.oss.driver.api.core.servererrors.AlreadyExistsException; import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; +import com.datastax.oss.driver.api.core.servererrors.CASWriteUnknownException; +import com.datastax.oss.driver.api.core.servererrors.CDCWriteFailureException; import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; import com.datastax.oss.driver.api.core.servererrors.InvalidConfigurationInQueryException; @@ -54,15 +60,14 @@ import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; +import com.datastax.oss.driver.internal.core.data.ValuesHelper; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import 
com.datastax.oss.driver.shaded.guava.common.primitives.Ints; import com.datastax.oss.protocol.internal.Message; @@ -74,6 +79,7 @@ import com.datastax.oss.protocol.internal.response.Error; import com.datastax.oss.protocol.internal.response.Result; import com.datastax.oss.protocol.internal.response.error.AlreadyExists; +import com.datastax.oss.protocol.internal.response.error.CASWriteUnknown; import com.datastax.oss.protocol.internal.response.error.ReadFailure; import com.datastax.oss.protocol.internal.response.error.ReadTimeout; import com.datastax.oss.protocol.internal.response.error.Unavailable; @@ -87,6 +93,7 @@ import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableList; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; import java.nio.ByteBuffer; +import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -132,13 +139,19 @@ public static Message toMessage( config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) : serialConsistency.getProtocolCode(); long timestamp = statement.getQueryTimestamp(); - if (timestamp == Long.MIN_VALUE) { + if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) { timestamp = context.getTimestampGenerator().next(); } CodecRegistry codecRegistry = context.getCodecRegistry(); ProtocolVersion protocolVersion = context.getProtocolVersion(); ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry(); CqlIdentifier keyspace = statement.getKeyspace(); + int nowInSeconds = statement.getNowInSeconds(); + if (nowInSeconds != Statement.NO_NOW_IN_SECONDS + && !protocolVersionRegistry.supports( + protocolVersion, DefaultProtocolFeature.NOW_IN_SECONDS)) { + throw new IllegalArgumentException("Can't use nowInSeconds with protocol " + protocolVersion); + } if (statement instanceof SimpleStatement) { SimpleStatement simpleStatement = (SimpleStatement) statement; List positionalValues = simpleStatement.getPositionalValues(); @@ -163,7 +176,8 @@ public static Message toMessage( statement.getPagingState(), serialConsistencyCode, timestamp, - (keyspace == null) ? null : keyspace.asInternal()); + (keyspace == null) ? null : keyspace.asInternal(), + nowInSeconds); return new Query(simpleStatement.getQuery(), queryOptions); } else if (statement instanceof BoundStatement) { BoundStatement boundStatement = (BoundStatement) statement; @@ -183,7 +197,8 @@ public static Message toMessage( statement.getPagingState(), serialConsistencyCode, timestamp, - null); + null, + nowInSeconds); PreparedStatement preparedStatement = boundStatement.getPreparedStatement(); ByteBuffer id = preparedStatement.getId(); ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId(); @@ -233,7 +248,8 @@ public static Message toMessage( consistencyCode, serialConsistencyCode, timestamp, - (keyspace == null) ? null : keyspace.asInternal()); + (keyspace == null) ? null : keyspace.asInternal(), + nowInSeconds); } else { throw new IllegalArgumentException( "Unsupported statement type: " + statement.getClass().getName()); @@ -248,7 +264,10 @@ public static List encode( ByteBuffer[] encodedValues = new ByteBuffer[values.size()]; int i = 0; for (Object value : values) { - encodedValues[i++] = (value == null) ? null : encode(value, codecRegistry, protocolVersion); + encodedValues[i++] = + (value == null) + ? 
null + : ValuesHelper.encodeToDefaultCqlMapping(value, codecRegistry, protocolVersion); } return NullAllowingImmutableList.of(encodedValues); } @@ -269,30 +288,14 @@ public static Map encode( } else { encodedValues.put( entry.getKey().asInternal(), - encode(entry.getValue(), codecRegistry, protocolVersion)); + ValuesHelper.encodeToDefaultCqlMapping( + entry.getValue(), codecRegistry, protocolVersion)); } } return encodedValues.build(); } } - public static ByteBuffer encode( - Object value, CodecRegistry codecRegistry, ProtocolVersion protocolVersion) { - if (value instanceof Token) { - if (value instanceof Murmur3Token) { - return TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); - } else if (value instanceof ByteOrderedToken) { - return TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); - } else if (value instanceof RandomToken) { - return TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); - } else { - throw new IllegalArgumentException("Unsupported token type " + value.getClass()); - } - } else { - return codecRegistry.codecFor(value).encode(value, protocolVersion); - } - } - public static void ensureAllSet(BoundStatement boundStatement) { for (int i = 0; i < boundStatement.size(); i++) { if (!boundStatement.isSet(i)) { @@ -320,7 +323,7 @@ public static AsyncResultSet toResultSet( InternalDriverContext context) { if (result instanceof Rows) { Rows rows = (Rows) result; - Statement statement = executionInfo.getStatement(); + Statement statement = (Statement) executionInfo.getRequest(); ColumnDefinitions columnDefinitions = getResultDefinitions(rows, statement, context); return new DefaultAsyncResultSet( columnDefinitions, executionInfo, rows.getData(), session, context); @@ -358,11 +361,21 @@ public static ColumnDefinitions getResultDefinitions( public static DefaultPreparedStatement toPreparedStatement( Prepared response, PrepareRequest request, InternalDriverContext context) { + ColumnDefinitions variableDefinitions = + toColumnDefinitions(response.variablesMetadata, context); + + int[] pkIndicesInResponse = response.variablesMetadata.pkIndices; + // null means a legacy protocol version that doesn't provide the info, try to compute it + List pkIndices = + (pkIndicesInResponse == null) + ? computePkIndices(variableDefinitions, context) + : Ints.asList(pkIndicesInResponse); + return new DefaultPreparedStatement( ByteBuffer.wrap(response.preparedQueryId).asReadOnlyBuffer(), request.getQuery(), - toColumnDefinitions(response.variablesMetadata, context), - asList(response.variablesMetadata.pkIndices), + variableDefinitions, + pkIndices, (response.resultMetadataId == null) ? null : ByteBuffer.wrap(response.resultMetadataId).asReadOnlyBuffer(), @@ -396,12 +409,39 @@ public static ColumnDefinitions toColumnDefinitions( return DefaultColumnDefinitions.valueOf(ImmutableList.copyOf(values)); } - public static List asList(int[] pkIndices) { - if (pkIndices == null || pkIndices.length == 0) { + public static List computePkIndices( + ColumnDefinitions variables, InternalDriverContext context) { + if (variables.size() == 0) { return Collections.emptyList(); - } else { - return Ints.asList(pkIndices); } + // The rest of the computation relies on the fact that CQL does not have joins: all variables + // belong to the same keyspace and table. 
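+    // (e.g. given a table with PRIMARY KEY ((pk1, pk2)) and bind variables (pk2, pk1, v),
+    // this computes [1, 0]: the position of each partition key column among the variables)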
+ ColumnDefinition firstVariable = variables.get(0); + return context + .getMetadataManager() + .getMetadata() + .getKeyspace(firstVariable.getKeyspace()) + .flatMap(ks -> ks.getTable(firstVariable.getTable())) + .map(RelationMetadata::getPartitionKey) + .map(pk -> findIndices(pk, variables)) + .orElse(Collections.emptyList()); + } + + // Find at which position in `variables` each element of `partitionKey` appears + @VisibleForTesting + static List findIndices(List partitionKey, ColumnDefinitions variables) { + ImmutableList.Builder result = + ImmutableList.builderWithExpectedSize(partitionKey.size()); + for (ColumnMetadata pkColumn : partitionKey) { + int firstIndex = variables.firstIndexOf(pkColumn.getName()); + if (firstIndex < 0) { + // If a single column is missing, we can abort right away + return Collections.emptyList(); + } else { + result.add(firstIndex); + } + } + return result.build(); } public static CoordinatorException toThrowable( @@ -427,7 +467,7 @@ public static CoordinatorException toThrowable( unavailable.required, unavailable.alive); case ProtocolConstants.ErrorCode.OVERLOADED: - return new OverloadedException(node); + return new OverloadedException(node, errorMessage.message); case ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING: return new BootstrappingException(node); case ProtocolConstants.ErrorCode.TRUNCATE_ERROR: @@ -470,6 +510,15 @@ public static CoordinatorException toThrowable( context.getWriteTypeRegistry().fromName(writeFailure.writeType), writeFailure.numFailures, writeFailure.reasonMap); + case ProtocolConstants.ErrorCode.CDC_WRITE_FAILURE: + return new CDCWriteFailureException(node, errorMessage.message); + case ProtocolConstants.ErrorCode.CAS_WRITE_UNKNOWN: + CASWriteUnknown casFailure = (CASWriteUnknown) errorMessage; + return new CASWriteUnknownException( + node, + context.getConsistencyLevelRegistry().codeToLevel(casFailure.consistencyLevel), + casFailure.received, + casFailure.blockFor); case ProtocolConstants.ErrorCode.SYNTAX_ERROR: return new SyntaxError(node, errorMessage.message); case ProtocolConstants.ErrorCode.UNAUTHORIZED: @@ -485,4 +534,60 @@ public static CoordinatorException toThrowable( return new ProtocolError(node, "Unknown error code: " + errorMessage.code); } } + + /** Use {@link #resolveIdempotence(Request, DriverExecutionProfile)} instead. */ + @Deprecated + public static boolean resolveIdempotence(Request request, InternalDriverContext context) { + return resolveIdempotence(request, resolveExecutionProfile(request, context)); + } + + public static boolean resolveIdempotence( + Request request, DriverExecutionProfile executionProfile) { + Boolean requestIsIdempotent = request.isIdempotent(); + return (requestIsIdempotent == null) + ? executionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE) + : requestIsIdempotent; + } + + /** Use {@link #resolveRequestTimeout(Request, DriverExecutionProfile)} instead. */ + @Deprecated + public static Duration resolveRequestTimeout(Request request, InternalDriverContext context) { + return resolveRequestTimeout(request, resolveExecutionProfile(request, context)); + } + + public static Duration resolveRequestTimeout( + Request request, DriverExecutionProfile executionProfile) { + Duration timeout = request.getTimeout(); + return timeout != null + ? timeout + : executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); + } + + /** Use {@link #resolveRetryPolicy(InternalDriverContext, DriverExecutionProfile)} instead. 
*/ + @Deprecated + public static RetryPolicy resolveRetryPolicy(Request request, InternalDriverContext context) { + DriverExecutionProfile executionProfile = resolveExecutionProfile(request, context); + return context.getRetryPolicy(executionProfile.getName()); + } + + public static RetryPolicy resolveRetryPolicy( + InternalDriverContext context, DriverExecutionProfile executionProfile) { + return context.getRetryPolicy(executionProfile.getName()); + } + + /** + * Use {@link #resolveSpeculativeExecutionPolicy(InternalDriverContext, DriverExecutionProfile)} + * instead. + */ + @Deprecated + public static SpeculativeExecutionPolicy resolveSpeculativeExecutionPolicy( + Request request, InternalDriverContext context) { + DriverExecutionProfile executionProfile = resolveExecutionProfile(request, context); + return context.getSpeculativeExecutionPolicy(executionProfile.getName()); + } + + public static SpeculativeExecutionPolicy resolveSpeculativeExecutionPolicy( + InternalDriverContext context, DriverExecutionProfile executionProfile) { + return context.getSpeculativeExecutionPolicy(executionProfile.getName()); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java index 4e0b51fe482..a3d11cff054 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareAsyncProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,31 +20,115 @@ import com.datastax.oss.driver.api.core.cql.PrepareRequest; import com.datastax.oss.driver.api.core.cql.PreparedStatement; import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.ListType; +import com.datastax.oss.driver.api.core.type.MapType; +import com.datastax.oss.driver.api.core.type.SetType; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent; import com.datastax.oss.driver.internal.core.session.DefaultSession; import com.datastax.oss.driver.internal.core.session.RequestProcessor; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; +import com.datastax.oss.driver.shaded.guava.common.base.Functions; import com.datastax.oss.driver.shaded.guava.common.cache.Cache; import com.datastax.oss.driver.shaded.guava.common.cache.CacheBuilder; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; +import com.datastax.oss.protocol.internal.ProtocolConstants; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.util.concurrent.EventExecutor; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.ExecutionException; +import java.util.function.Function; import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @ThreadSafe public class CqlPrepareAsyncProcessor implements RequestProcessor> { + private static final Logger LOG = LoggerFactory.getLogger(CqlPrepareAsyncProcessor.class); + protected final Cache> cache; public CqlPrepareAsyncProcessor() { - this(CacheBuilder.newBuilder().weakValues().build()); + this(Optional.empty()); + } + + public CqlPrepareAsyncProcessor(@NonNull Optional context) { + this(context, Functions.identity()); } protected CqlPrepareAsyncProcessor( - Cache> cache) { - this.cache = cache; + Optional context, + Function, CacheBuilder> decorator) { + + CacheBuilder baseCache = CacheBuilder.newBuilder().weakValues(); + this.cache = decorator.apply(baseCache).build(); + context.ifPresent( + (ctx) -> { + LOG.info("Adding handler to invalidate cached prepared statements on type changes"); + EventExecutor adminExecutor = ctx.getNettyOptions().adminEventExecutorGroup().next(); + ctx.getEventBus() + .register( + TypeChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onTypeChanged)); + }); + } + + private static boolean typeMatches(UserDefinedType oldType, DataType typeToCheck) { + + switch (typeToCheck.getProtocolCode()) { + case ProtocolConstants.DataType.UDT: + UserDefinedType udtType = (UserDefinedType) typeToCheck; + return udtType.equals(oldType) + ? 
true + : Iterables.any(udtType.getFieldTypes(), (testType) -> typeMatches(oldType, testType)); + case ProtocolConstants.DataType.LIST: + ListType listType = (ListType) typeToCheck; + return typeMatches(oldType, listType.getElementType()); + case ProtocolConstants.DataType.SET: + SetType setType = (SetType) typeToCheck; + return typeMatches(oldType, setType.getElementType()); + case ProtocolConstants.DataType.MAP: + MapType mapType = (MapType) typeToCheck; + return typeMatches(oldType, mapType.getKeyType()) + || typeMatches(oldType, mapType.getValueType()); + case ProtocolConstants.DataType.TUPLE: + TupleType tupleType = (TupleType) typeToCheck; + return Iterables.any( + tupleType.getComponentTypes(), (testType) -> typeMatches(oldType, testType)); + default: + return false; + } + } + + private void onTypeChanged(TypeChangeEvent event) { + for (Map.Entry> entry : + this.cache.asMap().entrySet()) { + + try { + PreparedStatement stmt = entry.getValue().get(); + if (Iterables.any( + stmt.getResultSetDefinitions(), (def) -> typeMatches(event.oldType, def.getType())) + || Iterables.any( + stmt.getVariableDefinitions(), + (def) -> typeMatches(event.oldType, def.getType()))) { + + this.cache.invalidate(entry.getKey()); + this.cache.cleanUp(); + } + } catch (Exception e) { + LOG.info("Exception while invalidating prepared statement cache due to UDT change", e); + } + } } @Override @@ -76,7 +162,9 @@ public CompletionStage process( }); } } - return result; + // Return a defensive copy. So if a client cancels its request, the cache won't be impacted + // nor a potential concurrent request. + return result.thenApply(x -> x); // copy() is available only since Java 9 } catch (ExecutionException e) { return CompletableFutures.failedFuture(e.getCause()); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java index cc9c9ea0cfb..1ee1f303ab2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +20,7 @@ import com.datastax.oss.driver.api.core.AllNodesFailedException; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.NodeUnavailableException; import com.datastax.oss.driver.api.core.ProtocolVersion; import com.datastax.oss.driver.api.core.RequestThrottlingException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -28,6 +31,7 @@ import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; import com.datastax.oss.driver.api.core.retry.RetryDecision; import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; @@ -55,6 +59,7 @@ import io.netty.util.Timer; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; +import java.nio.ByteBuffer; import java.time.Duration; import java.util.AbstractMap; import java.util.ArrayList; @@ -78,19 +83,16 @@ public class CqlPrepareHandler implements Throttled { private final long startTimeNanos; private final String logPrefix; - private final PrepareRequest request; + private final PrepareRequest initialRequest; private final DefaultSession session; private final InternalDriverContext context; - private final DriverExecutionProfile executionProfile; private final Queue queryPlan; protected final CompletableFuture result; - private final Message message; private final Timer timer; - private final Duration timeout; private final Timeout scheduledTimeout; - private final RetryPolicy retryPolicy; private final RequestThrottler throttler; private final Boolean prepareOnAllNodes; + private final DriverExecutionProfile executionProfile; private volatile InitialPrepareCallback initialCallback; // The errors on the nodes that were already tried (lazily initialized on the first error). 
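(Reviewer note: the refactored constructor below resolves the timeout via `Conversions.resolveRequestTimeout`, which prefers a timeout set on the request itself over the profile's `basic.request.timeout`. A minimal sketch of what that precedence means for callers, using a hypothetical query rather than code from this patch:)

```java
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import java.time.Duration;

public class TimeoutPrecedenceSketch {
  public static void main(String[] args) {
    // A timeout set on the statement wins over the execution profile's
    // basic.request.timeout when the driver resolves the effective timeout.
    SimpleStatement statement =
        SimpleStatement.builder("SELECT release_version FROM system.local")
            .setTimeout(Duration.ofMillis(500)) // per-request override
            .build();
    System.out.println(statement.getTimeout()); // PT0.5S
  }
}
```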
@@ -107,15 +109,14 @@ protected CqlPrepareHandler( this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); LOG.trace("[{}] Creating new handler for prepare request {}", logPrefix, request); - this.request = request; + this.initialRequest = request; this.session = session; this.context = context; - this.executionProfile = Conversions.resolveExecutionProfile(request, context); + executionProfile = Conversions.resolveExecutionProfile(request, context); this.queryPlan = context .getLoadBalancingPolicyWrapper() .newQueryPlan(request, executionProfile.getName(), session); - this.retryPolicy = context.getRetryPolicy(executionProfile.getName()); this.result = new CompletableFuture<>(); this.result.exceptionally( @@ -123,28 +124,16 @@ protected CqlPrepareHandler( try { if (t instanceof CancellationException) { cancelTimeout(); + context.getRequestThrottler().signalCancel(this); } } catch (Throwable t2) { Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); } return null; }); - ProtocolVersion protocolVersion = context.getProtocolVersion(); - ProtocolVersionRegistry registry = context.getProtocolVersionRegistry(); - CqlIdentifier keyspace = request.getKeyspace(); - if (keyspace != null - && !registry.supports(protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { - throw new IllegalArgumentException( - "Can't use per-request keyspace with protocol " + protocolVersion); - } - this.message = - new Prepare(request.getQuery(), (keyspace == null) ? null : keyspace.asInternal()); this.timer = context.getNettyOptions().getTimer(); - this.timeout = - request.getTimeout() != null - ? request.getTimeout() - : executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); + Duration timeout = Conversions.resolveRequestTimeout(request, executionProfile); this.scheduledTimeout = scheduleTimeout(timeout); this.prepareOnAllNodes = executionProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES); @@ -154,6 +143,8 @@ protected CqlPrepareHandler( @Override public void onThrottleReady(boolean wasDelayed) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(initialRequest, context); if (wasDelayed) { session .getMetricUpdater() @@ -163,7 +154,7 @@ public void onThrottleReady(boolean wasDelayed) { System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS); } - sendRequest(null, 0); + sendRequest(initialRequest, null, 0); } public CompletableFuture handle() { @@ -192,7 +183,7 @@ private void cancelTimeout() { } } - private void sendRequest(Node node, int retryCount) { + private void sendRequest(PrepareRequest request, Node node, int retryCount) { if (result.isDone()) { return; } @@ -202,6 +193,8 @@ private void sendRequest(Node node, int retryCount) { channel = session.getChannel(node, logPrefix); if (channel != null) { break; + } else { + recordError(node, new NodeUnavailableException(node)); } } } @@ -209,13 +202,29 @@ private void sendRequest(Node node, int retryCount) { setFinalError(AllNodesFailedException.fromErrors(this.errors)); } else { InitialPrepareCallback initialPrepareCallback = - new InitialPrepareCallback(node, channel, retryCount); + new InitialPrepareCallback(request, node, channel, retryCount); + + Prepare message = toPrepareMessage(request); + channel .write(message, false, request.getCustomPayload(), initialPrepareCallback) .addListener(initialPrepareCallback); } } + @NonNull + private Prepare toPrepareMessage(PrepareRequest request) { + ProtocolVersion protocolVersion = context.getProtocolVersion(); + ProtocolVersionRegistry 
registry = context.getProtocolVersionRegistry(); + CqlIdentifier keyspace = request.getKeyspace(); + if (keyspace != null + && !registry.supports(protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) { + throw new IllegalArgumentException( + "Can't use per-request keyspace with protocol " + protocolVersion); + } + return new Prepare(request.getQuery(), (keyspace == null) ? null : keyspace.asInternal()); + } + private void recordError(Node node, Throwable error) { // Use a local variable to do only a single volatile read in the nominal case List<Map.Entry<Node, Throwable>> errorsSnapshot = this.errors; @@ -230,19 +239,19 @@ private void recordError(Node node, Throwable error) { errorsSnapshot.add(new AbstractMap.SimpleEntry<>(node, error)); } - private void setFinalResult(Prepared prepared) { + private void setFinalResult(PrepareRequest request, Prepared response) { // Whatever happens below, we're done with this stream id throttler.signalSuccess(this); DefaultPreparedStatement preparedStatement = - Conversions.toPreparedStatement(prepared, request, context); + Conversions.toPreparedStatement(response, request, context); session .getRepreparePayloads() .put(preparedStatement.getId(), preparedStatement.getRepreparePayload()); if (prepareOnAllNodes) { - prepareOnOtherNodes() + prepareOnOtherNodes(request) .thenRun( () -> { LOG.trace( @@ -260,35 +269,35 @@ } } - private CompletionStage<Void> prepareOnOtherNodes() { + private CompletionStage<Void> prepareOnOtherNodes(PrepareRequest request) { List<CompletionStage<Void>> otherNodesFutures = new ArrayList<>(); // Only process the rest of the query plan. Any node before that is either the coordinator, or // a node that failed (we assume that retrying right now has little chance of success). for (Node node : queryPlan) { - otherNodesFutures.add(prepareOnOtherNode(node)); + otherNodesFutures.add(prepareOnOtherNode(request, node)); } return CompletableFutures.allDone(otherNodesFutures); } // Try to reprepare on another node, after the initial query has succeeded. Errors are not // blocking, the preparation will be retried later on that node. Simply warn and move on.
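The comment above covers the pass that runs after the coordinator has prepared the query; whether that extra pass happens at all is driven by `DefaultDriverOption.PREPARE_ON_ALL_NODES`, read in the constructor earlier in this diff. A usage sketch, assuming the standard programmatic config loader (the query, keyspace and table are placeholders):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;

public class PrepareOnAllNodesExample {
  public static void main(String[] args) {
    // With PREPARE_ON_ALL_NODES disabled, prepare() completes as soon as the
    // coordinator has the statement; other nodes are only reprepared lazily,
    // when they answer with an UNPREPARED error.
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES, false)
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      PreparedStatement ps = session.prepare("SELECT v FROM ks.tbl WHERE pk = ?");
      System.out.println(ps.getId());
    }
  }
}
```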
- private CompletionStage<Void> prepareOnOtherNode(Node node) { + private CompletionStage<Void> prepareOnOtherNode(PrepareRequest request, Node node) { LOG.trace("[{}] Repreparing on {}", logPrefix, node); DriverChannel channel = session.getChannel(node, logPrefix); if (channel == null) { LOG.trace("[{}] Could not get a channel to reprepare on {}, skipping", logPrefix, node); return CompletableFuture.completedFuture(null); } else { - ThrottledAdminRequestHandler handler = - new ThrottledAdminRequestHandler( + ThrottledAdminRequestHandler<ByteBuffer> handler = + ThrottledAdminRequestHandler.prepare( channel, - message, + false, + toPrepareMessage(request), request.getCustomPayload(), - timeout, + Conversions.resolveRequestTimeout(request, executionProfile), throttler, session.getMetricUpdater(), - logPrefix, - message.toString()); + logPrefix); return handler .start() .handle( @@ -306,6 +315,8 @@ private CompletionStage<Void> prepareOnOtherNode(Node node) { @Override public void onThrottleFailure(@NonNull RequestThrottlingException error) { + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(initialRequest, context); session .getMetricUpdater() .incrementCounter(DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); @@ -325,13 +336,16 @@ private void setFinalError(Throwable error) { private class InitialPrepareCallback implements ResponseCallback, GenericFutureListener<Future<java.lang.Void>> { + private final PrepareRequest request; private final Node node; private final DriverChannel channel; // How many times we've invoked the retry policy and it has returned a "retry" decision (0 for // the first attempt of each execution). private final int retryCount; - private InitialPrepareCallback(Node node, DriverChannel channel, int retryCount) { + private InitialPrepareCallback( + PrepareRequest request, Node node, DriverChannel channel, int retryCount) { + this.request = request; this.node = node; this.channel = channel; this.retryCount = retryCount; @@ -347,7 +361,7 @@ public void operationComplete(Future<java.lang.Void> future) { node, future.cause().toString()); recordError(node, future.cause()); - sendRequest(null, retryCount); // try next host + sendRequest(request, null, retryCount); // try next host } else { if (result.isDone()) { // Might happen if the timeout just fired @@ -368,7 +382,7 @@ public void onResponse(Frame responseFrame) { Message responseMessage = responseFrame.message; if (responseMessage instanceof Prepared) { LOG.trace("[{}] Got result, completing", logPrefix); - setFinalResult((Prepared) responseMessage); + setFinalResult(request, (Prepared) responseMessage); } else if (responseMessage instanceof Error) { LOG.trace("[{}] Got error response, processing", logPrefix); processErrorResponse((Error) responseMessage); @@ -398,7 +412,7 @@ private void processErrorResponse(Error errorMessage) { if (error instanceof BootstrappingException) { LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); recordError(node, error); - sendRequest(null, retryCount); + sendRequest(request, null, retryCount); } else if (error instanceof QueryValidationException || error instanceof FunctionFailureException || error instanceof ProtocolError) { @@ -407,21 +421,23 @@ private void processErrorResponse(Error errorMessage) { } else { // Because prepare requests are known to always be idempotent, we call the retry policy // directly, without checking the flag.
- RetryDecision decision = retryPolicy.onErrorResponse(request, error, retryCount); - processRetryDecision(decision, error); + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + RetryVerdict verdict = retryPolicy.onErrorResponseVerdict(request, error, retryCount); + processRetryVerdict(verdict, error); } } - private void processRetryDecision(RetryDecision decision, Throwable error) { + private void processRetryVerdict(RetryVerdict verdict, Throwable error) { + RetryDecision decision = verdict.getRetryDecision(); LOG.trace("[{}] Processing retry decision {}", logPrefix, decision); switch (decision) { case RETRY_SAME: recordError(node, error); - sendRequest(node, retryCount + 1); + sendRequest(verdict.getRetryRequest(request), node, retryCount + 1); break; case RETRY_NEXT: recordError(node, error); - sendRequest(null, retryCount + 1); + sendRequest(verdict.getRetryRequest(request), null, retryCount + 1); break; case RETHROW: setFinalError(error); @@ -441,8 +457,16 @@ public void onFailure(Throwable error) { return; } LOG.trace("[{}] Request failure, processing: {}", logPrefix, error.toString()); - RetryDecision decision = retryPolicy.onRequestAborted(request, error, retryCount); - processRetryDecision(decision, error); + RetryVerdict verdict; + try { + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + verdict = retryPolicy.onRequestAbortedVerdict(request, error, retryCount); + } catch (Throwable cause) { + setFinalError( + new IllegalStateException("Unexpected error while invoking the retry policy", cause)); + return; + } + processRetryVerdict(verdict, error); } public void cancel() { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java index 90e20f72394..0896df07140 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareSyncProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java index 837c0062602..3013848372b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestAsyncProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java index f01206e29a9..6842547b11a 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +21,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.DriverException; import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.NodeUnavailableException; import com.datastax.oss.driver.api.core.RequestThrottlingException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; @@ -29,8 +32,8 @@ import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; -import com.datastax.oss.driver.api.core.retry.RetryDecision; import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; import com.datastax.oss.driver.api.core.servererrors.FunctionFailureException; @@ -41,7 +44,7 @@ import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; import com.datastax.oss.driver.api.core.session.throttling.Throttled; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; import com.datastax.oss.driver.api.core.tracker.RequestTracker; import com.datastax.oss.driver.internal.core.adminrequest.ThrottledAdminRequestHandler; import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException; @@ -56,7 +59,8 @@ import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; import com.datastax.oss.driver.internal.core.tracker.RequestLogger; import com.datastax.oss.driver.internal.core.util.Loggers; -import com.datastax.oss.driver.internal.core.util.collection.QueryPlan; +import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; +import com.datastax.oss.driver.shaded.guava.common.base.Joiner; import com.datastax.oss.protocol.internal.Frame; import com.datastax.oss.protocol.internal.Message; import com.datastax.oss.protocol.internal.ProtocolConstants; @@ -80,6 +84,7 @@ import java.util.AbstractMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Queue; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; @@ -98,15 +103,12 @@ public class CqlRequestHandler implements Throttled { private static final long NANOTIME_NOT_MEASURED_YET = -1; private final long startTimeNanos; - private final String logPrefix; - private final Statement<?> statement; + private final String handlerLogPrefix; + private final Statement<?> initialStatement; private final DefaultSession session; private final CqlIdentifier keyspace; private final InternalDriverContext context; - @NonNull private final DriverExecutionProfile executionProfile; - private final boolean isIdempotent; protected final CompletableFuture<AsyncResultSet> result; - private final Message message; private final Timer timer; /** * How many speculative executions are currently running
(including the initial execution). We @@ -121,72 +123,70 @@ public class CqlRequestHandler implements Throttled { */ private final AtomicInteger startedSpeculativeExecutionsCount; - private final Duration timeout; final Timeout scheduledTimeout; final List<Timeout> scheduledExecutions; private final List<NodeResponseCallback> inFlightCallbacks; - private final RetryPolicy retryPolicy; - private final SpeculativeExecutionPolicy speculativeExecutionPolicy; private final RequestThrottler throttler; private final RequestTracker requestTracker; + private final Optional<RequestIdGenerator> requestIdGenerator; private final SessionMetricUpdater sessionMetricUpdater; + private final DriverExecutionProfile executionProfile; // The errors on the nodes that were already tried (lazily initialized on the first error). // We don't use a map because nodes can appear multiple times. private volatile List<Map.Entry<Node, Throwable>> errors; + private final Joiner logPrefixJoiner = Joiner.on('|'); + private final String sessionName; + private final String sessionRequestId; + protected CqlRequestHandler( Statement<?> statement, DefaultSession session, InternalDriverContext context, - String sessionLogPrefix) { + String sessionName) { this.startTimeNanos = System.nanoTime(); - this.logPrefix = sessionLogPrefix + "|" + this.hashCode(); - LOG.trace("[{}] Creating new handler for request {}", logPrefix, statement); - - this.statement = statement; + this.requestIdGenerator = context.getRequestIdGenerator(); + this.sessionName = sessionName; + this.sessionRequestId = + this.requestIdGenerator + .map(RequestIdGenerator::getSessionRequestId) + .orElse(Integer.toString(this.hashCode())); + this.handlerLogPrefix = logPrefixJoiner.join(sessionName, sessionRequestId); + LOG.trace("[{}] Creating new handler for request {}", handlerLogPrefix, statement); + + this.initialStatement = statement; this.session = session; this.keyspace = session.getKeyspace().orElse(null); this.context = context; - this.executionProfile = Conversions.resolveExecutionProfile(statement, context); - this.retryPolicy = context.getRetryPolicy(executionProfile.getName()); - this.speculativeExecutionPolicy = - context.getSpeculativeExecutionPolicy(executionProfile.getName()); - Boolean statementIsIdempotent = statement.isIdempotent(); - this.isIdempotent = - (statementIsIdempotent == null) - ? executionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE) - : statementIsIdempotent; this.result = new CompletableFuture<>(); this.result.exceptionally( t -> { try { if (t instanceof CancellationException) { cancelScheduledTasks(); + context.getRequestThrottler().signalCancel(this); } } catch (Throwable t2) { - Loggers.warnWithException(LOG, "[{}] Uncaught exception", logPrefix, t2); + Loggers.warnWithException(LOG, "[{}] Uncaught exception", handlerLogPrefix, t2); } return null; }); - this.message = Conversions.toMessage(statement, executionProfile, context); - this.timer = context.getNettyOptions().getTimer(); - - this.timeout = - statement.getTimeout() != null - ? statement.getTimeout() - : executionProfile.getDuration(DefaultDriverOption.REQUEST_TIMEOUT); - this.scheduledTimeout = scheduleTimeout(timeout); this.activeExecutionsCount = new AtomicInteger(1); this.startedSpeculativeExecutionsCount = new AtomicInteger(0); - this.scheduledExecutions = isIdempotent ?
new CopyOnWriteArrayList<>() : null; + this.scheduledExecutions = new CopyOnWriteArrayList<>(); this.inFlightCallbacks = new CopyOnWriteArrayList<>(); this.requestTracker = context.getRequestTracker(); this.sessionMetricUpdater = session.getMetricUpdater(); + this.timer = context.getNettyOptions().getTimer(); + this.executionProfile = Conversions.resolveExecutionProfile(initialStatement, context); + Duration timeout = Conversions.resolveRequestTimeout(statement, executionProfile); + this.scheduledTimeout = scheduleTimeout(timeout); + this.throttler = context.getRequestThrottler(); this.throttler.register(this); } @@ -204,12 +204,12 @@ public void onThrottleReady(boolean wasDelayed) { TimeUnit.NANOSECONDS); } Queue<Node> queryPlan = - this.statement.getNode() != null - ? new QueryPlan(this.statement.getNode()) + this.initialStatement.getNode() != null + ? new SimpleQueryPlan(this.initialStatement.getNode()) : context .getLoadBalancingPolicyWrapper() - .newQueryPlan(statement, executionProfile.getName(), session); - sendRequest(null, queryPlan, 0, 0, true); + .newQueryPlan(initialStatement, executionProfile.getName(), session); + sendRequest(initialStatement, null, queryPlan, 0, 0, true); } public CompletionStage<AsyncResultSet> handle() { @@ -222,6 +222,7 @@ private Timeout scheduleTimeout(Duration timeoutDuration) { return this.timer.newTimeout( (Timeout timeout1) -> setFinalError( + initialStatement, new DriverTimeoutException("Query timed out after " + timeoutDuration), null, -1), @@ -231,7 +232,7 @@ private Timeout scheduleTimeout(Duration timeoutDuration) { // If we raced with session shutdown the timer might be closed already, rethrow with a more // explicit message result.completeExceptionally( - ("cannot be started once stopped".equals(e.getMessage())) + "cannot be started once stopped".equals(e.getMessage()) ? new IllegalStateException("Session is closed") : e); } @@ -242,6 +243,7 @@ private Timeout scheduleTimeout(Duration timeoutDuration) { /** * Sends the request to the next available node. * + * @param statement The statement to execute. * @param retriedNode if not null, it will be attempted first before the rest of the query plan. * @param queryPlan the list of nodes to try (shared with all other executions) * @param currentExecutionIndex 0 for the initial execution, 1 for the first speculative one, etc.
@@ -251,6 +253,7 @@ private Timeout scheduleTimeout(Duration timeoutDuration) { * @param scheduleNextExecution whether to schedule the next speculative execution */ private void sendRequest( + Statement<?> statement, Node retriedNode, Queue<Node> queryPlan, int currentExecutionIndex, @@ -261,11 +264,13 @@ private void sendRequest( } Node node = retriedNode; DriverChannel channel = null; - if (node == null || (channel = session.getChannel(node, logPrefix)) == null) { + if (node == null || (channel = session.getChannel(node, handlerLogPrefix)) == null) { while (!result.isDone() && (node = queryPlan.poll()) != null) { - channel = session.getChannel(node, logPrefix); + channel = session.getChannel(node, handlerLogPrefix); if (channel != null) { break; + } else { + recordError(node, new NodeUnavailableException(node)); } } } @@ -273,18 +278,30 @@ private void sendRequest( // We've reached the end of the query plan without finding any node to write to if (!result.isDone() && activeExecutionsCount.decrementAndGet() == 0) { // We're the last execution so fail the result - setFinalError(AllNodesFailedException.fromErrors(this.errors), null, -1); + setFinalError(statement, AllNodesFailedException.fromErrors(this.errors), null, -1); } } else { + Statement<?> finalStatement = statement; + String nodeRequestId = + this.requestIdGenerator + .map((g) -> g.getNodeRequestId(finalStatement, sessionRequestId)) + .orElse(Integer.toString(this.hashCode())); + statement = + this.requestIdGenerator + .map((g) -> g.getDecoratedStatement(finalStatement, nodeRequestId)) + .orElse(finalStatement); + NodeResponseCallback nodeResponseCallback = new NodeResponseCallback( + statement, node, queryPlan, channel, currentExecutionIndex, retryCount, scheduleNextExecution, - logPrefix); + logPrefixJoiner.join(this.sessionName, nodeRequestId, currentExecutionIndex)); + Message message = Conversions.toMessage(statement, executionProfile, context); channel .write(message, statement.isTracing(), statement.getCustomPayload(), nodeResponseCallback) .addListener(nodeResponseCallback); @@ -336,14 +353,23 @@ private void setFinalResult( // Only call nanoTime() if we're actually going to use it long completionTimeNanos = NANOTIME_NOT_MEASURED_YET, totalLatencyNanos = NANOTIME_NOT_MEASURED_YET; + if (!(requestTracker instanceof NoopRequestTracker)) { completionTimeNanos = System.nanoTime(); totalLatencyNanos = completionTimeNanos - startTimeNanos; long nodeLatencyNanos = completionTimeNanos - callback.nodeStartTimeNanos; requestTracker.onNodeSuccess( - statement, nodeLatencyNanos, executionProfile, callback.node, logPrefix); + callback.statement, + nodeLatencyNanos, + executionProfile, + callback.node, + handlerLogPrefix); requestTracker.onSuccess( - statement, totalLatencyNanos, executionProfile, callback.node, logPrefix); + callback.statement, + totalLatencyNanos, + executionProfile, + callback.node, + handlerLogPrefix); } if (sessionMetricUpdater.isEnabled( DefaultSessionMetric.CQL_REQUESTS, executionProfile.getName())) { @@ -362,14 +388,15 @@ private void setFinalResult( if (!executionInfo.getWarnings().isEmpty() && executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOG_WARNINGS) && LOG.isWarnEnabled()) { - logServerWarnings(executionInfo.getWarnings()); + logServerWarnings(callback.statement, executionProfile, executionInfo.getWarnings()); } } catch (Throwable error) { - setFinalError(error, callback.node, -1); + setFinalError(callback.statement, error, callback.node, -1); } } - private void logServerWarnings(List<String> warnings) { + private
void logServerWarnings( + Statement<?> statement, DriverExecutionProfile executionProfile, List<String> warnings) { // use the RequestLogFormatter to format the query StringBuilder statementString = new StringBuilder(); context @@ -403,7 +430,7 @@ private ExecutionInfo buildExecutionInfo( ByteBuffer pagingState = (resultMessage instanceof Rows) ? ((Rows) resultMessage).getMetadata().pagingState : null; return new DefaultExecutionInfo( - statement, + callback.statement, callback.node, startedSpeculativeExecutionsCount.get(), callback.execution, @@ -420,10 +447,10 @@ private ExecutionInfo buildExecutionInfo( public void onThrottleFailure(@NonNull RequestThrottlingException error) { sessionMetricUpdater.incrementCounter( DefaultSessionMetric.THROTTLING_ERRORS, executionProfile.getName()); - setFinalError(error, null, -1); + setFinalError(initialStatement, error, null, -1); } - private void setFinalError(Throwable error, Node node, int execution) { + private void setFinalError(Statement<?> statement, Throwable error, Node node, int execution) { if (error instanceof DriverException) { ((DriverException) error) .setExecutionInfo( @@ -444,7 +471,8 @@ private void setFinalError(Throwable error, Node node, int execution) { cancelScheduledTasks(); if (!(requestTracker instanceof NoopRequestTracker)) { long latencyNanos = System.nanoTime() - startTimeNanos; - requestTracker.onError(statement, error, latencyNanos, executionProfile, node, logPrefix); + requestTracker.onError( + statement, error, latencyNanos, executionProfile, node, handlerLogPrefix); } if (error instanceof DriverTimeoutException) { throttler.signalTimeout(this); @@ -465,6 +493,7 @@ private class NodeResponseCallback implements ResponseCallback, GenericFutureListener<Future<java.lang.Void>> { private final long nodeStartTimeNanos = System.nanoTime(); + private final Statement<?> statement; private final Node node; private final Queue<Node> queryPlan; private final DriverChannel channel; @@ -478,6 +507,7 @@ private class NodeResponseCallback private final String logPrefix; private NodeResponseCallback( + Statement<?> statement, Node node, Queue<Node> queryPlan, DriverChannel channel, @@ -485,13 +515,14 @@ private NodeResponseCallback( int retryCount, boolean scheduleNextExecution, String logPrefix) { + this.statement = statement; this.node = node; this.queryPlan = queryPlan; this.channel = channel; this.execution = execution; this.retryCount = retryCount; this.scheduleNextExecution = scheduleNextExecution; - this.logPrefix = logPrefix + "|" + execution; + this.logPrefix = logPrefix; } // this gets invoked once the write completes.
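The `handlerLogPrefix` and `nodeRequestId` wiring above relies on the optional `RequestIdGenerator` extension point. Its interface is not shown in this diff, so the sketch below infers the three methods from their call sites (`getSessionRequestId`, `getNodeRequestId`, `getDecoratedStatement`); treat the signatures and the UUID-plus-counter scheme as illustrative assumptions:

```java
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;

// Assumed shape of the RequestIdGenerator contract as used by the handler:
// one id per session-level request, one id per node-level attempt, and a
// hook to decorate the outgoing statement with that id.
public class UuidRequestIdGenerator implements RequestIdGenerator {

  private final AtomicLong attempts = new AtomicLong();

  @Override
  public String getSessionRequestId() {
    return UUID.randomUUID().toString();
  }

  @Override
  public String getNodeRequestId(Statement<?> statement, String sessionRequestId) {
    return sessionRequestId + "-" + attempts.incrementAndGet();
  }

  @Override
  public Statement<?> getDecoratedStatement(Statement<?> statement, String nodeRequestId) {
    return statement; // could instead attach the id to the custom payload
  }
}
```

Returning the statement unchanged from `getDecoratedStatement` keeps the generator purely observational; an implementation could also propagate the id to the server, for example through the statement's custom payload.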
@@ -502,7 +533,7 @@ public void operationComplete(Future future) throws Exception { if (error instanceof EncoderException && error.getCause() instanceof FrameTooLongException) { trackNodeError(node, error.getCause(), NANOTIME_NOT_MEASURED_YET); - setFinalError(error.getCause(), node, execution); + setFinalError(statement, error.getCause(), node, execution); } else { LOG.trace( "[{}] Failed to send request on {}, trying next node (cause: {})", @@ -515,7 +546,12 @@ public void operationComplete(Future future) throws Exception { .getMetricUpdater() .incrementCounter(DefaultNodeMetric.UNSENT_REQUESTS, executionProfile.getName()); sendRequest( - null, queryPlan, execution, retryCount, scheduleNextExecution); // try next node + statement, + null, + queryPlan, + execution, + retryCount, + scheduleNextExecution); // try next node } } else { LOG.trace("[{}] Request sent on {}", logPrefix, channel); @@ -525,10 +561,23 @@ public void operationComplete(Future future) throws Exception { cancel(); } else { inFlightCallbacks.add(this); - if (scheduleNextExecution && isIdempotent) { + if (scheduleNextExecution + && Conversions.resolveIdempotence(statement, executionProfile)) { int nextExecution = execution + 1; - long nextDelay = - speculativeExecutionPolicy.nextExecution(node, keyspace, statement, nextExecution); + long nextDelay; + try { + nextDelay = + Conversions.resolveSpeculativeExecutionPolicy(context, executionProfile) + .nextExecution(node, keyspace, statement, nextExecution); + } catch (Throwable cause) { + // This is a bug in the policy, but not fatal since we have at least one other + // execution already running. Don't fail the whole request. + LOG.error( + "[{}] Unexpected error while invoking the speculative execution policy", + logPrefix, + cause); + return; + } if (nextDelay >= 0) { scheduleSpeculativeExecution(nextExecution, nextDelay); } else { @@ -551,7 +600,7 @@ private void scheduleSpeculativeExecution(int index, long delay) { if (!result.isDone()) { LOG.trace( "[{}] Starting speculative execution {}", - CqlRequestHandler.this.logPrefix, + CqlRequestHandler.this.handlerLogPrefix, index); activeExecutionsCount.incrementAndGet(); startedSpeculativeExecutionsCount.incrementAndGet(); @@ -561,7 +610,7 @@ private void scheduleSpeculativeExecution(int index, long delay) { .getMetricUpdater() .incrementCounter( DefaultNodeMetric.SPECULATIVE_EXECUTIONS, executionProfile.getName()); - sendRequest(null, queryPlan, index, 0, true); + sendRequest(statement, null, queryPlan, index, 0, true); } }, delay, @@ -598,26 +647,24 @@ public void onResponse(Frame responseFrame) { if (responseMessage instanceof SchemaChange) { SchemaChange schemaChange = (SchemaChange) responseMessage; context - .getTopologyMonitor() - .checkSchemaAgreement() - .thenCombine( - context - .getMetadataManager() - .refreshSchema(schemaChange.keyspace, false, false) - .exceptionally( - error -> { - Loggers.warnWithException( - LOG, - "[{}] Error while refreshing schema after DDL query, " - + "new metadata might be incomplete", - logPrefix, - error); - return null; - }), - (schemaInAgreement, metadata) -> schemaInAgreement) + .getMetadataManager() + .refreshSchema(schemaChange.keyspace, false, false) .whenComplete( - ((schemaInAgreement, error) -> - setFinalResult(schemaChange, responseFrame, schemaInAgreement, this))); + (result, error) -> { + boolean schemaInAgreement; + if (error != null) { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error while refreshing schema after DDL query, " + + "keeping previous version", + 
logPrefix, + error); + schemaInAgreement = false; + } else { + schemaInAgreement = result.isSchemaInAgreement(); + } + setFinalResult(schemaChange, responseFrame, schemaInAgreement, this); + }); } else if (responseMessage instanceof SetKeyspace) { SetKeyspace setKeyspace = (SetKeyspace) responseMessage; session @@ -635,40 +682,47 @@ public void onResponse(Frame responseFrame) { new IllegalStateException("Unexpected response " + responseMessage), nodeResponseTimeNanos); setFinalError( - new IllegalStateException("Unexpected response " + responseMessage), node, execution); + statement, + new IllegalStateException("Unexpected response " + responseMessage), + node, + execution); } } catch (Throwable t) { trackNodeError(node, t, nodeResponseTimeNanos); - setFinalError(t, node, execution); + setFinalError(statement, t, node, execution); } } private void processErrorResponse(Error errorMessage) { if (errorMessage.code == ProtocolConstants.ErrorCode.UNPREPARED) { - LOG.trace("[{}] Statement is not prepared on {}, repreparing", logPrefix, node); - ByteBuffer id = ByteBuffer.wrap(((Unprepared) errorMessage).id); - RepreparePayload repreparePayload = session.getRepreparePayloads().get(id); + ByteBuffer idToReprepare = ByteBuffer.wrap(((Unprepared) errorMessage).id); + LOG.trace( + "[{}] Statement {} is not prepared on {}, repreparing", + logPrefix, + Bytes.toHexString(idToReprepare), + node); + RepreparePayload repreparePayload = session.getRepreparePayloads().get(idToReprepare); if (repreparePayload == null) { throw new IllegalStateException( String.format( "Tried to execute unprepared query %s but we don't have the data to reprepare it", - Bytes.toHexString(id))); + Bytes.toHexString(idToReprepare))); } - Prepare reprepareMessage = new Prepare(repreparePayload.query); - ThrottledAdminRequestHandler reprepareHandler = - new ThrottledAdminRequestHandler( + Prepare reprepareMessage = repreparePayload.toMessage(); + ThrottledAdminRequestHandler<ByteBuffer> reprepareHandler = + ThrottledAdminRequestHandler.prepare( channel, + true, reprepareMessage, repreparePayload.customPayload, - timeout, + Conversions.resolveRequestTimeout(statement, executionProfile), throttler, sessionMetricUpdater, - logPrefix, - "Reprepare " + reprepareMessage.toString()); + logPrefix); reprepareHandler .start() .handle( - (result, exception) -> { + (repreparedId, exception) -> { if (exception != null) { // If the error is not recoverable, surface it to the client instead of retrying if (exception instanceof UnexpectedResponseException) { @@ -682,21 +736,35 @@ private void processErrorResponse(Error errorMessage) { || prepareError instanceof ProtocolError) { LOG.trace("[{}] Unrecoverable error on reprepare, rethrowing", logPrefix); trackNodeError(node, prepareError, NANOTIME_NOT_MEASURED_YET); - setFinalError(prepareError, node, execution); + setFinalError(statement, prepareError, node, execution); return null; } } } else if (exception instanceof RequestThrottlingException) { - setFinalError(exception, node, execution); + trackNodeError(node, exception, NANOTIME_NOT_MEASURED_YET); + setFinalError(statement, exception, node, execution); return null; } recordError(node, exception); trackNodeError(node, exception, NANOTIME_NOT_MEASURED_YET); LOG.trace("[{}] Reprepare failed, trying next node", logPrefix); - sendRequest(null, queryPlan, execution, retryCount, false); + sendRequest(statement, null, queryPlan, execution, retryCount, false); } else { + if (!repreparedId.equals(idToReprepare)) { + IllegalStateException illegalStateException = + new
IllegalStateException( + String.format( + "ID mismatch while trying to reprepare (expected %s, got %s). " + + "This prepared statement won't work anymore. " + + "This usually happens when you run a 'USE...' query after " + + "the statement was prepared.", + Bytes.toHexString(idToReprepare), + Bytes.toHexString(repreparedId))); + trackNodeError(node, illegalStateException, NANOTIME_NOT_MEASURED_YET); + setFinalError(statement, illegalStateException, node, execution); + } LOG.trace("[{}] Reprepare successful, retrying", logPrefix); - sendRequest(node, queryPlan, execution, retryCount, false); + sendRequest(statement, node, queryPlan, execution, retryCount, false); } return null; }); @@ -708,20 +776,21 @@ private void processErrorResponse(Error errorMessage) { LOG.trace("[{}] {} is bootstrapping, trying next node", logPrefix, node); recordError(node, error); trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest(null, queryPlan, execution, retryCount, false); + sendRequest(statement, null, queryPlan, execution, retryCount, false); } else if (error instanceof QueryValidationException || error instanceof FunctionFailureException || error instanceof ProtocolError) { LOG.trace("[{}] Unrecoverable error, rethrowing", logPrefix); metricUpdater.incrementCounter(DefaultNodeMetric.OTHER_ERRORS, executionProfile.getName()); trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(error, node, execution); + setFinalError(statement, error, node, execution); } else { - RetryDecision decision; + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + RetryVerdict verdict; if (error instanceof ReadTimeoutException) { ReadTimeoutException readTimeout = (ReadTimeoutException) error; - decision = - retryPolicy.onReadTimeout( + verdict = + retryPolicy.onReadTimeoutVerdict( statement, readTimeout.getConsistencyLevel(), readTimeout.getBlockFor(), @@ -730,32 +799,32 @@ private void processErrorResponse(Error errorMessage) { retryCount); updateErrorMetrics( metricUpdater, - decision, + verdict, DefaultNodeMetric.READ_TIMEOUTS, DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT); } else if (error instanceof WriteTimeoutException) { WriteTimeoutException writeTimeout = (WriteTimeoutException) error; - decision = - isIdempotent - ? retryPolicy.onWriteTimeout( + verdict = + Conversions.resolveIdempotence(statement, executionProfile) + ? retryPolicy.onWriteTimeoutVerdict( statement, writeTimeout.getConsistencyLevel(), writeTimeout.getWriteType(), writeTimeout.getBlockFor(), writeTimeout.getReceived(), retryCount) - : RetryDecision.RETHROW; + : RetryVerdict.RETHROW; updateErrorMetrics( metricUpdater, - decision, + verdict, DefaultNodeMetric.WRITE_TIMEOUTS, DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT); } else if (error instanceof UnavailableException) { UnavailableException unavailable = (UnavailableException) error; - decision = - retryPolicy.onUnavailable( + verdict = + retryPolicy.onUnavailableVerdict( statement, unavailable.getConsistencyLevel(), unavailable.getRequired(), @@ -763,42 +832,54 @@ private void processErrorResponse(Error errorMessage) { retryCount); updateErrorMetrics( metricUpdater, - decision, + verdict, DefaultNodeMetric.UNAVAILABLES, DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, DefaultNodeMetric.IGNORES_ON_UNAVAILABLE); } else { - decision = - isIdempotent - ?
retryPolicy.onErrorResponse(statement, error, retryCount) - : RetryDecision.RETHROW; + verdict = + Conversions.resolveIdempotence(statement, executionProfile) + ? retryPolicy.onErrorResponseVerdict(statement, error, retryCount) + : RetryVerdict.RETHROW; updateErrorMetrics( metricUpdater, - decision, + verdict, DefaultNodeMetric.OTHER_ERRORS, DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, DefaultNodeMetric.IGNORES_ON_OTHER_ERROR); } - processRetryDecision(decision, error); + processRetryVerdict(verdict, error); } } - private void processRetryDecision(RetryDecision decision, Throwable error) { - LOG.trace("[{}] Processing retry decision {}", logPrefix, decision); - switch (decision) { + private void processRetryVerdict(RetryVerdict verdict, Throwable error) { + LOG.trace("[{}] Processing retry decision {}", logPrefix, verdict); + switch (verdict.getRetryDecision()) { case RETRY_SAME: recordError(node, error); trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest(node, queryPlan, execution, retryCount + 1, false); + sendRequest( + verdict.getRetryRequest(statement), + node, + queryPlan, + execution, + retryCount + 1, + false); break; case RETRY_NEXT: recordError(node, error); trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - sendRequest(null, queryPlan, execution, retryCount + 1, false); + sendRequest( + verdict.getRetryRequest(statement), + null, + queryPlan, + execution, + retryCount + 1, + false); break; case RETHROW: trackNodeError(node, error, NANOTIME_NOT_MEASURED_YET); - setFinalError(error, node, execution); + setFinalError(statement, error, node, execution); break; case IGNORE: setFinalResult(Void.INSTANCE, null, true, this); @@ -808,12 +889,12 @@ private void processRetryDecision(RetryDecision decision, Throwable error) { private void updateErrorMetrics( NodeMetricUpdater metricUpdater, - RetryDecision decision, + RetryVerdict verdict, DefaultNodeMetric error, DefaultNodeMetric retriesOnError, DefaultNodeMetric ignoresOnError) { metricUpdater.incrementCounter(error, executionProfile.getName()); - switch (decision) { + switch (verdict.getRetryDecision()) { case RETRY_SAME: case RETRY_NEXT: metricUpdater.incrementCounter(DefaultNodeMetric.RETRIES, executionProfile.getName()); @@ -834,17 +915,28 @@ public void onFailure(Throwable error) { if (result.isDone()) { return; } - LOG.trace("[{}] Request failure, processing: {}", logPrefix, error.toString()); - RetryDecision decision; - if (!isIdempotent || error instanceof FrameTooLongException) { - decision = RetryDecision.RETHROW; + LOG.trace("[{}] Request failure, processing: {}", logPrefix, error); + RetryVerdict verdict; + if (!Conversions.resolveIdempotence(statement, executionProfile) + || error instanceof FrameTooLongException) { + verdict = RetryVerdict.RETHROW; } else { - decision = retryPolicy.onRequestAborted(statement, error, retryCount); + try { + RetryPolicy retryPolicy = Conversions.resolveRetryPolicy(context, executionProfile); + verdict = retryPolicy.onRequestAbortedVerdict(statement, error, retryCount); + } catch (Throwable cause) { + setFinalError( + statement, + new IllegalStateException("Unexpected error while invoking the retry policy", cause), + null, + execution); + return; + } } - processRetryDecision(decision, error); + processRetryVerdict(verdict, error); updateErrorMetrics( ((DefaultNode) node).getMetricUpdater(), - decision, + verdict, DefaultNodeMetric.ABORTED_REQUESTS, DefaultNodeMetric.RETRIES_ON_ABORTED, DefaultNodeMetric.IGNORES_ON_ABORTED); diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java index 53cddc7772b..d3bd40149fb 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/CqlRequestSyncProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java index b2630006b9a..243e9aeb775 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -101,7 +103,7 @@ public CompletionStage fetchNextPage() throws IllegalStateExcept throw new IllegalStateException( "No next page. 
Use #hasMorePages before calling this method to avoid this error."); } - Statement<?> statement = executionInfo.getStatement(); + Statement<?> statement = (Statement<?>) executionInfo.getRequest(); LOG.trace("Fetching next page for {}", statement); Statement<?> nextStatement = statement.copy(nextState); return session.executeAsync(nextStatement); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java index ad9fdbc0913..38b6cf242a1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBatchStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -56,6 +58,7 @@ public class DefaultBatchStatement implements BatchStatement { private final ConsistencyLevel serialConsistencyLevel; private final Duration timeout; private final Node node; + private final int nowInSeconds; public DefaultBatchStatement( BatchType batchType, @@ -75,7 +78,8 @@ public DefaultBatchStatement( ConsistencyLevel consistencyLevel, ConsistencyLevel serialConsistencyLevel, Duration timeout, - Node node) { + Node node, + int nowInSeconds) { this.batchType = batchType; this.statements = ImmutableList.copyOf(statements); this.executionProfileName = executionProfileName; @@ -94,6 +98,7 @@ public DefaultBatchStatement( this.serialConsistencyLevel = serialConsistencyLevel; this.timeout = timeout; this.node = node; + this.nowInSeconds = nowInSeconds; } @NonNull @@ -123,7 +128,8 @@ public BatchStatement setBatchType(@NonNull BatchType newBatchType) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -147,7 +153,8 @@ public BatchStatement setKeyspace(@Nullable CqlIdentifier newKeyspace) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -175,7 +182,8 @@ public BatchStatement add(@NonNull BatchableStatement<?> statement) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } } @@ -207,7 +215,8 @@ public BatchStatement addAll(@NonNull Iterable<? extends BatchableStatement<?>> consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } } @@ -237,7 +246,8 @@ public BatchStatement clear() { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -272,7 +282,8 @@ public BatchStatement setPagingState(ByteBuffer newPagingState) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -301,7 +312,8 @@ public BatchStatement setPageSize(int newPageSize) {
consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -331,7 +343,8 @@ public BatchStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsiste newConsistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -362,7 +375,8 @@ public BatchStatement setSerialConsistencyLevel( consistencyLevel, newSerialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -377,7 +391,7 @@ public BatchStatement setExecutionProfileName(@Nullable String newConfigProfileN batchType, statements, newConfigProfileName, - executionProfile, + (newConfigProfileName == null) ? executionProfile : null, keyspace, routingKeyspace, routingKey, @@ -391,7 +405,8 @@ public BatchStatement setExecutionProfileName(@Nullable String newConfigProfileN consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -405,7 +420,7 @@ public DefaultBatchStatement setExecutionProfile(@Nullable DriverExecutionProfil return new DefaultBatchStatement( batchType, statements, - executionProfileName, + (newProfile == null) ? executionProfileName : null, newProfile, keyspace, routingKeyspace, @@ -420,7 +435,8 @@ public DefaultBatchStatement setExecutionProfile(@Nullable DriverExecutionProfil consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -473,7 +489,8 @@ public BatchStatement setRoutingKeyspace(CqlIdentifier newRoutingKeyspace) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -497,7 +514,8 @@ public BatchStatement setNode(@Nullable Node newNode) { consistencyLevel, serialConsistencyLevel, timeout, - newNode); + newNode, + nowInSeconds); } @Nullable @@ -542,7 +560,8 @@ public BatchStatement setRoutingKey(ByteBuffer newRoutingKey) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -581,7 +600,8 @@ public BatchStatement setRoutingToken(Token newRoutingToken) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -611,7 +631,8 @@ public DefaultBatchStatement setCustomPayload(@NonNull Map n consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -646,7 +667,8 @@ public DefaultBatchStatement setIdempotent(Boolean newIdempotence) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -675,7 +697,8 @@ public BatchStatement setTracing(boolean newTracing) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -704,7 +727,8 @@ public BatchStatement setQueryTimestamp(long newTimestamp) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -728,6 +752,37 @@ public BatchStatement setTimeout(@Nullable Duration newTimeout) { consistencyLevel, serialConsistencyLevel, newTimeout, - node); + node, + nowInSeconds); + } + + @Override + public int getNowInSeconds() { + return nowInSeconds; + } + + @NonNull + @Override + public BatchStatement setNowInSeconds(int newNowInSeconds) { + return new DefaultBatchStatement( + batchType, + statements, + executionProfileName, + executionProfile, + keyspace, + routingKeyspace, + routingKey, + routingToken, + customPayload, + idempotent, + tracing, + timestamp, + pagingState, + pageSize, + consistencyLevel, + serialConsistencyLevel, + timeout, + node, + newNowInSeconds); } } diff --git 
a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java index b0842670f06..3cf99c1be6e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultBoundStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,6 +61,7 @@ public class DefaultBoundStatement implements BoundStatement { private final CodecRegistry codecRegistry; private final ProtocolVersion protocolVersion; private final Node node; + private final int nowInSeconds; public DefaultBoundStatement( PreparedStatement preparedStatement, @@ -80,7 +83,8 @@ public DefaultBoundStatement( Duration timeout, CodecRegistry codecRegistry, ProtocolVersion protocolVersion, - Node node) { + Node node, + int nowInSeconds) { this.preparedStatement = preparedStatement; this.variableDefinitions = variableDefinitions; this.values = values; @@ -101,6 +105,7 @@ public DefaultBoundStatement( this.codecRegistry = codecRegistry; this.protocolVersion = protocolVersion; this.node = node; + this.nowInSeconds = nowInSeconds; } @Override public DataType getType(int i) { return variableDefinitions.get(i).getType(); } + @NonNull + @Override + public List<Integer> allIndicesOf(@NonNull CqlIdentifier id) { + List<Integer> indices = variableDefinitions.allIndicesOf(id); + if (indices.isEmpty()) { + throw new IllegalArgumentException(id + " is not a variable in this bound statement"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { int indexOf = variableDefinitions.firstIndexOf(id); @@ -123,6 +138,16 @@ public int firstIndexOf(@NonNull CqlIdentifier id) { + @NonNull + @Override + public List<Integer> allIndicesOf(@NonNull String name) { + List<Integer> indices = variableDefinitions.allIndicesOf(name); + if (indices.isEmpty()) { + throw new IllegalArgumentException(name + " is not a variable in this bound statement"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull String name) { int indexOf = variableDefinitions.firstIndexOf(name); @@ -174,7 +199,8 @@ public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @NonNull @@ -202,7 +228,7 @@ public BoundStatement setExecutionProfileName(@Nullable String newConfigProfileN variableDefinitions, values, newConfigProfileName, - executionProfile, + (newConfigProfileName == null) ?
executionProfile : null, routingKeyspace, routingKey, routingToken, @@ -217,7 +243,8 @@ public BoundStatement setExecutionProfileName(@Nullable String newConfigProfileN timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -232,7 +259,7 @@ public BoundStatement setExecutionProfile(@Nullable DriverExecutionProfile newPr preparedStatement, variableDefinitions, values, - executionProfileName, + (newProfile == null) ? executionProfileName : null, newProfile, routingKeyspace, routingKey, @@ -248,7 +275,8 @@ public BoundStatement setExecutionProfile(@Nullable DriverExecutionProfile newPr timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -258,7 +286,7 @@ public CqlIdentifier getRoutingKeyspace() { if (routingKeyspace != null) { return routingKeyspace; } else { - ColumnDefinitions definitions = preparedStatement.getResultSetDefinitions(); + ColumnDefinitions definitions = preparedStatement.getVariableDefinitions(); return (definitions.size() == 0) ? null : definitions.get(0).getKeyspace(); } } @@ -286,7 +314,8 @@ public BoundStatement setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeysp timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @NonNull @@ -312,7 +341,8 @@ public BoundStatement setNode(@Nullable Node newNode) { timeout, codecRegistry, protocolVersion, - newNode); + newNode, + nowInSeconds); } @Nullable @@ -330,7 +360,8 @@ public ByteBuffer getRoutingKey() { if (indices.isEmpty()) { return null; } else if (indices.size() == 1) { - return getBytesUnsafe(indices.get(0)); + int index = indices.get(0); + return isSet(index) ? getBytesUnsafe(index) : null; } else { ByteBuffer[] components = new ByteBuffer[indices.size()]; for (int i = 0; i < components.length; i++) { @@ -370,7 +401,8 @@ public BoundStatement setRoutingKey(@Nullable ByteBuffer newRoutingKey) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -401,7 +433,8 @@ public BoundStatement setRoutingToken(@Nullable Token newRoutingToken) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @NonNull @@ -433,7 +466,8 @@ public BoundStatement setCustomPayload(@NonNull Map newCusto timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -464,7 +498,8 @@ public BoundStatement setIdempotent(@Nullable Boolean newIdempotence) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -495,7 +530,8 @@ public BoundStatement setTracing(boolean newTracing) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -526,7 +562,8 @@ public BoundStatement setQueryTimestamp(long newTimestamp) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Nullable @@ -558,7 +595,8 @@ public BoundStatement setTimeout(@Nullable Duration newTimeout) { newTimeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -589,7 +627,8 @@ public BoundStatement setPagingState(@Nullable ByteBuffer newPagingState) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Override @@ -620,7 +659,8 @@ public BoundStatement setPageSize(int newPageSize) { timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Nullable @@ -652,7 +692,8 @@ public BoundStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsiste timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); } @Nullable @@ -685,6 
+726,39 @@ public BoundStatement setSerialConsistencyLevel( timeout, codecRegistry, protocolVersion, - node); + node, + nowInSeconds); + } + + @Override + public int getNowInSeconds() { + return nowInSeconds; + } + + @NonNull + @Override + public BoundStatement setNowInSeconds(int newNowInSeconds) { + return new DefaultBoundStatement( + preparedStatement, + variableDefinitions, + values, + executionProfileName, + executionProfile, + routingKeyspace, + routingKey, + routingToken, + customPayload, + idempotent, + tracing, + timestamp, + pagingState, + pageSize, + consistencyLevel, + serialConsistencyLevel, + timeout, + codecRegistry, + protocolVersion, + node, + newNowInSeconds); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java index 94df9234eaa..e003637c07f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinition.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java index 74b345d79bb..58304cb4f67 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultColumnDefinitions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -74,11 +76,23 @@ public boolean contains(@NonNull CqlIdentifier id) { return index.firstIndexOf(id) >= 0; } + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + return index.allIndicesOf(name); + } + @Override public int firstIndexOf(@NonNull String name) { return index.firstIndexOf(name); } + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + return index.allIndicesOf(id); + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { return index.firstIndexOf(id); @@ -111,7 +125,8 @@ private Object writeReplace() { return new SerializationProxy(this); } - private void readObject(ObjectInputStream stream) throws InvalidObjectException { + private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) + throws InvalidObjectException { // Should never be called since we serialized a proxy throw new InvalidObjectException("Proxy required"); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java index bf542923405..3ab57ddc598 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultExecutionInfo.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
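The new `allIndicesOf` lookups complement `firstIndexOf` for result sets that contain several columns with the same name (for example via aliasing). A minimal sketch of the intended usage, assuming a running session and a hypothetical table `ks.tbl(k int PRIMARY KEY, v int)`:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;

try (CqlSession session = CqlSession.builder().build()) {
  // Both result columns are named "v": the alias shadows the regular column.
  Row row = session.execute("SELECT v, writetime(v) AS v FROM ks.tbl WHERE k = 1").one();
  // firstIndexOf("v") silently resolves to the first occurrence; allIndicesOf returns both,
  // and (like the firstIndexOf-based getters) throws IllegalArgumentException if the name
  // does not exist at all.
  for (int i : row.allIndicesOf("v")) {
    System.out.println(row.getObject(i));
  }
}
```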
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +19,11 @@ import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.PagingState; import com.datastax.oss.driver.api.core.cql.QueryTrace; import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.session.DefaultSession; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; @@ -37,7 +41,7 @@ @Immutable public class DefaultExecutionInfo implements ExecutionInfo { - private final Statement statement; + private final Request request; private final Node coordinator; private final int speculativeExecutionCount; private final int successfulExecutionIndex; @@ -54,7 +58,7 @@ public class DefaultExecutionInfo implements ExecutionInfo { private final DriverExecutionProfile executionProfile; public DefaultExecutionInfo( - Statement statement, + Request request, Node coordinator, int speculativeExecutionCount, int successfulExecutionIndex, @@ -65,7 +69,8 @@ public DefaultExecutionInfo( DefaultSession session, InternalDriverContext context, DriverExecutionProfile executionProfile) { - this.statement = statement; + + this.request = request; this.coordinator = coordinator; this.speculativeExecutionCount = speculativeExecutionCount; this.successfulExecutionIndex = successfulExecutionIndex; @@ -86,8 +91,15 @@ public DefaultExecutionInfo( @NonNull @Override + @Deprecated public Statement getStatement() { - return statement; + return (Statement) request; + } + + @NonNull + @Override + public Request getRequest() { + return request; } @Nullable @@ -120,6 +132,20 @@ public ByteBuffer getPagingState() { return pagingState; } + @Nullable + @Override + public PagingState getSafePagingState() { + if (pagingState == null) { + return null; + } else { + if (!(request instanceof Statement)) { + throw new IllegalStateException("Only statements should have a paging state"); + } + Statement statement = (Statement) request; + return new DefaultPagingState(pagingState, statement, session.getContext()); + } + } + @NonNull @Override public List getWarnings() { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java new file mode 100644 index 00000000000..71243285e3e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPagingState.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.cql; + +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PagingState; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.data.ValuesHelper; +import com.datastax.oss.protocol.internal.util.Bytes; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; + +public class DefaultPagingState implements PagingState { + + private final ByteBuffer rawPagingState; + private final byte[] hash; + private final int protocolVersion; + + public DefaultPagingState( + ByteBuffer rawPagingState, Statement statement, AttachmentPoint attachmentPoint) { + this( + rawPagingState, + hash(statement, rawPagingState, attachmentPoint), + attachmentPoint.getProtocolVersion().getCode()); + } + + private DefaultPagingState(ByteBuffer rawPagingState, byte[] hash, int protocolVersion) { + this.rawPagingState = rawPagingState; + this.hash = hash; + this.protocolVersion = protocolVersion; + } + + // Same serialized form as in driver 3: + // size of raw state|size of hash|raw state|hash|protocol version + // + // The protocol version might be absent, in which case it defaults to V2 (this is for backward + // compatibility with 2.0.10 where it is always absent). + public static DefaultPagingState fromBytes(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.wrap(bytes); + short rawPagingStateLength = buffer.getShort(); + short hashLength = buffer.getShort(); + int length = rawPagingStateLength + hashLength + 2; + int legacyLength = rawPagingStateLength + hashLength; // without protocol version + if (buffer.remaining() != length && buffer.remaining() != legacyLength) { + throw new IllegalArgumentException( + "Cannot deserialize paging state, invalid format. The serialized form was corrupted, " + + "or not initially generated from a PagingState object."); + } + byte[] rawPagingState = new byte[rawPagingStateLength]; + buffer.get(rawPagingState); + byte[] hash = new byte[hashLength]; + buffer.get(hash); + int protocolVersion = buffer.hasRemaining() ? 
buffer.getShort() : 2; + return new DefaultPagingState(ByteBuffer.wrap(rawPagingState), hash, protocolVersion); + } + + @Override + public byte[] toBytes() { + ByteBuffer buffer = ByteBuffer.allocate(rawPagingState.remaining() + hash.length + 6); + buffer.putShort((short) rawPagingState.remaining()); + buffer.putShort((short) hash.length); + buffer.put(rawPagingState.duplicate()); + buffer.put(hash); + buffer.putShort((short) protocolVersion); + buffer.rewind(); + return buffer.array(); + } + + public static DefaultPagingState fromString(String string) { + byte[] bytes = Bytes.getArray(Bytes.fromHexString("0x" + string)); + return fromBytes(bytes); + } + + @Override + public String toString() { + return Bytes.toHexString(toBytes()).substring(2); // remove "0x" prefix + } + + @Override + public boolean matches(@NonNull Statement statement, @Nullable Session session) { + AttachmentPoint attachmentPoint = + (session == null) ? AttachmentPoint.NONE : session.getContext(); + byte[] actual = hash(statement, rawPagingState, attachmentPoint); + return Arrays.equals(actual, hash); + } + + @NonNull + @Override + public ByteBuffer getRawPagingState() { + return rawPagingState; + } + + // Hashes a statement's query string and parameters. We also include the paging state itself in + // the hash computation, to make the serialized form a bit more resistant to manual tampering. + private static byte[] hash( + @NonNull Statement statement, + ByteBuffer rawPagingState, + @NonNull AttachmentPoint attachmentPoint) { + // Batch statements don't have paging, the driver should never call this method for one + assert !(statement instanceof BatchStatement); + + MessageDigest messageDigest; + try { + messageDigest = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException( + "It looks like this JVM doesn't support MD5 digests, " + + "can't use the rich paging state feature", + e); + } + if (statement instanceof BoundStatement) { + BoundStatement boundStatement = (BoundStatement) statement; + String queryString = boundStatement.getPreparedStatement().getQuery(); + messageDigest.update(queryString.getBytes(Charset.defaultCharset())); + for (ByteBuffer value : boundStatement.getValues()) { + messageDigest.update(value.duplicate()); + } + } else { + SimpleStatement simpleStatement = (SimpleStatement) statement; + String queryString = simpleStatement.getQuery(); + messageDigest.update(queryString.getBytes(Charset.defaultCharset())); + for (Object value : simpleStatement.getPositionalValues()) { + ByteBuffer encodedValue = + ValuesHelper.encodeToDefaultCqlMapping( + value, attachmentPoint.getCodecRegistry(), attachmentPoint.getProtocolVersion()); + messageDigest.update(encodedValue); + } + for (Object value : simpleStatement.getNamedValues().values()) { + ByteBuffer encodedValue = + ValuesHelper.encodeToDefaultCqlMapping( + value, attachmentPoint.getCodecRegistry(), attachmentPoint.getProtocolVersion()); + messageDigest.update(encodedValue); + } + } + messageDigest.update(rawPagingState.duplicate()); + return messageDigest.digest(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java index 149a4cb3017..7f87dbe5b51 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPrepareRequest.java @@ -1,11 +1,13 @@ /* 
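`DefaultPagingState` backs the new safe-paging API: the raw paging state is wrapped together with an MD5 hash of the originating query and its values, so a state echoed back by an untrusted client can be validated before reuse. A hedged sketch of the round trip, using only the methods shown above (table and values are illustrative):

```java
import com.datastax.oss.driver.api.core.cql.PagingState;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.internal.core.cql.DefaultPagingState;

SimpleStatement stmt =
    SimpleStatement.newInstance("SELECT * FROM ks.tbl WHERE k = ?", 1).setPageSize(20);
ResultSet rs = session.execute(stmt);
PagingState state = rs.getExecutionInfo().getSafePagingState();
if (state != null) {
  String token = state.toString(); // hex-encoded, safe to hand to a client

  // Later, on the next request: re-parse and validate before trusting it.
  PagingState restored = DefaultPagingState.fromString(token);
  if (!restored.matches(stmt, session)) {
    throw new IllegalArgumentException("Paging state does not belong to this query");
  }
  session.execute(stmt.setPagingState(restored.getRawPagingState()));
}
```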
- * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java index 8dfadf9f5a3..e45e1e5add0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultPreparedStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.internal.core.data.ValuesHelper; @@ -165,7 +168,7 @@ public BoundStatement bind(@NonNull Object... values) { customPayloadForBoundStatements, areBoundStatementsIdempotent, areBoundStatementsTracing, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, pagingStateForBoundStatements, pageSizeForBoundStatements, consistencyLevelForBoundStatements, @@ -173,7 +176,8 @@ public BoundStatement bind(@NonNull Object... values) { timeoutForBoundStatements, codecRegistry, protocolVersion, - null); + null, + Statement.NO_NOW_IN_SECONDS); } @NonNull @@ -192,7 +196,7 @@ public BoundStatementBuilder boundStatementBuilder(@NonNull Object... 
values) { customPayloadForBoundStatements, areBoundStatementsIdempotent, areBoundStatementsTracing, - Long.MIN_VALUE, + Statement.NO_DEFAULT_TIMESTAMP, pagingStateForBoundStatements, pageSizeForBoundStatements, consistencyLevelForBoundStatements, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java index 1caaace911c..db95cc408b6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultQueryTrace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +21,7 @@ import com.datastax.oss.driver.api.core.cql.TraceEvent; import edu.umd.cs.findbugs.annotations.NonNull; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.UUID; @@ -30,7 +33,7 @@ public class DefaultQueryTrace implements QueryTrace { private final UUID tracingId; private final String requestType; private final int durationMicros; - private final InetAddress coordinator; + private final InetSocketAddress coordinator; private final Map parameters; private final long startedAt; private final List events; @@ -39,7 +42,7 @@ public DefaultQueryTrace( UUID tracingId, String requestType, int durationMicros, - InetAddress coordinator, + InetSocketAddress coordinator, Map parameters, long startedAt, List events) { @@ -71,7 +74,14 @@ public int getDurationMicros() { @NonNull @Override + @Deprecated public InetAddress getCoordinator() { + return coordinator.getAddress(); + } + + @NonNull + @Override + public InetSocketAddress getCoordinatorAddress() { return coordinator; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java index 1b4db7968f6..d6bf39ab9c0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultRow.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -68,6 +70,16 @@ public DataType getType(int i) { return definitions.get(i).getType(); } + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + List indices = definitions.allIndicesOf(id); + if (indices.isEmpty()) { + throw new IllegalArgumentException(id + " is not a column in this row"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { int indexOf = definitions.firstIndexOf(id); @@ -83,6 +95,16 @@ public DataType getType(@NonNull CqlIdentifier id) { return definitions.get(firstIndexOf(id)).getType(); } + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + List indices = definitions.allIndicesOf(name); + if (indices.isEmpty()) { + throw new IllegalArgumentException(name + " is not a column in this row"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull String name) { int indexOf = definitions.firstIndexOf(name); @@ -134,7 +156,8 @@ private Object writeReplace() { return new SerializationProxy(this); } - private void readObject(ObjectInputStream stream) throws InvalidObjectException { + private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) + throws InvalidObjectException { // Should never be called since we serialized a proxy throw new InvalidObjectException("Proxy required"); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java index acad2e11051..c763860479e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultSimpleStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -55,6 +57,7 @@ public class DefaultSimpleStatement implements SimpleStatement { private final ConsistencyLevel serialConsistencyLevel; private final Duration timeout; private final Node node; + private final int nowInSeconds; /** @see SimpleStatement#builder(String) */ public DefaultSimpleStatement( @@ -76,7 +79,8 @@ public DefaultSimpleStatement( ConsistencyLevel consistencyLevel, ConsistencyLevel serialConsistencyLevel, Duration timeout, - Node node) { + Node node, + int nowInSeconds) { if (!positionalValues.isEmpty() && !namedValues.isEmpty()) { throw new IllegalArgumentException("Can't have both positional and named values"); } @@ -99,6 +103,7 @@ public DefaultSimpleStatement( this.serialConsistencyLevel = serialConsistencyLevel; this.timeout = timeout; this.node = node; + this.nowInSeconds = nowInSeconds; } @NonNull @@ -129,7 +134,8 @@ public SimpleStatement setQuery(@NonNull String newQuery) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -160,7 +166,8 @@ public SimpleStatement setPositionalValues(@NonNull List newPositionalVa consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -191,7 +198,8 @@ public SimpleStatement setNamedValuesWithIds(@NonNull Map consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -208,7 +216,7 @@ public SimpleStatement setExecutionProfileName(@Nullable String newConfigProfile positionalValues, namedValues, newConfigProfileName, - executionProfile, + (newConfigProfileName == null) ? executionProfile : null, keyspace, routingKeyspace, routingKey, @@ -222,7 +230,8 @@ public SimpleStatement setExecutionProfileName(@Nullable String newConfigProfile consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -238,7 +247,7 @@ public SimpleStatement setExecutionProfile(@Nullable DriverExecutionProfile newP query, positionalValues, namedValues, - null, + (newProfile == null) ? 
executionProfileName : null, newProfile, keyspace, routingKeyspace, @@ -253,7 +262,8 @@ public SimpleStatement setExecutionProfile(@Nullable DriverExecutionProfile newP consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -284,7 +294,8 @@ public SimpleStatement setKeyspace(@Nullable CqlIdentifier newKeyspace) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -315,7 +326,8 @@ public SimpleStatement setRoutingKeyspace(@Nullable CqlIdentifier newRoutingKeys consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -340,7 +352,8 @@ public SimpleStatement setNode(@Nullable Node newNode) { consistencyLevel, serialConsistencyLevel, timeout, - newNode); + newNode, + nowInSeconds); } @Nullable @@ -377,7 +390,8 @@ public SimpleStatement setRoutingKey(@Nullable ByteBuffer newRoutingKey) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -408,7 +422,8 @@ public SimpleStatement setRoutingToken(@Nullable Token newRoutingToken) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @NonNull @@ -439,7 +454,8 @@ public SimpleStatement setCustomPayload(@NonNull Map newCust consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -470,7 +486,8 @@ public SimpleStatement setIdempotent(@Nullable Boolean newIdempotence) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -500,7 +517,8 @@ public SimpleStatement setTracing(boolean newTracing) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -530,7 +548,8 @@ public SimpleStatement setQueryTimestamp(long newTimestamp) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -561,7 +580,8 @@ public SimpleStatement setTimeout(@Nullable Duration newTimeout) { consistencyLevel, serialConsistencyLevel, newTimeout, - node); + node, + nowInSeconds); } @Nullable @@ -592,7 +612,8 @@ public SimpleStatement setPagingState(@Nullable ByteBuffer newPagingState) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Override @@ -622,7 +643,8 @@ public SimpleStatement setPageSize(int newPageSize) { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -653,7 +675,8 @@ public SimpleStatement setConsistencyLevel(@Nullable ConsistencyLevel newConsist newConsistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } @Nullable @@ -685,7 +708,39 @@ public SimpleStatement setSerialConsistencyLevel( consistencyLevel, newSerialConsistencyLevel, timeout, - node); + node, + nowInSeconds); + } + + @Override + public int getNowInSeconds() { + return nowInSeconds; + } + + @NonNull + @Override + public SimpleStatement setNowInSeconds(int newNowInSeconds) { + return new DefaultSimpleStatement( + query, + positionalValues, + namedValues, + executionProfileName, + executionProfile, + keyspace, + routingKeyspace, + routingKey, + routingToken, + customPayload, + idempotent, + tracing, + timestamp, + pagingState, + pageSize, + consistencyLevel, + serialConsistencyLevel, + timeout, + node, + newNowInSeconds); } public static Map wrapKeys(Map namedValues) { @@ -721,7 +776,8 @@ public boolean equals(Object other) { && Objects.equals(this.consistencyLevel, that.consistencyLevel) && 
Objects.equals(this.serialConsistencyLevel, that.serialConsistencyLevel) && Objects.equals(this.timeout, that.timeout) - && Objects.equals(this.node, that.node); + && Objects.equals(this.node, that.node) + && this.nowInSeconds == that.nowInSeconds; } else { return false; } @@ -748,6 +804,7 @@ public int hashCode() { consistencyLevel, serialConsistencyLevel, timeout, - node); + node, + nowInSeconds); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java index fab045bd588..9bf7ff7c8ee 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/DefaultTraceEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.datastax.oss.driver.api.core.cql.TraceEvent; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.util.Date; import net.jcip.annotations.Immutable; @@ -25,14 +28,14 @@ public class DefaultTraceEvent implements TraceEvent { private final String activity; private final long timestamp; - private final InetAddress source; + private final InetSocketAddress source; private final int sourceElapsedMicros; private final String threadName; public DefaultTraceEvent( String activity, long timestamp, - InetAddress source, + InetSocketAddress source, int sourceElapsedMicros, String threadName) { this.activity = activity; @@ -54,7 +57,13 @@ public long getTimestamp() { } @Override + @Deprecated public InetAddress getSource() { + return source.getAddress(); + } + + @Override + public InetSocketAddress getSourceAddress() { return source; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java index fde195ad74a..53cfee98b3e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/EmptyColumnDefinitions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
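The `nowInSeconds` field is threaded through every setter above because these statements are immutable: each setter rebuilds the statement with all other fields unchanged. Functionally, it lets the client override the server-side "now" used for TTL expiry checks (a protocol v5 feature, mostly useful in tests). A small sketch, with a hypothetical table name:

```java
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;

// Pretend the server clock is one day ahead, so rows with a TTL under one day appear expired.
int oneDayAhead = (int) (System.currentTimeMillis() / 1000) + 86_400;
SimpleStatement stmt =
    SimpleStatement.newInstance("SELECT * FROM ks.tbl WHERE k = 1").setNowInSeconds(oneDayAhead);
// Unset statements carry Statement.NO_NOW_IN_SECONDS, meaning the server uses its own clock.
assert SimpleStatement.newInstance("SELECT * FROM ks.tbl").getNowInSeconds()
    == Statement.NO_NOW_IN_SECONDS;
```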
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,6 +24,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import java.util.Collections; import java.util.Iterator; +import java.util.List; /** * The singleton that represents no column definitions (implemented as an enum which provides the @@ -51,11 +54,23 @@ public boolean contains(@NonNull CqlIdentifier id) { return false; } + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + return Collections.emptyList(); + } + @Override public int firstIndexOf(@NonNull String name) { return -1; } + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + return Collections.emptyList(); + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { return -1; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java index e80b442726d..2115a127dc6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/MultiPageResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java index 977b1521f6b..742699d2c1e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliterator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -89,7 +91,7 @@ public boolean tryAdvance(Consumer action) { @Nullable public Spliterator trySplit() { if (estimatedSize != Long.MAX_VALUE && estimatedSize <= chunkSize) { - // There is not point in splitting if the number of remaining elements is below the chunk size + // There is no point in splitting if the number of remaining elements is below the chunk size return null; } ElementT row = iterable.one(); @@ -140,7 +142,7 @@ public static class Builder { @NonNull public Builder withEstimatedSize(long estimatedSize) { - Preconditions.checkArgument(estimatedSize > 0, "estimatedSize must be > 0"); + Preconditions.checkArgument(estimatedSize >= 0, "estimatedSize must be >= 0"); this.estimatedSize = estimatedSize; return this; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java index ebe7f906c25..7ea54aa3b0e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcher.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,7 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; import io.netty.util.concurrent.EventExecutor; +import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.time.Instant; import java.util.ArrayList; @@ -58,7 +61,7 @@ class QueryTraceFetcher { String regularConsistency = config.getString(DefaultDriverOption.REQUEST_CONSISTENCY); String traceConsistency = config.getString(DefaultDriverOption.REQUEST_TRACE_CONSISTENCY); this.config = - (traceConsistency.equals(regularConsistency)) + traceConsistency.equals(regularConsistency) ? config : config.withString(DefaultDriverOption.REQUEST_CONSISTENCY, traceConsistency); @@ -135,20 +138,28 @@ private QueryTrace buildTrace(Row sessionRow, Iterable eventRows) { ImmutableList.Builder eventsBuilder = ImmutableList.builder(); for (Row eventRow : eventRows) { UUID eventId = eventRow.getUuid("event_id"); + int sourcePort = 0; + if (eventRow.getColumnDefinitions().contains("source_port")) { + sourcePort = eventRow.getInt("source_port"); + } eventsBuilder.add( new DefaultTraceEvent( eventRow.getString("activity"), eventId == null ? 
-1 : eventId.timestamp(), - eventRow.getInetAddress("source"), + new InetSocketAddress(eventRow.getInetAddress("source"), sourcePort), eventRow.getInt("source_elapsed"), eventRow.getString("thread"))); } Instant startedAt = sessionRow.getInstant("started_at"); + int coordinatorPort = 0; + if (sessionRow.getColumnDefinitions().contains("coordinator_port")) { + coordinatorPort = sessionRow.getInt("coordinator_port"); + } return new DefaultQueryTrace( tracingId, sessionRow.getString("request"), sessionRow.getInt("duration"), - sessionRow.getInetAddress("coordinator"), + new InetSocketAddress(sessionRow.getInetAddress("coordinator"), coordinatorPort), sessionRow.getMap("parameters", String.class, String.class), startedAt == null ? -1 : startedAt.toEpochMilli(), eventsBuilder.build()); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java index dfd5fc8def1..eb15d92acc5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/ResultSets.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +22,7 @@ public class ResultSets { public static ResultSet newInstance(AsyncResultSet firstPage) { - return (firstPage.hasMorePages()) + return firstPage.hasMorePages() ? new MultiPageResultSet(firstPage) : new SinglePageResultSet(firstPage); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java index d22e634938d..eb33da3f430 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/cql/SinglePageResultSet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
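Trace rows may now carry `source_port` / `coordinator_port` (exposed by Cassandra 4.0+); when the columns are absent, the port defaults to 0. Correspondingly, `QueryTrace` deprecates its `InetAddress` getters in favor of socket-address variants. A brief sketch, assuming a session and a statement to trace:

```java
import com.datastax.oss.driver.api.core.cql.QueryTrace;
import java.net.InetSocketAddress;

QueryTrace trace =
    session.execute(stmt.setTracing(true)).getExecutionInfo().getQueryTrace();
InetSocketAddress coordinator = trace.getCoordinatorAddress();
// The port is 0 against clusters that do not expose coordinator_port.
System.out.println(coordinator.getAddress().getHostAddress() + ":" + coordinator.getPort());
```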
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java index 9317a3f5a36..77cfa759237 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValue.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,9 +31,13 @@ import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Objects; -import net.jcip.annotations.Immutable; +import net.jcip.annotations.NotThreadSafe; -@Immutable +/** + * Implementation note: contrary to most GettableBy* and SettableBy* implementations, this class is + * mutable. + */ +@NotThreadSafe public class DefaultTupleValue implements TupleValue, Serializable { private static final long serialVersionUID = 1; @@ -107,7 +113,8 @@ private Object writeReplace() { return new SerializationProxy(this); } - private void readObject(ObjectInputStream stream) throws InvalidObjectException { + private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) + throws InvalidObjectException { // Should never be called since we serialized a proxy throw new InvalidObjectException("Proxy required"); } @@ -167,11 +174,6 @@ public int hashCode() { return result; } - @Override - public String toString() { - return codecRegistry().codecFor(type).format(this); - } - private static class SerializationProxy implements Serializable { private static final long serialVersionUID = 1; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java index 5bed077a76d..c9bf986fcc8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValue.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,10 +31,15 @@ import java.io.ObjectInputStream; import java.io.Serializable; import java.nio.ByteBuffer; +import java.util.List; import java.util.Objects; -import net.jcip.annotations.Immutable; +import net.jcip.annotations.NotThreadSafe; -@Immutable +/** + * Implementation note: contrary to most GettableBy* and SettableBy* implementations, this class is + * mutable. + */ +@NotThreadSafe public class DefaultUdtValue implements UdtValue, Serializable { private static final long serialVersionUID = 1; @@ -71,6 +78,16 @@ public int size() { return values.length; } + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + List indices = type.allIndicesOf(id); + if (indices.isEmpty()) { + throw new IllegalArgumentException(id + " is not a field in this UDT"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { int indexOf = type.firstIndexOf(id); @@ -80,6 +97,16 @@ public int firstIndexOf(@NonNull CqlIdentifier id) { return indexOf; } + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + List indices = type.allIndicesOf(name); + if (indices.isEmpty()) { + throw new IllegalArgumentException(name + " is not a field in this UDT"); + } + return indices; + } + @Override public int firstIndexOf(@NonNull String name) { int indexOf = type.firstIndexOf(name); @@ -171,11 +198,6 @@ public int hashCode() { return result; } - @Override - public String toString() { - return codecRegistry().codecFor(type).format(this); - } - /** * @serialData The type of the tuple, followed by an array of byte arrays representing the values * (null values are represented by {@code null}). @@ -184,7 +206,8 @@ private Object writeReplace() { return new SerializationProxy(this); } - private void readObject(ObjectInputStream stream) throws InvalidObjectException { + private void readObject(@SuppressWarnings("unused") ObjectInputStream stream) + throws InvalidObjectException { // Should never be called since we serialized a proxy throw new InvalidObjectException("Proxy required"); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java index 0d649220df5..d35c164eb84 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/data/IdentifierIndex.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,9 +22,11 @@ import com.datastax.oss.driver.api.core.data.GettableById; import com.datastax.oss.driver.api.core.data.GettableByName; import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableListMultimap; +import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; +import java.util.Iterator; import java.util.List; -import java.util.Map; +import java.util.Locale; import net.jcip.annotations.Immutable; /** @@ -34,22 +38,38 @@ @Immutable public class IdentifierIndex { - private final Map byId; - private final Map byCaseSensitiveName; - private final Map byCaseInsensitiveName; + private final ListMultimap byId; + private final ListMultimap byCaseSensitiveName; + private final ListMultimap byCaseInsensitiveName; public IdentifierIndex(List ids) { - this.byId = Maps.newHashMapWithExpectedSize(ids.size()); - this.byCaseSensitiveName = Maps.newHashMapWithExpectedSize(ids.size()); - this.byCaseInsensitiveName = Maps.newHashMapWithExpectedSize(ids.size()); + ImmutableListMultimap.Builder byId = ImmutableListMultimap.builder(); + ImmutableListMultimap.Builder byCaseSensitiveName = + ImmutableListMultimap.builder(); + ImmutableListMultimap.Builder byCaseInsensitiveName = + ImmutableListMultimap.builder(); int i = 0; for (CqlIdentifier id : ids) { - byId.putIfAbsent(id, i); - byCaseSensitiveName.putIfAbsent(id.asInternal(), i); - byCaseInsensitiveName.putIfAbsent(id.asInternal().toLowerCase(), i); + byId.put(id, i); + byCaseSensitiveName.put(id.asInternal(), i); + byCaseInsensitiveName.put(id.asInternal().toLowerCase(Locale.ROOT), i); i += 1; } + + this.byId = byId.build(); + this.byCaseSensitiveName = byCaseSensitiveName.build(); + this.byCaseInsensitiveName = byCaseInsensitiveName.build(); + } + + /** + * Returns all occurrences of a given name, given the matching rules described in {@link + * AccessibleByName}. + */ + public List allIndicesOf(String name) { + return Strings.isDoubleQuoted(name) + ? byCaseSensitiveName.get(Strings.unDoubleQuote(name)) + : byCaseInsensitiveName.get(name.toLowerCase(Locale.ROOT)); } /** @@ -57,16 +77,18 @@ public IdentifierIndex(List ids) { * AccessibleByName}, or -1 if it's not in the list. */ public int firstIndexOf(String name) { - Integer index = - (Strings.isDoubleQuoted(name)) - ? byCaseSensitiveName.get(Strings.unDoubleQuote(name)) - : byCaseInsensitiveName.get(name.toLowerCase()); - return (index == null) ? -1 : index; + Iterator indices = allIndicesOf(name).iterator(); + return indices.hasNext() ? indices.next() : -1; + } + + /** Returns all occurrences of a given identifier. */ + public List allIndicesOf(CqlIdentifier id) { + return byId.get(id); } /** Returns the first occurrence of a given identifier, or -1 if it's not in the list. */ public int firstIndexOf(CqlIdentifier id) { - Integer index = byId.get(id); - return (index == null) ? -1 : index; + Iterator indices = allIndicesOf(id).iterator(); + return indices.hasNext() ? 
indices.next() : -1; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java index e33068621d0..24490ca2509 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/data/ValuesHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -119,4 +121,21 @@ public static ByteBuffer[] encodePreparedValues( } return encodedValues; } + + public static ByteBuffer encodeToDefaultCqlMapping( + Object value, CodecRegistry codecRegistry, ProtocolVersion protocolVersion) { + if (value instanceof Token) { + if (value instanceof Murmur3Token) { + return TypeCodecs.BIGINT.encode(((Murmur3Token) value).getValue(), protocolVersion); + } else if (value instanceof ByteOrderedToken) { + return TypeCodecs.BLOB.encode(((ByteOrderedToken) value).getValue(), protocolVersion); + } else if (value instanceof RandomToken) { + return TypeCodecs.VARINT.encode(((RandomToken) value).getValue(), protocolVersion); + } else { + throw new IllegalArgumentException("Unsupported token type " + value.getClass()); + } + } else { + return codecRegistry.codecFor(value).encode(value, protocolVersion); + } + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java new file mode 100644 index 00000000000..a02a5eb3148 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java @@ -0,0 +1,498 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
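`encodeToDefaultCqlMapping` exists because `Token` values have no codec of their own: each partitioner's token is encoded with its natural CQL type (Murmur3 as `bigint`, random as `varint`, byte-ordered as `blob`), while any other value falls through to the codec registry. A sketch of how this internal helper resolves a token, assuming a connected session on a Murmur3-partitioned cluster:

```java
import com.datastax.oss.driver.api.core.metadata.token.Token;
import com.datastax.oss.driver.internal.core.data.ValuesHelper;
import java.nio.ByteBuffer;

// With the Murmur3 partitioner, this parses to a Murmur3Token, encoded as a CQL bigint.
Token token = session.getMetadata().getTokenMap().get().parse("-9223372036854775808");
ByteBuffer encoded =
    ValuesHelper.encodeToDefaultCqlMapping(
        token,
        session.getContext().getCodecRegistry(),
        session.getContext().getProtocolVersion());
```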
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.api.core.metadata.TokenMap; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.loadbalancing.helper.DefaultNodeDistanceEvaluatorHelper; +import com.datastax.oss.driver.internal.core.loadbalancing.helper.OptionalLocalDcHelper; +import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.DcAgnosticNodeSet; +import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.MultiDcNodeSet; +import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.NodeSet; +import com.datastax.oss.driver.internal.core.loadbalancing.nodeset.SingleDcNodeSet; +import com.datastax.oss.driver.internal.core.util.ArrayUtils; +import com.datastax.oss.driver.internal.core.util.collection.CompositeQueryPlan; +import com.datastax.oss.driver.internal.core.util.collection.LazyQueryPlan; +import com.datastax.oss.driver.internal.core.util.collection.QueryPlan; +import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; +import com.datastax.oss.driver.shaded.guava.common.base.Predicates; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import com.datastax.oss.driver.shaded.guava.common.collect.Sets; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Queue; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.IntUnaryOperator; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A basic implementation of {@link LoadBalancingPolicy} that can serve as a building block for more + * advanced use cases. + * + *

+ * <p>To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver
+ * configuration, for example:
+ *
+ * <pre>
+ * datastax-java-driver {
+ *   basic.load-balancing-policy {
+ *     class = BasicLoadBalancingPolicy
+ *     local-datacenter = datacenter1 # optional
+ *   }
+ * }
+ * </pre>
+ *
+ * See {@code reference.conf} (in the manual or core driver JAR) for more details.
+ *
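The same activation can also be done without a config file. Below is a minimal sketch, assuming the driver 4.x `DriverConfigLoader.programmaticBuilder()` API; the datacenter name and the trivial query are illustrative only:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

public class BasicPolicyActivation {
  public static void main(String[] args) {
    // Mirrors the reference.conf snippet above, but built programmatically:
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withString(
                DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, "BasicLoadBalancingPolicy")
            .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "datacenter1")
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      session.execute("SELECT release_version FROM system.local");
    }
  }
}
```

Programmatic configuration is mostly convenient in tests; production deployments usually keep these settings in `application.conf`.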

+ * <p><b>Local datacenter</b>: This implementation will only define a local datacenter if it is
+ * explicitly set either through configuration or programmatically; if the local datacenter is
+ * unspecified, this implementation will effectively act as a datacenter-agnostic load balancing
+ * policy and will consider all nodes in the cluster when creating query plans, regardless of their
+ * datacenter.
+ *

+ * <p><b>Query plan</b>: This implementation prioritizes replica nodes over non-replica ones; if
+ * more than one replica is available, the replicas will be shuffled. Non-replica nodes will be
+ * included in a round-robin fashion. If the local datacenter is defined (see above), query plans
+ * will only include local nodes, never remote ones; if it is unspecified however, query plans may
+ * contain nodes from different datacenters.
+ *
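To make that ordering concrete, here is a self-contained sketch of the steps just described, using plain JDK collections rather than the driver's internal `ArrayUtils` helpers (node names are placeholders):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class QueryPlanOrderingSketch {

  /** Shuffled replicas first, then the remaining live nodes from a rotating start point. */
  static List<String> order(List<String> liveNodes, Set<String> replicas, int roundRobinOffset) {
    List<String> plan = new ArrayList<>();
    List<String> others = new ArrayList<>();
    for (String node : liveNodes) {
      (replicas.contains(node) ? plan : others).add(node);
    }
    Collections.shuffle(plan); // spread load across replicas
    if (!others.isEmpty()) {
      // Negative distance rotates left, emulating a per-request round-robin start:
      Collections.rotate(others, -(roundRobinOffset % others.size()));
    }
    plan.addAll(others);
    return plan;
  }

  public static void main(String[] args) {
    List<String> live = Arrays.asList("n1", "n2", "n3", "n4");
    Set<String> replicas = new HashSet<>(Arrays.asList("n2", "n3"));
    System.out.println(order(live, replicas, 1)); // e.g. [n3, n2, n4, n1]
  }
}
```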

      This class is not recommended for normal users who should always prefer {@link + * DefaultLoadBalancingPolicy}. + */ +@ThreadSafe +public class BasicLoadBalancingPolicy implements LoadBalancingPolicy { + + private static final Logger LOG = LoggerFactory.getLogger(BasicLoadBalancingPolicy.class); + + protected static final IntUnaryOperator INCREMENT = i -> (i == Integer.MAX_VALUE) ? 0 : i + 1; + private static final Object[] EMPTY_NODES = new Object[0]; + + @NonNull protected final InternalDriverContext context; + @NonNull protected final DriverExecutionProfile profile; + @NonNull protected final String logPrefix; + + protected final AtomicInteger roundRobinAmount = new AtomicInteger(); + + private final int maxNodesPerRemoteDc; + private final boolean allowDcFailoverForLocalCl; + private final ConsistencyLevel defaultConsistencyLevel; + + // private because they should be set in init() and never be modified after + private volatile DistanceReporter distanceReporter; + private volatile NodeDistanceEvaluator nodeDistanceEvaluator; + private volatile String localDc; + private volatile NodeSet liveNodes; + private final LinkedHashSet preferredRemoteDcs; + + public BasicLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { + this.context = (InternalDriverContext) context; + profile = context.getConfig().getProfile(profileName); + logPrefix = context.getSessionName() + "|" + profileName; + maxNodesPerRemoteDc = + profile.getInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC); + allowDcFailoverForLocalCl = + profile.getBoolean( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS); + defaultConsistencyLevel = + this.context + .getConsistencyLevelRegistry() + .nameToLevel(profile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); + + preferredRemoteDcs = + new LinkedHashSet<>( + profile.getStringList( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)); + } + + /** + * Returns the local datacenter name, if known; empty otherwise. + * + *

+ * <p>When this method returns null, then datacenter awareness is completely disabled. All
+ * non-ignored nodes will be considered "local" regardless of their actual datacenters, and will
+ * have equal chances of being selected for query plans.
+ *

      After the policy is {@linkplain #init(Map, DistanceReporter) initialized} this method will + * return the local datacenter that was discovered by calling {@link #discoverLocalDc(Map)}. + * Before initialization, this method always returns null. + */ + @Nullable + public String getLocalDatacenter() { + return localDc; + } + + @NonNull + @Override + public Map getStartupConfiguration() { + ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); + if (localDc != null) { + builder.put("localDc", localDc); + } else { + // Local data center may not be discovered prior to connection pool initialization. + // In such scenario, return configured local data center name. + // Note that when using DC inferring load balancing policy, startup configuration + // may not show local DC name, because it will be discovered only once control connection + // is established and datacenter of contact points known. + Optional configuredDc = + new OptionalLocalDcHelper(context, profile, logPrefix).configuredLocalDc(); + configuredDc.ifPresent(d -> builder.put("localDc", d)); + } + if (!preferredRemoteDcs.isEmpty()) { + builder.put("preferredRemoteDcs", preferredRemoteDcs); + } + if (allowDcFailoverForLocalCl) { + builder.put("allowDcFailoverForLocalCl", allowDcFailoverForLocalCl); + } + if (maxNodesPerRemoteDc > 0) { + builder.put("maxNodesPerRemoteDc", maxNodesPerRemoteDc); + } + return ImmutableMap.of(BasicLoadBalancingPolicy.class.getSimpleName(), builder.build()); + } + + /** @return The nodes currently considered as live. */ + protected NodeSet getLiveNodes() { + return liveNodes; + } + + @Override + public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { + this.distanceReporter = distanceReporter; + localDc = discoverLocalDc(nodes).orElse(null); + nodeDistanceEvaluator = createNodeDistanceEvaluator(localDc, nodes); + liveNodes = + localDc == null + ? new DcAgnosticNodeSet() + : maxNodesPerRemoteDc <= 0 ? new SingleDcNodeSet(localDc) : new MultiDcNodeSet(); + for (Node node : nodes.values()) { + NodeDistance distance = computeNodeDistance(node); + distanceReporter.setDistance(node, distance); + if (distance != NodeDistance.IGNORED && node.getState() != NodeState.DOWN) { + // This includes state == UNKNOWN. If the node turns out to be unreachable, this will be + // detected when we try to open a pool to it, it will get marked down and this will be + // signaled back to this policy, which will then remove it from the live set. + liveNodes.add(node); + } + } + } + + /** + * Returns the local datacenter, if it can be discovered, or returns {@link Optional#empty empty} + * otherwise. + * + *

+ * <p>This method is called only once, during {@linkplain LoadBalancingPolicy#init(Map,
+ * LoadBalancingPolicy.DistanceReporter) initialization}.
+ *

+ * <p>Implementors may choose to throw {@link IllegalStateException} instead of returning {@link
+ * Optional#empty empty}, if they require a local datacenter to be defined in order to operate
+ * properly.
+ *
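For instance, a hypothetical subclass that insists on a fixed local datacenter could override this method as follows (the class name and the hard-coded `dc1` are assumptions for the sketch):

```java
import com.datastax.oss.driver.api.core.context.DriverContext;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;

/** Hypothetical policy that refuses to start without its hard-coded datacenter. */
public class PinnedDcLoadBalancingPolicy extends BasicLoadBalancingPolicy {

  public PinnedDcLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) {
    super(context, profileName);
  }

  @NonNull
  @Override
  protected Optional<String> discoverLocalDc(@NonNull Map<UUID, Node> nodes) {
    // A real implementation could inspect `nodes` and throw IllegalStateException
    // when the datacenter cannot be determined, as described above.
    return Optional.of("dc1"); // assumption: the deployment's local DC
  }
}
```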

+ * <p>If this method returns empty, then datacenter awareness will be completely disabled. All
+ * non-ignored nodes will be considered "local" regardless of their actual datacenters, and will
+ * have equal chances of being selected for query plans.
+ *
+ * @param nodes All the nodes that were known to exist in the cluster (regardless of their state)
+ *     when the load balancing policy was initialized. This argument is provided in case
+ *     implementors need to inspect the cluster topology to discover the local datacenter.
+ * @return The local datacenter, or {@link Optional#empty empty} if none found.
+ * @throws IllegalStateException if the local datacenter could not be discovered, and this policy
+ *     cannot operate without it.
+ */
+ @NonNull
+ protected Optional<String> discoverLocalDc(@NonNull Map<UUID, Node> nodes) {
+   return new OptionalLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes);
+ }
+
+ /**
+ * Creates a new node distance evaluator.
+ *

      This method is called only once, during {@linkplain LoadBalancingPolicy#init(Map, + * LoadBalancingPolicy.DistanceReporter) initialization}, and only after local datacenter + * discovery has been attempted. + * + * @param localDc The local datacenter that was just discovered, or null if none found. + * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) + * when the load balancing policy was initialized. This argument is provided in case + * implementors need to inspect the cluster topology to create the evaluator. + * @return the distance evaluator to use. + */ + @NonNull + protected NodeDistanceEvaluator createNodeDistanceEvaluator( + @Nullable String localDc, @NonNull Map nodes) { + return new DefaultNodeDistanceEvaluatorHelper(context, profile, logPrefix) + .createNodeDistanceEvaluator(localDc, nodes); + } + + @NonNull + @Override + public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { + // Take a snapshot since the set is concurrent: + Object[] currentNodes = liveNodes.dc(localDc).toArray(); + + Set allReplicas = getReplicas(request, session); + int replicaCount = 0; // in currentNodes + + if (!allReplicas.isEmpty()) { + // Move replicas to the beginning + for (int i = 0; i < currentNodes.length; i++) { + Node node = (Node) currentNodes[i]; + if (allReplicas.contains(node)) { + ArrayUtils.bubbleUp(currentNodes, i, replicaCount); + replicaCount += 1; + } + } + + if (replicaCount > 1) { + shuffleHead(currentNodes, replicaCount); + } + } + + LOG.trace("[{}] Prioritizing {} local replicas", logPrefix, replicaCount); + + // Round-robin the remaining nodes + ArrayUtils.rotate( + currentNodes, + replicaCount, + currentNodes.length - replicaCount, + roundRobinAmount.getAndUpdate(INCREMENT)); + + QueryPlan plan = currentNodes.length == 0 ? QueryPlan.EMPTY : new SimpleQueryPlan(currentNodes); + return maybeAddDcFailover(request, plan); + } + + @NonNull + protected Set getReplicas(@Nullable Request request, @Nullable Session session) { + if (request == null || session == null) { + return Collections.emptySet(); + } + + Optional maybeTokenMap = context.getMetadataManager().getMetadata().getTokenMap(); + if (!maybeTokenMap.isPresent()) { + return Collections.emptySet(); + } + + // Note: we're on the hot path and the getXxx methods are potentially more than simple getters, + // so we only call each method when strictly necessary (which is why the code below looks a bit + // weird). + CqlIdentifier keyspace; + Token token; + ByteBuffer key; + try { + keyspace = request.getKeyspace(); + if (keyspace == null) { + keyspace = request.getRoutingKeyspace(); + } + if (keyspace == null && session.getKeyspace().isPresent()) { + keyspace = session.getKeyspace().get(); + } + if (keyspace == null) { + return Collections.emptySet(); + } + + token = request.getRoutingToken(); + key = (token == null) ? request.getRoutingKey() : null; + if (token == null && key == null) { + return Collections.emptySet(); + } + } catch (Exception e) { + // Protect against poorly-implemented Request instances + LOG.error("Unexpected error while trying to compute query plan", e); + return Collections.emptySet(); + } + + TokenMap tokenMap = maybeTokenMap.get(); + return token != null + ? 
tokenMap.getReplicas(keyspace, token) + : tokenMap.getReplicas(keyspace, key); + } + + @NonNull + protected Queue maybeAddDcFailover(@Nullable Request request, @NonNull Queue local) { + if (maxNodesPerRemoteDc <= 0 || localDc == null) { + return local; + } + if (!allowDcFailoverForLocalCl && request instanceof Statement) { + Statement statement = (Statement) request; + ConsistencyLevel consistency = statement.getConsistencyLevel(); + if (consistency == null) { + consistency = defaultConsistencyLevel; + } + if (consistency.isDcLocal()) { + return local; + } + } + if (preferredRemoteDcs.isEmpty()) { + return new CompositeQueryPlan(local, buildRemoteQueryPlanAll()); + } + return new CompositeQueryPlan(local, buildRemoteQueryPlanPreferred()); + } + + private QueryPlan buildRemoteQueryPlanAll() { + + return new LazyQueryPlan() { + @Override + protected Object[] computeNodes() { + + Object[] remoteNodes = + liveNodes.dcs().stream() + .filter(Predicates.not(Predicates.equalTo(localDc))) + .flatMap(dc -> liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc)) + .toArray(); + if (remoteNodes.length == 0) { + return EMPTY_NODES; + } + shuffleHead(remoteNodes, remoteNodes.length); + return remoteNodes; + } + }; + } + + private QueryPlan buildRemoteQueryPlanPreferred() { + + Set dcs = liveNodes.dcs(); + List orderedDcs = Lists.newArrayListWithCapacity(dcs.size()); + orderedDcs.addAll(preferredRemoteDcs); + orderedDcs.addAll(Sets.difference(dcs, preferredRemoteDcs)); + + QueryPlan[] queryPlans = + orderedDcs.stream() + .filter(Predicates.not(Predicates.equalTo(localDc))) + .map( + (dc) -> { + return new LazyQueryPlan() { + @Override + protected Object[] computeNodes() { + Object[] rv = liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc).toArray(); + if (rv.length == 0) { + return EMPTY_NODES; + } + shuffleHead(rv, rv.length); + return rv; + } + }; + }) + .toArray(QueryPlan[]::new); + + return new CompositeQueryPlan(queryPlans); + } + + /** Exposed as a protected method so that it can be accessed by tests */ + protected void shuffleHead(Object[] currentNodes, int headLength) { + ArrayUtils.shuffleHead(currentNodes, headLength); + } + + @Override + public void onAdd(@NonNull Node node) { + NodeDistance distance = computeNodeDistance(node); + // Setting to a non-ignored distance triggers the session to open a pool, which will in turn + // set the node UP when the first channel gets opened, then #onUp will be called, and the + // node will be eventually added to the live set. + distanceReporter.setDistance(node, distance); + LOG.debug("[{}] {} was added, setting distance to {}", logPrefix, node, distance); + } + + @Override + public void onUp(@NonNull Node node) { + NodeDistance distance = computeNodeDistance(node); + if (node.getDistance() != distance) { + distanceReporter.setDistance(node, distance); + } + if (distance != NodeDistance.IGNORED && liveNodes.add(node)) { + LOG.debug("[{}] {} came back UP, added to live set", logPrefix, node); + } + } + + @Override + public void onDown(@NonNull Node node) { + if (liveNodes.remove(node)) { + LOG.debug("[{}] {} went DOWN, removed from live set", logPrefix, node); + } + } + + @Override + public void onRemove(@NonNull Node node) { + if (liveNodes.remove(node)) { + LOG.debug("[{}] {} was removed, removed from live set", logPrefix, node); + } + } + + /** + * Computes the distance of the given node. + * + *

      This method is called during {@linkplain #init(Map, DistanceReporter) initialization}, when + * a node {@linkplain #onAdd(Node) is added}, and when a node {@linkplain #onUp(Node) is back UP}. + */ + protected NodeDistance computeNodeDistance(@NonNull Node node) { + // We interrogate the custom evaluator every time since it could be dynamic + // and change its verdict between two invocations of this method. + NodeDistance distance = nodeDistanceEvaluator.evaluateDistance(node, localDc); + if (distance != null) { + return distance; + } + // no local DC defined: all nodes are considered LOCAL. + if (localDc == null) { + return NodeDistance.LOCAL; + } + // otherwise, the node is LOCAL if its datacenter is the local datacenter. + if (Objects.equals(node.getDatacenter(), localDc)) { + return NodeDistance.LOCAL; + } + // otherwise, the node will be either REMOTE or IGNORED, depending + // on how many remote nodes we accept per DC. + if (maxNodesPerRemoteDc > 0) { + Object[] remoteNodes = liveNodes.dc(node.getDatacenter()).toArray(); + for (int i = 0; i < maxNodesPerRemoteDc; i++) { + if (i == remoteNodes.length) { + // there is still room for one more REMOTE node in this DC + return NodeDistance.REMOTE; + } else if (remoteNodes[i] == node) { + return NodeDistance.REMOTE; + } + } + } + return NodeDistance.IGNORED; + } + + @Override + public void close() { + // nothing to do + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java new file mode 100644 index 00000000000..1d978091c9d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicy.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.loadbalancing.helper.InferringLocalDcHelper; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; + +/** + * An implementation of {@link LoadBalancingPolicy} that infers the local datacenter from the + * contact points, if no datacenter was provided neither through configuration nor programmatically. + * + *

+ * <p>To activate this policy, modify the {@code basic.load-balancing-policy} section in the driver
+ * configuration, for example:
+ *
+ * <pre>
+ * datastax-java-driver {
+ *   basic.load-balancing-policy {
+ *     class = DcInferringLoadBalancingPolicy
+ *     local-datacenter = datacenter1 # optional
+ *   }
+ * }
+ * </pre>
+ *
+ * See {@code reference.conf} (in the manual or core driver JAR) for more details.
+ *

+ * <p><b>Local datacenter</b>: This implementation requires a local datacenter to be defined,
+ * otherwise it will throw an {@link IllegalStateException}. A local datacenter can be supplied
+ * either:
+ *
+ * <ol>
+ *   <li>Programmatically with {@link
+ *       com.datastax.oss.driver.api.core.session.SessionBuilder#withLocalDatacenter(String)
+ *       SessionBuilder#withLocalDatacenter(String)};
+ *   <li>Through configuration, by defining the option {@link
+ *       DefaultDriverOption#LOAD_BALANCING_LOCAL_DATACENTER
+ *       basic.load-balancing-policy.local-datacenter};
+ *   <li>Or implicitly: in this case this implementation will infer the local datacenter from the
+ *       provided contact points, if and only if they are all located in the same datacenter (see
+ *       the sketch just after this list).
+ * </ol>
+ *
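A sketch of the implicit case, assuming two (hypothetical) contact points that both report `datacenter1`, with the policy class switched via the driver 4.x programmatic config API:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.net.InetSocketAddress;

public class DcInferenceSketch {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withString(
                DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS,
                "DcInferringLoadBalancingPolicy")
            .build();
    // No local datacenter is set anywhere: the policy infers it, but only if
    // both (hypothetical) contact points live in the same datacenter.
    try (CqlSession session =
        CqlSession.builder()
            .withConfigLoader(loader)
            .addContactPoint(new InetSocketAddress("10.0.0.1", 9042))
            .addContactPoint(new InetSocketAddress("10.0.0.2", 9042))
            .build()) {
      System.out.println(session.getName());
    }
  }
}
```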

+ * <p><b>Query plan</b>: see {@link DefaultLoadBalancingPolicy} for details on the computation of
+ * query plans.
+ *

      This class is not recommended for normal users who should always prefer {@link + * DefaultLoadBalancingPolicy}. + */ +@ThreadSafe +public class DcInferringLoadBalancingPolicy extends DefaultLoadBalancingPolicy { + + public DcInferringLoadBalancingPolicy( + @NonNull DriverContext context, @NonNull String profileName) { + super(context, profileName); + } + + @NonNull + @Override + protected Optional discoverLocalDc(@NonNull Map nodes) { + return new InferringLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java index 31fafe8e228..8e1c1fe5039 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,40 +17,38 @@ */ package com.datastax.oss.driver.internal.core.loadbalancing; -import com.datastax.oss.driver.api.core.CqlIdentifier; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; + import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeState; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.DefaultNode; -import com.datastax.oss.driver.internal.core.metadata.MetadataManager; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; +import com.datastax.oss.driver.internal.core.loadbalancing.helper.MandatoryLocalDcHelper; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.session.DefaultSession; import com.datastax.oss.driver.internal.core.util.ArrayUtils; -import com.datastax.oss.driver.internal.core.util.Reflection; import 
com.datastax.oss.driver.internal.core.util.collection.QueryPlan; +import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.MapMaker; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; -import java.util.Collections; +import java.util.BitSet; import java.util.Map; -import java.util.Objects; import java.util.Optional; +import java.util.OptionalLong; import java.util.Queue; import java.util.Set; import java.util.UUID; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.IntUnaryOperator; -import java.util.function.Predicate; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLongArray; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,129 +69,161 @@ * * * See {@code reference.conf} (in the manual or core driver JAR) for more details. + * + *

+ * <p><b>Local datacenter</b>: This implementation requires a local datacenter to be defined,
+ * otherwise it will throw an {@link IllegalStateException}. A local datacenter can be supplied
+ * either:
+ *
+ * <ol>
+ *   <li>Programmatically with {@link
+ *       com.datastax.oss.driver.api.core.session.SessionBuilder#withLocalDatacenter(String)
+ *       SessionBuilder#withLocalDatacenter(String)} (see the sketch just after this list);
+ *   <li>Through configuration, by defining the option {@link
+ *       DefaultDriverOption#LOAD_BALANCING_LOCAL_DATACENTER
+ *       basic.load-balancing-policy.local-datacenter};
+ *   <li>Or implicitly, if and only if no explicit contact points were provided: in this case this
+ *       implementation will infer the local datacenter from the implicit contact point (localhost).
+ * </ol>
+ *
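A minimal sketch of option 1, assuming a single hypothetical contact point; with explicit contact points the local datacenter must be supplied:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import java.net.InetSocketAddress;

public class ExplicitLocalDcSketch {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder()
            .addContactPoint(new InetSocketAddress("10.0.0.1", 9042)) // hypothetical node
            .withLocalDatacenter("datacenter1") // mandatory with explicit contact points
            .build()) {
      System.out.println(session.getName());
    }
  }
}
```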

      Query plan: This implementation prioritizes replica nodes over non-replica ones; if + * more than one replica is available, the replicas will be shuffled; if more than 2 replicas are + * available, they will be ordered from most healthy to least healthy ("Power of 2 choices" or busy + * node avoidance algorithm). Non-replica nodes will be included in a round-robin fashion. If the + * local datacenter is defined (see above), query plans will only include local nodes, never remote + * ones; if it is unspecified however, query plans may contain nodes from different datacenters. */ @ThreadSafe -public class DefaultLoadBalancingPolicy implements LoadBalancingPolicy { +public class DefaultLoadBalancingPolicy extends BasicLoadBalancingPolicy implements RequestTracker { private static final Logger LOG = LoggerFactory.getLogger(DefaultLoadBalancingPolicy.class); - private static final Predicate INCLUDE_ALL_NODES = n -> true; - private static final IntUnaryOperator INCREMENT = i -> (i == Integer.MAX_VALUE) ? 0 : i + 1; - private final String logPrefix; - private final MetadataManager metadataManager; - private final Predicate filter; - private final AtomicInteger roundRobinAmount = new AtomicInteger(); - private final boolean isDefaultPolicy; - @VisibleForTesting final CopyOnWriteArraySet localDcLiveNodes = new CopyOnWriteArraySet<>(); + private static final long NEWLY_UP_INTERVAL_NANOS = MINUTES.toNanos(1); + private static final int MAX_IN_FLIGHT_THRESHOLD = 10; + private static final long RESPONSE_COUNT_RESET_INTERVAL_NANOS = MILLISECONDS.toNanos(200); - private volatile DistanceReporter distanceReporter; - @VisibleForTesting volatile String localDc; + protected final ConcurrentMap responseTimes; + protected final Map upTimes = new ConcurrentHashMap<>(); + private final boolean avoidSlowReplicas; public DefaultLoadBalancingPolicy(@NonNull DriverContext context, @NonNull String profileName) { - InternalDriverContext internalContext = (InternalDriverContext) context; - - this.logPrefix = context.getSessionName() + "|" + profileName; - DriverExecutionProfile config = context.getConfig().getProfile(profileName); - this.localDc = getLocalDcFromConfig(internalContext, profileName, config); - this.isDefaultPolicy = profileName.equals(DriverExecutionProfile.DEFAULT_NAME); - - this.metadataManager = internalContext.getMetadataManager(); - - Predicate filterFromConfig = getFilterFromConfig(internalContext, profileName); - this.filter = - node -> { - String localDc1 = this.localDc; - if (localDc1 != null && !localDc1.equals(node.getDatacenter())) { - LOG.debug( - "[{}] Ignoring {} because it doesn't belong to the local DC {}", - logPrefix, - node, - localDc1); - return false; - } else if (!filterFromConfig.test(node)) { - LOG.debug( - "[{}] Ignoring {} because it doesn't match the user-provided predicate", - logPrefix, - node); - return false; - } else { - return true; - } - }; + super(context, profileName); + this.avoidSlowReplicas = + profile.getBoolean(DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true); + this.responseTimes = new MapMaker().weakKeys().makeMap(); } + @NonNull @Override - public void init(@NonNull Map nodes, @NonNull DistanceReporter distanceReporter) { - this.distanceReporter = distanceReporter; - - Set contactPoints = metadataManager.getContactPoints(); - if (localDc == null) { - if (metadataManager.wasImplicitContactPoint()) { - // We allow automatic inference of the local DC in this case - assert contactPoints.size() == 1; - Node contactPoint = 
contactPoints.iterator().next(); - localDc = contactPoint.getDatacenter(); - LOG.debug("[{}] Local DC set from contact point {}: {}", logPrefix, contactPoint, localDc); - } else { - throw new IllegalStateException( - "You provided explicit contact points, the local DC must be specified (see " - + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() - + " in the config)"); - } + public Optional getRequestTracker() { + if (avoidSlowReplicas) { + return Optional.of(this); } else { - ImmutableMap.Builder builder = ImmutableMap.builder(); - for (Node node : contactPoints) { - String datacenter = node.getDatacenter(); - if (!Objects.equals(localDc, datacenter)) { - builder.put(node, (datacenter == null) ? "" : datacenter); - } - } - ImmutableMap badContactPoints = builder.build(); - if (isDefaultPolicy && !badContactPoints.isEmpty()) { - LOG.warn( - "[{}] You specified {} as the local DC, but some contact points are from a different DC ({})", - logPrefix, - localDc, - badContactPoints); - } + return Optional.empty(); } + } - for (Node node : nodes.values()) { - if (filter.test(node)) { - distanceReporter.setDistance(node, NodeDistance.LOCAL); - if (node.getState() != NodeState.DOWN) { - // This includes state == UNKNOWN. If the node turns out to be unreachable, this will be - // detected when we try to open a pool to it, it will get marked down and this will be - // signaled back to this policy - localDcLiveNodes.add(node); - } - } else { - distanceReporter.setDistance(node, NodeDistance.IGNORED); - } - } + @NonNull + @Override + protected Optional discoverLocalDc(@NonNull Map nodes) { + return new MandatoryLocalDcHelper(context, profile, logPrefix).discoverLocalDc(nodes); } @NonNull @Override public Queue newQueryPlan(@Nullable Request request, @Nullable Session session) { + if (!avoidSlowReplicas) { + return super.newQueryPlan(request, session); + } + // Take a snapshot since the set is concurrent: - Object[] currentNodes = localDcLiveNodes.toArray(); + Object[] currentNodes = getLiveNodes().dc(getLocalDatacenter()).toArray(); Set allReplicas = getReplicas(request, session); int replicaCount = 0; // in currentNodes if (!allReplicas.isEmpty()) { - // Move replicas to the beginning + + // Move replicas to the beginning of the plan for (int i = 0; i < currentNodes.length; i++) { Node node = (Node) currentNodes[i]; if (allReplicas.contains(node)) { ArrayUtils.bubbleUp(currentNodes, i, replicaCount); - replicaCount += 1; + replicaCount++; } } if (replicaCount > 1) { + shuffleHead(currentNodes, replicaCount); + + if (replicaCount > 2) { + + assert session != null; + + // Test replicas health + Node newestUpReplica = null; + BitSet unhealthyReplicas = null; // bit mask storing indices of unhealthy replicas + long mostRecentUpTimeNanos = -1; + long now = nanoTime(); + for (int i = 0; i < replicaCount; i++) { + Node node = (Node) currentNodes[i]; + assert node != null; + Long upTimeNanos = upTimes.get(node); + if (upTimeNanos != null + && now - upTimeNanos - NEWLY_UP_INTERVAL_NANOS < 0 + && upTimeNanos - mostRecentUpTimeNanos > 0) { + newestUpReplica = node; + mostRecentUpTimeNanos = upTimeNanos; + } + if (newestUpReplica == null && isUnhealthy(node, session, now)) { + if (unhealthyReplicas == null) { + unhealthyReplicas = new BitSet(replicaCount); + } + unhealthyReplicas.set(i); + } + } + + // When: + // - there isn't any newly UP replica and + // - there is one or more unhealthy replicas and + // - there is a majority of healthy replicas + int unhealthyReplicasCount = + unhealthyReplicas == 
null ? 0 : unhealthyReplicas.cardinality(); + if (newestUpReplica == null + && unhealthyReplicasCount > 0 + && unhealthyReplicasCount < (replicaCount / 2.0)) { + + // Reorder the unhealthy replicas to the back of the list + // Start from the back of the replicas, then move backwards; + // stop once all unhealthy replicas are moved to the back. + int counter = 0; + for (int i = replicaCount - 1; i >= 0 && counter < unhealthyReplicasCount; i--) { + if (unhealthyReplicas.get(i)) { + ArrayUtils.bubbleDown(currentNodes, i, replicaCount - 1 - counter); + counter++; + } + } + } + + // When: + // - there is a newly UP replica and + // - the replica in first or second position is the most recent replica marked as UP and + // - dice roll 1d4 != 1 + else if ((newestUpReplica == currentNodes[0] || newestUpReplica == currentNodes[1]) + && diceRoll1d4() != 1) { + + // Send it to the back of the replicas + ArrayUtils.bubbleDown( + currentNodes, newestUpReplica == currentNodes[0] ? 0 : 1, replicaCount - 1); + } + + // Reorder the first two replicas in the shuffled list based on the number of + // in-flight requests + if (getInFlight((Node) currentNodes[0], session) + > getInFlight((Node) currentNodes[1], session)) { + ArrayUtils.swap(currentNodes, 0, 1); + } + } } } @@ -204,122 +236,128 @@ public Queue newQueryPlan(@Nullable Request request, @Nullable Session ses currentNodes.length - replicaCount, roundRobinAmount.getAndUpdate(INCREMENT)); - return new QueryPlan(currentNodes); + QueryPlan plan = currentNodes.length == 0 ? QueryPlan.EMPTY : new SimpleQueryPlan(currentNodes); + return maybeAddDcFailover(request, plan); } - private Set getReplicas(Request request, Session session) { - if (request == null || session == null) { - return Collections.emptySet(); - } - - // Note: we're on the hot path and the getXxx methods are potentially more than simple getters, - // so we only call each method when strictly necessary (which is why the code below looks a bit - // weird). - CqlIdentifier keyspace = request.getKeyspace(); - if (keyspace == null) { - keyspace = request.getRoutingKeyspace(); - } - if (keyspace == null && session.getKeyspace().isPresent()) { - keyspace = session.getKeyspace().get(); - } - if (keyspace == null) { - return Collections.emptySet(); - } + @Override + public void onNodeSuccess( + @NonNull Request request, + long latencyNanos, + @NonNull DriverExecutionProfile executionProfile, + @NonNull Node node, + @NonNull String nodeRequestLogPrefix) { + updateResponseTimes(node); + } - Token token = request.getRoutingToken(); - ByteBuffer key = (token == null) ? request.getRoutingKey() : null; - if (token == null && key == null) { - return Collections.emptySet(); - } + @Override + public void onNodeError( + @NonNull Request request, + @NonNull Throwable error, + long latencyNanos, + @NonNull DriverExecutionProfile executionProfile, + @NonNull Node node, + @NonNull String nodeRequestLogPrefix) { + updateResponseTimes(node); + } - Optional maybeTokenMap = metadataManager.getMetadata().getTokenMap(); - if (maybeTokenMap.isPresent()) { - TokenMap tokenMap = maybeTokenMap.get(); - return (token != null) - ? 
tokenMap.getReplicas(keyspace, token) - : tokenMap.getReplicas(keyspace, key); - } else { - return Collections.emptySet(); - } + /** Exposed as a protected method so that it can be accessed by tests */ + protected long nanoTime() { + return System.nanoTime(); } - @VisibleForTesting - protected void shuffleHead(Object[] currentNodes, int replicaCount) { - ArrayUtils.shuffleHead(currentNodes, replicaCount); + /** Exposed as a protected method so that it can be accessed by tests */ + protected int diceRoll1d4() { + return ThreadLocalRandom.current().nextInt(4); } - @Override - public void onAdd(@NonNull Node node) { - if (filter.test(node)) { - LOG.debug("[{}] {} was added, setting distance to LOCAL", logPrefix, node); - // Setting to a non-ignored distance triggers the session to open a pool, which will in turn - // set the node UP when the first channel gets opened. - distanceReporter.setDistance(node, NodeDistance.LOCAL); - } else { - distanceReporter.setDistance(node, NodeDistance.IGNORED); - } + protected boolean isUnhealthy(@NonNull Node node, @NonNull Session session, long now) { + return isBusy(node, session) && isResponseRateInsufficient(node, now); } - @Override - public void onUp(@NonNull Node node) { - if (filter.test(node)) { - // Normally this is already the case, but the filter could be dynamic and have ignored the - // node previously. - distanceReporter.setDistance(node, NodeDistance.LOCAL); - if (localDcLiveNodes.add(node)) { - LOG.debug("[{}] {} came back UP, added to live set", logPrefix, node); - } - } else { - distanceReporter.setDistance(node, NodeDistance.IGNORED); - } + protected boolean isBusy(@NonNull Node node, @NonNull Session session) { + return getInFlight(node, session) >= MAX_IN_FLIGHT_THRESHOLD; } - @Override - public void onDown(@NonNull Node node) { - if (localDcLiveNodes.remove(node)) { - LOG.debug("[{}] {} went DOWN, removed from live set", logPrefix, node); - } + protected boolean isResponseRateInsufficient(@NonNull Node node, long now) { + NodeResponseRateSample sample = responseTimes.get(node); + return !(sample == null || sample.hasSufficientResponses(now)); } - @Override - public void onRemove(@NonNull Node node) { - if (localDcLiveNodes.remove(node)) { - LOG.debug("[{}] {} was removed, removed from live set", logPrefix, node); - } + /** + * Synchronously updates the response times for the given node. It is synchronous because the + * {@link #DefaultLoadBalancingPolicy(com.datastax.oss.driver.api.core.context.DriverContext, + * java.lang.String) CacheLoader.load} assigned is synchronous. + * + * @param node The node to update. + */ + protected void updateResponseTimes(@NonNull Node node) { + this.responseTimes.compute(node, (k, v) -> v == null ? new NodeResponseRateSample() : v.next()); } - @Override - public void close() { - // nothing to do + protected int getInFlight(@NonNull Node node, @NonNull Session session) { + // The cast will always succeed because there's no way to replace the internal session impl + ChannelPool pool = ((DefaultSession) session).getPools().get(node); + // Note: getInFlight() includes orphaned ids, which is what we want as we need to account + // for requests that were cancelled or timed out (since the node is likely to still be + // processing them). + return (pool == null) ? 
0 : pool.getInFlight(); } - private String getLocalDcFromConfig( - InternalDriverContext internalContext, - @NonNull String profileName, - DriverExecutionProfile config) { - String localDc = internalContext.getLocalDatacenter(profileName); - if (localDc != null) { - LOG.debug("[{}] Local DC set from builder: {}", logPrefix, localDc); - } else { - localDc = config.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); - if (localDc != null) { - LOG.debug("[{}] Local DC set from configuration: {}", logPrefix, localDc); - } + protected class NodeResponseRateSample { + + @VisibleForTesting protected final long oldest; + @VisibleForTesting protected final OptionalLong newest; + + private NodeResponseRateSample() { + long now = nanoTime(); + this.oldest = now; + this.newest = OptionalLong.empty(); + } + + private NodeResponseRateSample(long oldestSample) { + this(oldestSample, nanoTime()); + } + + private NodeResponseRateSample(long oldestSample, long newestSample) { + this.oldest = oldestSample; + this.newest = OptionalLong.of(newestSample); + } + + @VisibleForTesting + protected NodeResponseRateSample(AtomicLongArray times) { + assert times.length() >= 1; + this.oldest = times.get(0); + this.newest = (times.length() > 1) ? OptionalLong.of(times.get(1)) : OptionalLong.empty(); + } + + // Our newest sample becomes the oldest in the next generation + private NodeResponseRateSample next() { + return new NodeResponseRateSample(this.getNewestValidSample(), nanoTime()); + } + + // If we have a pair of values return the newest, otherwise we have just one value... so just + // return it + private long getNewestValidSample() { + return this.newest.orElse(this.oldest); + } + + // response rate is considered insufficient when less than 2 responses were obtained in + // the past interval delimited by RESPONSE_COUNT_RESET_INTERVAL_NANOS. + private boolean hasSufficientResponses(long now) { + // If we only have one sample it's an automatic failure + if (!this.newest.isPresent()) return true; + long threshold = now - RESPONSE_COUNT_RESET_INTERVAL_NANOS; + return this.oldest - threshold >= 0; } - return localDc; } - @SuppressWarnings("unchecked") - private Predicate getFilterFromConfig(InternalDriverContext context, String profileName) { - Predicate filterFromBuilder = context.getNodeFilter(profileName); - return (filterFromBuilder != null) - ? filterFromBuilder - : (Predicate) - Reflection.buildFromConfig( - context, - profileName, - DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, - Predicate.class) - .orElse(INCLUDE_ALL_NODES); + @NonNull + @Override + public Map getStartupConfiguration() { + Map parent = super.getStartupConfiguration(); + return ImmutableMap.of( + DefaultLoadBalancingPolicy.class.getSimpleName(), + parent.get(BasicLoadBalancingPolicy.class.getSimpleName())); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java new file mode 100644 index 00000000000..537497b83c8 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/DefaultNodeDistanceEvaluatorHelper.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.util.Reflection; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Map; +import java.util.UUID; +import java.util.function.Predicate; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A {@link NodeDistanceEvaluatorHelper} implementation that fetches the user-supplied evaluator, if + * any, from the programmatic configuration API, or else, from the driver configuration. If no + * user-supplied evaluator can be retrieved, a dummy evaluator will be used which always evaluates + * null distances. + */ +@ThreadSafe +public class DefaultNodeDistanceEvaluatorHelper implements NodeDistanceEvaluatorHelper { + + private static final Logger LOG = + LoggerFactory.getLogger(DefaultNodeDistanceEvaluatorHelper.class); + + @NonNull protected final InternalDriverContext context; + @NonNull protected final DriverExecutionProfile profile; + @NonNull protected final String logPrefix; + + public DefaultNodeDistanceEvaluatorHelper( + @NonNull InternalDriverContext context, + @NonNull DriverExecutionProfile profile, + @NonNull String logPrefix) { + this.context = context; + this.profile = profile; + this.logPrefix = logPrefix; + } + + @NonNull + @Override + public NodeDistanceEvaluator createNodeDistanceEvaluator( + @Nullable String localDc, @NonNull Map nodes) { + NodeDistanceEvaluator nodeDistanceEvaluatorFromConfig = nodeDistanceEvaluatorFromConfig(); + return (node, dc) -> { + NodeDistance distance = nodeDistanceEvaluatorFromConfig.evaluateDistance(node, dc); + if (distance != null) { + LOG.debug("[{}] Evaluator assigned distance {} to node {}", logPrefix, distance, node); + } else { + LOG.debug("[{}] Evaluator did not assign a distance to node {}", logPrefix, node); + } + return distance; + }; + } + + @NonNull + protected NodeDistanceEvaluator nodeDistanceEvaluatorFromConfig() { + NodeDistanceEvaluator evaluator = context.getNodeDistanceEvaluator(profile.getName()); + if (evaluator != null) { + LOG.debug("[{}] Node distance evaluator set programmatically", logPrefix); + } else { + evaluator = + Reflection.buildFromConfig( + context, + profile.getName(), + DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS, + NodeDistanceEvaluator.class) + .orElse(null); + if (evaluator != null) { + LOG.debug("[{}] Node distance evaluator set from configuration", logPrefix); + } 
else { + @SuppressWarnings({"unchecked", "deprecation"}) + Predicate nodeFilterFromConfig = + Reflection.buildFromConfig( + context, + profile.getName(), + DefaultDriverOption.LOAD_BALANCING_FILTER_CLASS, + Predicate.class) + .orElse(null); + if (nodeFilterFromConfig != null) { + evaluator = new NodeFilterToDistanceEvaluatorAdapter(nodeFilterFromConfig); + LOG.debug( + "[{}] Node distance evaluator set from deprecated node filter configuration", + logPrefix); + } + } + } + if (evaluator == null) { + evaluator = PASS_THROUGH_DISTANCE_EVALUATOR; + } + return evaluator; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java new file mode 100644 index 00000000000..8608b855e8d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/InferringLocalDcHelper.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import static com.datastax.oss.driver.internal.core.time.Clock.LOG; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; + +/** + * An implementation of {@link LocalDcHelper} that fetches the user-supplied datacenter, if any, + * from the programmatic configuration API, or else, from the driver configuration. If no local + * datacenter is explicitly defined, this implementation infers the local datacenter from the + * contact points: if all contact points share the same datacenter, that datacenter is returned. If + * the contact points are from different datacenters, or if no contact points reported any + * datacenter, an {@link IllegalStateException} is thrown. + */ +@ThreadSafe +public class InferringLocalDcHelper extends OptionalLocalDcHelper { + + public InferringLocalDcHelper( + @NonNull InternalDriverContext context, + @NonNull DriverExecutionProfile profile, + @NonNull String logPrefix) { + super(context, profile, logPrefix); + } + + /** @return The local datacenter; always present. 
*/ + @NonNull + @Override + public Optional discoverLocalDc(@NonNull Map nodes) { + Optional optionalLocalDc = super.discoverLocalDc(nodes); + if (optionalLocalDc.isPresent()) { + return optionalLocalDc; + } + Set datacenters = new HashSet<>(); + Set contactPoints = context.getMetadataManager().getContactPoints(); + for (Node node : contactPoints) { + String datacenter = node.getDatacenter(); + if (datacenter != null) { + datacenters.add(datacenter); + } + } + if (datacenters.size() == 1) { + String localDc = datacenters.iterator().next(); + LOG.info("[{}] Inferred local DC from contact points: {}", logPrefix, localDc); + return Optional.of(localDc); + } + if (datacenters.isEmpty()) { + throw new IllegalStateException( + "The local DC could not be inferred from contact points, please set it explicitly (see " + + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() + + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)"); + } + throw new IllegalStateException( + String.format( + "No local DC was provided, but the contact points are from different DCs: %s; " + + "please set the local DC explicitly (see " + + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() + + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)", + formatNodesAndDcs(contactPoints))); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java new file mode 100644 index 00000000000..183c7f90dec --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/LocalDcHelper.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; + +@FunctionalInterface +@ThreadSafe +public interface LocalDcHelper { + + /** + * Returns the local datacenter, if it can be discovered, or returns {@link Optional#empty empty} + * otherwise. + * + *

      Implementors may choose to throw {@link IllegalStateException} instead of returning {@link + * Optional#empty empty}, if they require a local datacenter to be defined in order to operate + * properly. + * + * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) + * when the load balancing policy was {@linkplain LoadBalancingPolicy#init(Map, + * LoadBalancingPolicy.DistanceReporter) initialized}. This argument is provided in case + * implementors need to inspect the cluster topology to discover the local datacenter. + * @return The local datacenter, or {@link Optional#empty empty} if none found. + * @throws IllegalStateException if the local datacenter could not be discovered, and this policy + * cannot operate without it. + */ + @NonNull + Optional discoverLocalDc(@NonNull Map nodes); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java new file mode 100644 index 00000000000..9a0e9a2d4ce --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/MandatoryLocalDcHelper.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An implementation of {@link LocalDcHelper} that fetches the user-supplied datacenter, if any, + * from the programmatic configuration API, or else, from the driver configuration. If no local + * datacenter is explicitly defined, this implementation will consider two distinct situations: + * + *
<ol>
 + *   <li>If no explicit contact points were provided, this implementation will infer the local
 + *       datacenter from the implicit contact point (localhost).
 + *   <li>If explicit contact points were provided however, this implementation will throw {@link
 + *       IllegalStateException}.
 + * </ol>
      + */ +@ThreadSafe +public class MandatoryLocalDcHelper extends OptionalLocalDcHelper { + + private static final Logger LOG = LoggerFactory.getLogger(MandatoryLocalDcHelper.class); + + public MandatoryLocalDcHelper( + @NonNull InternalDriverContext context, + @NonNull DriverExecutionProfile profile, + @NonNull String logPrefix) { + super(context, profile, logPrefix); + } + + /** @return The local datacenter; always present. */ + @NonNull + @Override + public Optional discoverLocalDc(@NonNull Map nodes) { + Optional optionalLocalDc = super.discoverLocalDc(nodes); + if (optionalLocalDc.isPresent()) { + return optionalLocalDc; + } + Set contactPoints = context.getMetadataManager().getContactPoints(); + if (context.getMetadataManager().wasImplicitContactPoint()) { + // We only allow automatic inference of the local DC in this specific case + assert contactPoints.size() == 1; + Node contactPoint = contactPoints.iterator().next(); + String localDc = contactPoint.getDatacenter(); + if (localDc != null) { + LOG.debug( + "[{}] Local DC set from implicit contact point {}: {}", + logPrefix, + contactPoint, + localDc); + return Optional.of(localDc); + } else { + throw new IllegalStateException( + "The local DC could not be inferred from implicit contact point, please set it explicitly (see " + + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() + + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter)"); + } + } else { + throw new IllegalStateException( + "Since you provided explicit contact points, the local DC must be explicitly set (see " + + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER.getPath() + + " in the config, or set it programmatically with SessionBuilder.withLocalDatacenter). " + + "Current contact points are: " + + formatNodesAndDcs(contactPoints) + + ". Current DCs in this cluster are: " + + formatDcs(nodes.values())); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java new file mode 100644 index 00000000000..61e094b318a --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeDistanceEvaluatorHelper.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
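Taken together, these helpers establish the lookup order for the local datacenter: the programmatic setting wins, then the configuration file, and only then (depending on the helper) inference from the contact points. A minimal sketch of the two user-facing ways to supply it; the address and the DC name `dc1` are placeholders:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import java.net.InetSocketAddress;

// Programmatic setting, picked up first (see OptionalLocalDcHelper.configuredLocalDc()):
CqlSession session =
    CqlSession.builder()
        .addContactPoint(new InetSocketAddress("10.0.0.1", 9042))
        .withLocalDatacenter("dc1")
        .build();

// Equivalent configuration-file setting, used as a fallback (application.conf):
//   datastax-java-driver.basic.load-balancing-policy.local-datacenter = dc1
```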
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Map; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; + +@FunctionalInterface +@ThreadSafe +public interface NodeDistanceEvaluatorHelper { + + NodeDistanceEvaluator PASS_THROUGH_DISTANCE_EVALUATOR = (node, localDc) -> null; + + /** + * Creates a new node distance evaluator. + * + * @param localDc The local datacenter, or null if none defined. + * @param nodes All the nodes that were known to exist in the cluster (regardless of their state) + * when the load balancing policy was {@linkplain LoadBalancingPolicy#init(Map, + * LoadBalancingPolicy.DistanceReporter) initialized}. This argument is provided in case + * implementors need to inspect the cluster topology to create the node distance evaluator. + * @return the node distance evaluator to use. + */ + @NonNull + NodeDistanceEvaluator createNodeDistanceEvaluator( + @Nullable String localDc, @NonNull Map nodes); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java new file mode 100644 index 00000000000..902018fb7d4 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/NodeFilterToDistanceEvaluatorAdapter.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.function.Predicate; + +public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator { + + private final Predicate nodeFilter; + + public NodeFilterToDistanceEvaluatorAdapter(@NonNull Predicate nodeFilter) { + this.nodeFilter = nodeFilter; + } + + @Nullable + @Override + public NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc) { + return nodeFilter.test(node) ? 
null : NodeDistance.IGNORED; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java new file mode 100644 index 00000000000..c6143f3fa16 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/helper/OptionalLocalDcHelper.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.helper; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An implementation of {@link LocalDcHelper} that fetches the local datacenter from the + * programmatic configuration API, or else, from the driver configuration. If no user-supplied + * datacenter can be retrieved, it returns {@link Optional#empty empty}. + */ +@ThreadSafe +public class OptionalLocalDcHelper implements LocalDcHelper { + + private static final Logger LOG = LoggerFactory.getLogger(OptionalLocalDcHelper.class); + + @NonNull protected final InternalDriverContext context; + @NonNull protected final DriverExecutionProfile profile; + @NonNull protected final String logPrefix; + + public OptionalLocalDcHelper( + @NonNull InternalDriverContext context, + @NonNull DriverExecutionProfile profile, + @NonNull String logPrefix) { + this.context = context; + this.profile = profile; + this.logPrefix = logPrefix; + } + + /** + * @return The local datacenter from the programmatic configuration API, or from the driver + * configuration; {@link Optional#empty empty} if none found. + */ + @Override + @NonNull + public Optional discoverLocalDc(@NonNull Map nodes) { + Optional localDc = configuredLocalDc(); + if (localDc.isPresent()) { + checkLocalDatacenterCompatibility( + localDc.get(), context.getMetadataManager().getContactPoints()); + } else { + LOG.debug("[{}] Local DC not set, DC awareness will be disabled", logPrefix); + } + return localDc; + } + + /** + * Checks if the contact points are compatible with the local datacenter specified either through + * configuration, or programmatically. + * + *
<p>
      The default implementation logs a warning when a contact point reports a datacenter + * different from the local one, and only for the default profile. + * + * @param localDc The local datacenter, as specified in the config, or programmatically. + * @param contactPoints The contact points provided when creating the session. + */ + protected void checkLocalDatacenterCompatibility( + @NonNull String localDc, Set contactPoints) { + if (profile.getName().equals(DriverExecutionProfile.DEFAULT_NAME)) { + Set badContactPoints = new LinkedHashSet<>(); + for (Node node : contactPoints) { + if (!Objects.equals(localDc, node.getDatacenter())) { + badContactPoints.add(node); + } + } + if (!badContactPoints.isEmpty()) { + LOG.warn( + "[{}] You specified {} as the local DC, but some contact points are from a different DC: {}; " + + "please provide the correct local DC, or check your contact points", + logPrefix, + localDc, + formatNodesAndDcs(badContactPoints)); + } + } + } + + /** + * Formats the given nodes as a string detailing each contact point and its datacenter, for + * informational purposes. + */ + @NonNull + protected String formatNodesAndDcs(Iterable nodes) { + List l = new ArrayList<>(); + for (Node node : nodes) { + l.add(node + "=" + node.getDatacenter()); + } + return String.join(", ", l); + } + + /** + * Formats the given nodes as a string detailing each distinct datacenter, for informational + * purposes. + */ + @NonNull + protected String formatDcs(Iterable nodes) { + List l = new ArrayList<>(); + for (Node node : nodes) { + if (node.getDatacenter() != null) { + l.add(node.getDatacenter()); + } + } + return String.join(", ", new TreeSet<>(l)); + } + + /** @return Local data center set programmatically or from configuration file. */ + @NonNull + public Optional configuredLocalDc() { + String localDc = context.getLocalDatacenter(profile.getName()); + if (localDc != null) { + LOG.debug("[{}] Local DC set programmatically: {}", logPrefix, localDc); + return Optional.of(localDc); + } else if (profile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) { + localDc = profile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER); + LOG.debug("[{}] Local DC set from configuration: {}", logPrefix, localDc); + return Optional.of(localDc); + } + return Optional.empty(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java new file mode 100644 index 00000000000..2a6e79023de --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSet.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class DcAgnosticNodeSet implements NodeSet { + + private final Set nodes = new CopyOnWriteArraySet<>(); + + @Override + public boolean add(@NonNull Node node) { + return nodes.add(node); + } + + @Override + public boolean remove(@NonNull Node node) { + return nodes.remove(node); + } + + @Override + @NonNull + public Set dc(@Nullable String dc) { + return nodes; + } + + @Override + public Set dcs() { + return Collections.emptySet(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java new file mode 100644 index 00000000000..37f02bec878 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSet.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
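`DcAgnosticNodeSet` above, like the other `NodeSet` implementations in this patch, stores nodes in a `CopyOnWriteArraySet`. The point of that choice is that query-plan computation can iterate over a stable snapshot while topology events add or remove nodes concurrently; a hedged illustration (`nodes` and the DC name are placeholders):

```java
Set<Node> local = nodes.dc("dc1");
// toArray() copies the current backing array of the CopyOnWriteArraySet, so the
// query plan is computed over a consistent snapshot even if add()/remove() run
// concurrently on another thread.
Object[] snapshot = local.toArray();
```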
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.atomic.AtomicBoolean; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class MultiDcNodeSet implements NodeSet { + + private static final String UNKNOWN_DC = ""; + + private final Map> nodes = new ConcurrentHashMap<>(); + + @Override + public boolean add(@NonNull Node node) { + AtomicBoolean added = new AtomicBoolean(); + nodes.compute( + getMapKey(node), + (key, current) -> { + if (current == null) { + // We use CopyOnWriteArraySet because we need + // 1) to preserve insertion order, and + // 2) a "snapshot"-style toArray() implementation + current = new CopyOnWriteArraySet<>(); + } + if (current.add(node)) { + added.set(true); + } + return current; + }); + return added.get(); + } + + @Override + public boolean remove(@NonNull Node node) { + AtomicBoolean removed = new AtomicBoolean(); + nodes.compute( + getMapKey(node), + (key, current) -> { + if (current != null) { + if (current.remove(node)) { + removed.set(true); + } + } + return current; + }); + return removed.get(); + } + + @Override + @NonNull + public Set dc(@Nullable String dc) { + return nodes.getOrDefault(getMapKey(dc), Collections.emptySet()); + } + + @Override + public Set dcs() { + return nodes.keySet(); + } + + @NonNull + private String getMapKey(@NonNull Node node) { + return getMapKey(node.getDatacenter()); + } + + @NonNull + private String getMapKey(@Nullable String dc) { + return dc == null ? UNKNOWN_DC : dc; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java new file mode 100644 index 00000000000..66460e16a7c --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/NodeSet.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Set; +import net.jcip.annotations.ThreadSafe; + +/** + * A thread-safe abstraction around a map of nodes per datacenter, to facilitate node management by + * load balancing policies. 
+ */ +@ThreadSafe +public interface NodeSet { + + /** + * Adds the given node to this set. + * + *
<p>
      If this set was initialized with datacenter awareness, the node will be added to its + * datacenter's specific set; otherwise, the node is added to a general set containing all nodes + * in the cluster. + * + * @param node The node to add. + * @return true if the node was added, false otherwise (because it was already present). + */ + boolean add(@NonNull Node node); + + /** + * Removes the node from the set. + * + * @param node The node to remove. + * @return true if the node was removed, false otherwise (because it was not present). + */ + boolean remove(@NonNull Node node); + + /** + * Returns the current nodes in the given datacenter. + * + *
<p>
      If this set was initialized with datacenter awareness, the returned set will contain only + * nodes pertaining to the given datacenter; otherwise, the given datacenter name is ignored and + * the returned set will contain all nodes in the cluster. + * + * @param dc The datacenter name, or null if the datacenter name is not known, or irrelevant. + * @return the current nodes in the given datacenter. + */ + @NonNull + Set dc(@Nullable String dc); + + /** + * Returns the current datacenter names known to this set. If datacenter awareness has been + * disabled, this method returns an empty set. + */ + Set dcs(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java new file mode 100644 index 00000000000..21c89d46927 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSet.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
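The interface is deliberately small; a hedged usage sketch (the node variables and DC names are placeholders):

```java
NodeSet nodes = new MultiDcNodeSet();
nodes.add(node1);                        // grouped under node1.getDatacenter()
nodes.add(node2);
Set<Node> localNodes = nodes.dc("dc1");  // only the nodes that report DC "dc1"
Set<String> knownDcs = nodes.dcs();      // every DC seen so far
```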
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class SingleDcNodeSet implements NodeSet { + + private final Set nodes = new CopyOnWriteArraySet<>(); + + private final String dc; + private final Set dcs; + + public SingleDcNodeSet(@NonNull String dc) { + this.dc = dc; + dcs = ImmutableSet.of(dc); + } + + @Override + public boolean add(@NonNull Node node) { + if (Objects.equals(node.getDatacenter(), dc)) { + return nodes.add(node); + } + return false; + } + + @Override + public boolean remove(@NonNull Node node) { + if (Objects.equals(node.getDatacenter(), dc)) { + return nodes.remove(node); + } + return false; + } + + @Override + @NonNull + public Set dc(@Nullable String dc) { + if (Objects.equals(this.dc, dc)) { + return nodes; + } + return Collections.emptySet(); + } + + @Override + public Set dcs() { + return dcs; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java index 088d5d0ea68..ac68b92fef2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
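With all three implementations in place, a policy can pick the flavor that matches its datacenter awareness. This is only a sketch of the selection logic (the `dcAware` flag is hypothetical), not code from this patch:

```java
NodeSet createNodeSet(String localDc, boolean dcAware) {
  if (!dcAware) {
    return new DcAgnosticNodeSet();      // one flat set, DC ignored
  } else if (localDc != null) {
    return new SingleDcNodeSet(localDc); // retains only nodes from the local DC
  } else {
    return new MultiDcNodeSet();         // nodes grouped per DC
  }
}
```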
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,11 +39,10 @@ public class AddNodeRefresh extends NodesRefresh { public Result compute( DefaultMetadata oldMetadata, boolean tokenMapEnabled, InternalDriverContext context) { Map oldNodes = oldMetadata.getNodes(); - if (oldNodes.containsKey(newNodeInfo.getHostId())) { - return new Result(oldMetadata); - } else { + Node existing = oldNodes.get(newNodeInfo.getHostId()); + if (existing == null) { DefaultNode newNode = new DefaultNode(newNodeInfo.getEndPoint(), context); - copyInfos(newNodeInfo, newNode, null, context.getSessionName()); + copyInfos(newNodeInfo, newNode, context); Map newNodes = ImmutableMap.builder() .putAll(oldNodes) @@ -50,6 +51,19 @@ public Result compute( return new Result( oldMetadata.withNodes(newNodes, tokenMapEnabled, false, null, context), ImmutableList.of(NodeStateEvent.added(newNode))); + } else { + // If a node is restarted after changing its broadcast RPC address, Cassandra considers that + // an addition, even though the host_id hasn't changed :( + // Update the existing instance and emit an UP event to trigger a pool reconnection. + if (!existing.getEndPoint().equals(newNodeInfo.getEndPoint())) { + copyInfos(newNodeInfo, ((DefaultNode) existing), context); + assert newNodeInfo.getBroadcastRpcAddress().isPresent(); // always for peer nodes + return new Result( + oldMetadata, + ImmutableList.of(TopologyEvent.suggestUp(newNodeInfo.getBroadcastRpcAddress().get()))); + } else { + return new Result(oldMetadata); + } } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java new file mode 100644 index 00000000000..021824a9b16 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/CloudTopologyMonitor.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.net.InetSocketAddress; +import java.util.Objects; +import java.util.UUID; + +public class CloudTopologyMonitor extends DefaultTopologyMonitor { + + private final InetSocketAddress cloudProxyAddress; + + public CloudTopologyMonitor(InternalDriverContext context, InetSocketAddress cloudProxyAddress) { + super(context); + this.cloudProxyAddress = cloudProxyAddress; + } + + @NonNull + @Override + protected EndPoint buildNodeEndPoint( + @NonNull AdminRow row, + @Nullable InetSocketAddress broadcastRpcAddress, + @NonNull EndPoint localEndPoint) { + UUID hostId = Objects.requireNonNull(row.getUuid("host_id")); + return new SniEndPoint(cloudProxyAddress, hostId.toString()); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java index d19470afe2f..7ffbee8e4bb 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPoint.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +19,13 @@ import com.datastax.oss.driver.api.core.metadata.EndPoint; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.net.InetSocketAddress; import java.util.Objects; -public class DefaultEndPoint implements EndPoint { +public class DefaultEndPoint implements EndPoint, Serializable { + + private static final long serialVersionUID = 1; private final InetSocketAddress address; private final String metricPrefix; @@ -41,8 +46,16 @@ public boolean equals(Object other) { if (other == this) { return true; } else if (other instanceof DefaultEndPoint) { - DefaultEndPoint that = (DefaultEndPoint) other; - return this.address.equals(that.address); + InetSocketAddress thisAddress = this.address; + InetSocketAddress thatAddress = ((DefaultEndPoint) other).address; + // If only one of the addresses is unresolved, resolve the other. Otherwise (both resolved or + // both unresolved), compare as-is. 
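+      // (Note: InetSocketAddress.equals never matches a resolved address against an
+      // unresolved one, even for the same host name and port, hence the normalization below.)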
+ if (thisAddress.isUnresolved() && !thatAddress.isUnresolved()) { + thisAddress = new InetSocketAddress(thisAddress.getHostName(), thisAddress.getPort()); + } else if (thatAddress.isUnresolved() && !thisAddress.isUnresolved()) { + thatAddress = new InetSocketAddress(thatAddress.getHostName(), thatAddress.getPort()); + } + return thisAddress.equals(thatAddress); } else { return false; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java index b8c4008775a..38f7e4a093e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,17 +48,22 @@ public class DefaultMetadata implements Metadata { private static final Logger LOG = LoggerFactory.getLogger(DefaultMetadata.class); public static DefaultMetadata EMPTY = - new DefaultMetadata(Collections.emptyMap(), Collections.emptyMap(), null); + new DefaultMetadata(Collections.emptyMap(), Collections.emptyMap(), null, null); protected final Map nodes; protected final Map keyspaces; protected final TokenMap tokenMap; + protected final String clusterName; protected DefaultMetadata( - Map nodes, Map keyspaces, TokenMap tokenMap) { + Map nodes, + Map keyspaces, + TokenMap tokenMap, + String clusterName) { this.nodes = nodes; this.keyspaces = keyspaces; this.tokenMap = tokenMap; + this.clusterName = clusterName; } @NonNull @@ -77,6 +84,12 @@ public Optional getTokenMap() { return Optional.ofNullable(tokenMap); } + @NonNull + @Override + public Optional getClusterName() { + return Optional.ofNullable(clusterName); + } + /** * Refreshes the current metadata with the given list of nodes. 
* @@ -102,7 +115,8 @@ public DefaultMetadata withNodes( ImmutableMap.copyOf(newNodes), this.keyspaces, rebuildTokenMap( - newNodes, keyspaces, tokenMapEnabled, forceFullRebuild, tokenFactory, context)); + newNodes, keyspaces, tokenMapEnabled, forceFullRebuild, tokenFactory, context), + context.getChannelFactory().getClusterName()); } public DefaultMetadata withSchema( @@ -112,7 +126,8 @@ public DefaultMetadata withSchema( return new DefaultMetadata( this.nodes, ImmutableMap.copyOf(newKeyspaces), - rebuildTokenMap(nodes, newKeyspaces, tokenMapEnabled, false, null, context)); + rebuildTokenMap(nodes, newKeyspaces, tokenMapEnabled, false, null, context), + context.getChannelFactory().getClusterName()); } @Nullable diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java index a4858fcc03f..28f9e2de81c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNode.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,8 +24,10 @@ import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; import java.net.InetSocketAddress; import java.util.Collections; import java.util.Map; @@ -37,10 +41,13 @@ * from {@link MetadataManager}'s admin thread. 
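With the `clusterName` field added to `DefaultMetadata` above, the cluster name becomes reachable through the public `Metadata` API. A minimal sketch, assuming an already-connected `session`:

```java
Optional<String> clusterName = session.getMetadata().getClusterName();
clusterName.ifPresent(name -> System.out.println("Connected to cluster: " + name));
```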
*/ @ThreadSafe -public class DefaultNode implements Node { +public class DefaultNode implements Node, Serializable { + + private static final long serialVersionUID = 1; - private final EndPoint endPoint; - private final NodeMetricUpdater metricUpdater; + private volatile EndPoint endPoint; + // A deserialized node is not attached to a session anymore, so we don't need to retain this + private transient volatile NodeMetricUpdater metricUpdater; volatile InetSocketAddress broadcastRpcAddress; volatile InetSocketAddress broadcastAddress; @@ -80,6 +87,18 @@ public EndPoint getEndPoint() { return endPoint; } + public void setEndPoint(@NonNull EndPoint newEndPoint, @NonNull InternalDriverContext context) { + if (!newEndPoint.equals(endPoint)) { + endPoint = newEndPoint; + + // The endpoint is also used to build metric names, so make sure they get updated + NodeMetricUpdater previousMetricUpdater = metricUpdater; + if (!(previousMetricUpdater instanceof NoopNodeMetricUpdater)) { + metricUpdater = context.getMetricsFactory().newNodeUpdater(this); + } + } + } + @NonNull @Override public Optional getBroadcastRpcAddress() { @@ -116,7 +135,7 @@ public Version getCassandraVersion() { return cassandraVersion; } - @NonNull + @Nullable @Override public UUID getHostId() { return hostId; @@ -165,28 +184,11 @@ public NodeMetricUpdater getMetricUpdater() { return metricUpdater; } - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other instanceof Node) { - Node that = (Node) other; - // hostId is the natural identifier, but unfortunately we don't know it for contact points - // until the driver has opened the first connection. - return this.endPoint.equals(that.getEndPoint()); - } else { - return false; - } - } - - @Override - public int hashCode() { - return endPoint.hashCode(); - } - @Override public String toString() { - return endPoint.toString(); + // Include the hash code because this class uses reference equality + return String.format( + "Node(endPoint=%s, hostId=%s, hashCode=%x)", getEndPoint(), getHostId(), hashCode()); } /** Note: deliberately not exposed by the public interface. */ diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java index 7b34a33856d..8908f0be078 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeInfo.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java index dc7829ab3e7..f3dc988cfbc 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.oss.driver.internal.core.metadata; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metadata.EndPoint; @@ -29,6 +33,8 @@ import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; import com.datastax.oss.protocol.internal.ProtocolConstants; import com.datastax.oss.protocol.internal.response.Error; import edu.umd.cs.findbugs.annotations.NonNull; @@ -44,6 +50,7 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; @@ -63,6 +70,10 @@ public class DefaultTopologyMonitor implements TopologyMonitor { // Assume topology queries never need paging private static final int INFINITE_PAGE_SIZE = -1; + // A few system.peers columns which get special handling below + private static final String NATIVE_PORT = "native_port"; + private static final String NATIVE_TRANSPORT_PORT = "native_transport_port"; + private final String logPrefix; private final InternalDriverContext context; private final ControlConnection controlConnection; @@ -120,7 +131,7 @@ public CompletionStage> refreshNode(Node node) { query( channel, "SELECT * FROM " - + retrievePeerTableName() + + getPeerTableName() + " WHERE peer = :address and peer_port = :port", ImmutableMap.of( "address", 
@@ -131,12 +142,12 @@ public CompletionStage> refreshNode(Node node) { query = query( channel, - "SELECT * FROM " + retrievePeerTableName() + " WHERE peer = :address", + "SELECT * FROM " + getPeerTableName() + " WHERE peer = :address", ImmutableMap.of("address", node.getBroadcastAddress().get().getAddress())); } return query.thenApply(result -> firstPeerRowAsNodeInfo(result, localEndPoint)); } else { - return query(channel, "SELECT * FROM " + retrievePeerTableName()) + return query(channel, "SELECT * FROM " + getPeerTableName()) .thenApply(result -> findInPeers(result, node.getHostId(), localEndPoint)); } } @@ -149,7 +160,7 @@ public CompletionStage> getNewNodeInfo(InetSocketAddress broa LOG.debug("[{}] Fetching info for new node {}", logPrefix, broadcastRpcAddress); DriverChannel channel = controlConnection.channel(); EndPoint localEndPoint = channel.getEndPoint(); - return query(channel, "SELECT * FROM " + retrievePeerTableName()) + return query(channel, "SELECT * FROM " + getPeerTableName()) .thenApply(result -> findInPeers(result, broadcastRpcAddress, localEndPoint)); } @@ -197,14 +208,18 @@ public CompletionStage> refreshNodeList() { (controlNodeResult, peersResult) -> { List nodeInfos = new ArrayList<>(); AdminRow localRow = controlNodeResult.iterator().next(); - InetSocketAddress localBroadcastRpcAddress = getBroadcastRpcAddress(localRow); + InetSocketAddress localBroadcastRpcAddress = + getBroadcastRpcAddress(localRow, localEndPoint); nodeInfos.add(nodeInfoBuilder(localRow, localBroadcastRpcAddress, localEndPoint).build()); for (AdminRow peerRow : peersResult) { if (isPeerValid(peerRow)) { - InetSocketAddress peerBroadcastRpcAddress = getBroadcastRpcAddress(peerRow); - NodeInfo nodeInfo = - nodeInfoBuilder(peerRow, peerBroadcastRpcAddress, localEndPoint).build(); - nodeInfos.add(nodeInfo); + InetSocketAddress peerBroadcastRpcAddress = + getBroadcastRpcAddress(peerRow, localEndPoint); + if (peerBroadcastRpcAddress != null) { + NodeInfo nodeInfo = + nodeInfoBuilder(peerRow, peerBroadcastRpcAddress, localEndPoint).build(); + nodeInfos.add(nodeInfo); + } } } return nodeInfos; @@ -242,20 +257,23 @@ public CompletionStage forceCloseAsync() { @VisibleForTesting protected CompletionStage query( DriverChannel channel, String queryString, Map parameters) { - return AdminRequestHandler.query( - channel, queryString, parameters, timeout, INFINITE_PAGE_SIZE, logPrefix) - .start(); + AdminRequestHandler handler; + try { + handler = + AdminRequestHandler.query( + channel, queryString, parameters, timeout, INFINITE_PAGE_SIZE, logPrefix); + } catch (Exception e) { + return CompletableFutures.failedFuture(e); + } + return handler.start(); } private CompletionStage query(DriverChannel channel, String queryString) { return query(channel, queryString, Collections.emptyMap()); } - private String retrievePeerTableName() { - if (isSchemaV2) { - return "system.peers_v2"; - } - return "system.peers"; + private String getPeerTableName() { + return isSchemaV2 ? 
"system.peers_v2" : "system.peers"; } private Optional firstPeerRowAsNodeInfo(AdminResult result, EndPoint localEndPoint) { @@ -263,8 +281,10 @@ private Optional firstPeerRowAsNodeInfo(AdminResult result, EndPoint l if (iterator.hasNext()) { AdminRow row = iterator.next(); if (isPeerValid(row)) { - InetSocketAddress peerBroadcastRpcAddress = getBroadcastRpcAddress(row); - return Optional.of(nodeInfoBuilder(row, peerBroadcastRpcAddress, localEndPoint).build()); + return Optional.ofNullable(getBroadcastRpcAddress(row, localEndPoint)) + .map( + broadcastRpcAddress -> + nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); } } return Optional.empty(); @@ -291,22 +311,7 @@ protected DefaultNodeInfo.Builder nodeInfoBuilder( @Nullable InetSocketAddress broadcastRpcAddress, @NonNull EndPoint localEndPoint) { - boolean peer = row.contains("peer"); - - EndPoint endPoint; - if (peer) { - // If this node is a peer, its broadcast RPC address must be present. - Objects.requireNonNull( - broadcastRpcAddress, "broadcastRpcAddress cannot be null for a peer row"); - // Deployments that use a custom EndPoint implementation will need their own TopologyMonitor. - // One simple approach is to extend this class and override this method. - endPoint = new DefaultEndPoint(context.getAddressTranslator().translate(broadcastRpcAddress)); - } else { - // Don't rely on system.local.rpc_address for the control node, because it mistakenly - // reports the normal RPC address instead of the broadcast one (CASSANDRA-11181). We - // already know the endpoint anyway since we've just used it to query. - endPoint = localEndPoint; - } + EndPoint endPoint = buildNodeEndPoint(row, broadcastRpcAddress, localEndPoint); // in system.local InetAddress broadcastInetAddress = row.getInetAddress("broadcast_address"); @@ -344,18 +349,92 @@ protected DefaultNodeInfo.Builder nodeInfoBuilder( listenAddress = new InetSocketAddress(listenInetAddress, listenPort); } - return DefaultNodeInfo.builder() - .withEndPoint(endPoint) - .withBroadcastRpcAddress(broadcastRpcAddress) - .withBroadcastAddress(broadcastAddress) - .withListenAddress(listenAddress) - .withDatacenter(row.getString("data_center")) - .withRack(row.getString("rack")) - .withCassandraVersion(row.getString("release_version")) - .withTokens(row.getSetOfString("tokens")) - .withPartitioner(row.getString("partitioner")) - .withHostId(Objects.requireNonNull(row.getUuid("host_id"))) - .withSchemaVersion(row.getUuid("schema_version")); + DefaultNodeInfo.Builder builder = + DefaultNodeInfo.builder() + .withEndPoint(endPoint) + .withBroadcastRpcAddress(broadcastRpcAddress) + .withBroadcastAddress(broadcastAddress) + .withListenAddress(listenAddress) + .withDatacenter(row.getString("data_center")) + .withRack(row.getString("rack")) + .withCassandraVersion(row.getString("release_version")) + .withTokens(row.getSetOfString("tokens")) + .withPartitioner(row.getString("partitioner")) + .withHostId(Objects.requireNonNull(row.getUuid("host_id"))) + .withSchemaVersion(row.getUuid("schema_version")); + + // Handle DSE-specific columns, if present + String rawVersion = row.getString("dse_version"); + if (rawVersion != null) { + builder.withExtra(DseNodeProperties.DSE_VERSION, Version.parse(rawVersion)); + } + + ImmutableSet.Builder workloadsBuilder = ImmutableSet.builder(); + Boolean legacyGraph = row.getBoolean("graph"); // DSE 5.0 + if (legacyGraph != null && legacyGraph) { + workloadsBuilder.add("Graph"); + } + String legacyWorkload = row.getString("workload"); // DSE 5.0 (other than 
graph) + if (legacyWorkload != null) { + workloadsBuilder.add(legacyWorkload); + } + Set modernWorkloads = row.getSetOfString("workloads"); // DSE 5.1+ + if (modernWorkloads != null) { + workloadsBuilder.addAll(modernWorkloads); + } + ImmutableSet workloads = workloadsBuilder.build(); + if (!workloads.isEmpty()) { + builder.withExtra(DseNodeProperties.DSE_WORKLOADS, workloads); + } + + // Note: withExtra discards null values + builder + .withExtra(DseNodeProperties.SERVER_ID, row.getString("server_id")) + .withExtra(DseNodeProperties.NATIVE_TRANSPORT_PORT, row.getInteger("native_transport_port")) + .withExtra( + DseNodeProperties.NATIVE_TRANSPORT_PORT_SSL, + row.getInteger("native_transport_port_ssl")) + .withExtra(DseNodeProperties.STORAGE_PORT, row.getInteger("storage_port")) + .withExtra(DseNodeProperties.STORAGE_PORT_SSL, row.getInteger("storage_port_ssl")) + .withExtra(DseNodeProperties.JMX_PORT, row.getInteger("jmx_port")); + + return builder; + } + + /** + * Builds the node's endpoint from the given row. + * + * @param broadcastRpcAddress this is a parameter only because we already have it when we come + * from {@link #findInPeers(AdminResult, InetSocketAddress, EndPoint)}. Callers that don't + * already have it can use {@link #getBroadcastRpcAddress}. For the control host, this can be + * null; if this node is a peer however, this cannot be null, since we use that address to + * create the node's endpoint. Callers can use {@link #isPeerValid(AdminRow)} to check that + * before calling this method. + * @param localEndPoint the control node endpoint that was used to query the node's system tables. + * This is a parameter because it would be racy to call {@code + * controlConnection.channel().getEndPoint()} from within this method, as the control + * connection may have changed its channel since. So this parameter must be provided by the + * caller. + */ + @NonNull + protected EndPoint buildNodeEndPoint( + @NonNull AdminRow row, + @Nullable InetSocketAddress broadcastRpcAddress, + @NonNull EndPoint localEndPoint) { + boolean peer = row.contains("peer"); + if (peer) { + // If this node is a peer, its broadcast RPC address must be present. + Objects.requireNonNull( + broadcastRpcAddress, "broadcastRpcAddress cannot be null for a peer row"); + // Deployments that use a custom EndPoint implementation will need their own TopologyMonitor. + // One simple approach is to extend this class and override this method. + return new DefaultEndPoint(context.getAddressTranslator().translate(broadcastRpcAddress)); + } else { + // Don't rely on system.local.rpc_address for the control node, because it mistakenly + // reports the normal RPC address instead of the broadcast one (CASSANDRA-11181). We + // already know the endpoint anyway since we've just used it to query. 
+ return localEndPoint; + } } // Called when a new node is being added; the peers table is keyed by broadcast_address, @@ -364,7 +443,7 @@ protected DefaultNodeInfo.Builder nodeInfoBuilder( private Optional findInPeers( AdminResult result, InetSocketAddress broadcastRpcAddressToFind, EndPoint localEndPoint) { for (AdminRow row : result) { - InetSocketAddress broadcastRpcAddress = getBroadcastRpcAddress(row); + InetSocketAddress broadcastRpcAddress = getBroadcastRpcAddress(row, localEndPoint); if (broadcastRpcAddress != null && broadcastRpcAddress.equals(broadcastRpcAddressToFind) && isPeerValid(row)) { @@ -383,8 +462,10 @@ private Optional findInPeers( for (AdminRow row : result) { UUID hostId = row.getUuid("host_id"); if (hostId != null && hostId.equals(hostIdToFind) && isPeerValid(row)) { - InetSocketAddress broadcastRpcAddress = getBroadcastRpcAddress(row); - return Optional.of(nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); + return Optional.ofNullable(getBroadcastRpcAddress(row, localEndPoint)) + .map( + broadcastRpcAddress -> + nodeInfoBuilder(row, broadcastRpcAddress, localEndPoint).build()); } } LOG.debug("[{}] Could not find any peer row matching {}", logPrefix, hostIdToFind); @@ -407,34 +488,92 @@ private void savePort(DriverChannel channel) { * Determines the broadcast RPC address of the node represented by the given row. * * @param row The row to inspect; can represent either a local (control) node or a peer node. + * @param localEndPoint the control node endpoint that was used to query the node's system tables. + * This is a parameter because it would be racy to call {@code + * controlConnection.channel().getEndPoint()} from within this method, as the control + * connection may have changed its channel since. So this parameter must be provided by the + * caller. * @return the broadcast RPC address of the node, if it could be determined; or {@code null} * otherwise. */ @Nullable - protected InetSocketAddress getBroadcastRpcAddress(@NonNull AdminRow row) { - // in system.peers or system.local - InetAddress broadcastRpcInetAddress = row.getInetAddress("rpc_address"); + protected InetSocketAddress getBroadcastRpcAddress( + @NonNull AdminRow row, @NonNull EndPoint localEndPoint) { + + InetAddress broadcastRpcInetAddress = null; + Iterator addrCandidates = + Iterators.forArray( + // in system.peers_v2 (Cassandra >= 4.0) + "native_address", + // DSE 6.8 introduced native_transport_address and native_transport_port for the + // listen address. + "native_transport_address", + // in system.peers or system.local + "rpc_address"); + + while (broadcastRpcInetAddress == null && addrCandidates.hasNext()) + broadcastRpcInetAddress = row.getInetAddress(addrCandidates.next()); + // This could only happen if system tables are corrupted, but handle gracefully if (broadcastRpcInetAddress == null) { - // in system.peers_v2 (Cassandra >= 4.0) - broadcastRpcInetAddress = row.getInetAddress("native_address"); - if (broadcastRpcInetAddress == null) { - // This could only happen if system tables are corrupted, but handle gracefully - return null; + LOG.warn( + "[{}] Unable to determine broadcast RPC IP address, returning null. " + + "This is likely due to a misconfiguration or invalid system tables. 
" + + "Please validate the contents of system.local and/or {}.", + logPrefix, + getPeerTableName()); + return null; + } + + Integer broadcastRpcPort = null; + Iterator portCandidates = + Iterators.forArray( + // in system.peers_v2 (Cassandra >= 4.0) + NATIVE_PORT, + // DSE 6.8 introduced native_transport_address and native_transport_port for the + // listen address. + NATIVE_TRANSPORT_PORT, + // system.local for Cassandra >= 4.0 + "rpc_port"); + + while ((broadcastRpcPort == null || broadcastRpcPort == 0) && portCandidates.hasNext()) { + + String colName = portCandidates.next(); + broadcastRpcPort = row.getInteger(colName); + // Support override for SSL port (if enabled) in DSE + if (NATIVE_TRANSPORT_PORT.equals(colName) && context.getSslEngineFactory().isPresent()) { + + String sslColName = colName + "_ssl"; + broadcastRpcPort = row.getInteger(sslColName); } } - // system.local for Cassandra >= 4.0 - Integer broadcastRpcPort = row.getInteger("rpc_port"); + // use the default port if no port information was found in the row; + // note that in rare situations, the default port might not be known, in which case we + // report zero, as advertised in the javadocs of Node and NodeInfo. if (broadcastRpcPort == null || broadcastRpcPort == 0) { - // system.peers_v2 - broadcastRpcPort = row.getInteger("native_port"); - if (broadcastRpcPort == null || broadcastRpcPort == 0) { - // use the default port if no port information was found in the row; - // note that in rare situations, the default port might not be known, in which case we - // report zero, as advertised in the javadocs of Node and NodeInfo. - broadcastRpcPort = port == -1 ? 0 : port; - } + + LOG.warn( + "[{}] Unable to determine broadcast RPC port. " + + "Trying to fall back to port used by the control connection.", + logPrefix); + broadcastRpcPort = port == -1 ? 0 : port; } - return new InetSocketAddress(broadcastRpcInetAddress, broadcastRpcPort); + + InetSocketAddress broadcastRpcAddress = + new InetSocketAddress(broadcastRpcInetAddress, broadcastRpcPort); + if (row.contains("peer") && broadcastRpcAddress.equals(localEndPoint.resolve())) { + // JAVA-2303: if the peer is actually the control node, ignore that peer as it is likely + // a misconfiguration problem. + LOG.warn( + "[{}] Control node {} has an entry for itself in {}: this entry will be ignored. " + + "This is likely due to a misconfiguration; please verify your rpc_address " + + "configuration in cassandra.yaml on all nodes in your cluster.", + logPrefix, + localEndPoint, + getPeerTableName()); + return null; + } + + return broadcastRpcAddress; } /** @@ -444,21 +583,16 @@ protected InetSocketAddress getBroadcastRpcAddress(@NonNull AdminRow row) { * node's broadcast RPC address and host ID; otherwise the driver may not work properly. */ protected boolean isPeerValid(AdminRow peerRow) { - boolean hasPeersRpcAddress = peerRow.getInetAddress("rpc_address") != null; - boolean hasPeersV2RpcAddress = - peerRow.getInetAddress("native_address") != null - && peerRow.getInteger("native_port") != null; - boolean hasRpcAddress = hasPeersV2RpcAddress || hasPeersRpcAddress; - boolean hasHostId = peerRow.getUuid("host_id") != null; - boolean valid = hasRpcAddress && hasHostId; - if (!valid) { + if (PeerRowValidator.isValid(peerRow)) { + return true; + } else { LOG.warn( "[{}] Found invalid row in {} for peer: {}. 
" + "This is likely a gossip or snitch issue, this node will be ignored.", logPrefix, - retrievePeerTableName(), + getPeerTableName(), peerRow.getInetAddress("peer")); + return false; } - return valid; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java index 638d4f3db99..5d58727484c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/DistanceEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java index 7b6aeae48e2..7388980c230 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -62,17 +64,25 @@ public Result compute( for (NodeInfo nodeInfo : nodeInfos) { UUID id = nodeInfo.getHostId(); - seen.add(id); - DefaultNode node = (DefaultNode) oldNodes.get(id); - if (node == null) { - node = new DefaultNode(nodeInfo.getEndPoint(), context); - LOG.debug("[{}] Adding new node {}", logPrefix, node); - added.put(id, node); - } - if (tokenFactory == null && nodeInfo.getPartitioner() != null) { - tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); + if (seen.contains(id)) { + LOG.warn( + "[{}] Found duplicate entries with host_id {} in system.peers, " + + "keeping only the first one", + logPrefix, + id); + } else { + seen.add(id); + DefaultNode node = (DefaultNode) oldNodes.get(id); + if (node == null) { + node = new DefaultNode(nodeInfo.getEndPoint(), context); + LOG.debug("[{}] Adding new node {}", logPrefix, node); + added.put(id, node); + } + if (tokenFactory == null && nodeInfo.getPartitioner() != null) { + tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); + } + tokensChanged |= copyInfos(nodeInfo, node, context); } - tokensChanged |= copyInfos(nodeInfo, node, tokenFactory, logPrefix); } Set removed = Sets.difference(oldNodes.keySet(), seen); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java index 96ed3b0d19e..517bfca27fa 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +18,17 @@ package com.datastax.oss.driver.internal.core.metadata; import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap; import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; import com.datastax.oss.driver.internal.core.metadata.token.TokenFactoryRegistry; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import java.util.UUID; import net.jcip.annotations.ThreadSafe; @@ -54,54 +59,67 @@ public Result compute( String logPrefix = context.getSessionName(); TokenFactoryRegistry tokenFactoryRegistry = context.getTokenFactoryRegistry(); - assert oldMetadata.getNodes().isEmpty(); - - TokenFactory tokenFactory = - oldMetadata.getTokenMap().map(m -> ((DefaultTokenMap) m).getTokenFactory()).orElse(null); - boolean tokensChanged = false; + // Since this is the first refresh, and we've stored contact points separately until now, the + // metadata is empty. + assert oldMetadata == DefaultMetadata.EMPTY; + TokenFactory tokenFactory = null; - ImmutableMap.Builder<UUID, Node> newNodesBuilder = ImmutableMap.builder(); + Map<UUID, DefaultNode> newNodes = new HashMap<>(); + // Contact point nodes don't have a host ID or other info yet, so we fill them with the node + // info found on the first match by endpoint. + Set<EndPoint> matchedContactPoints = new HashSet<>(); + List<DefaultNode> addedNodes = new ArrayList<>(); for (NodeInfo nodeInfo : nodeInfos) { - EndPoint endPoint = nodeInfo.getEndPoint(); - DefaultNode node = findIn(contactPoints, endPoint); - if (node == null) { - node = new DefaultNode(endPoint, context); - LOG.debug("[{}] Adding new node {}", logPrefix, node); + UUID hostId = nodeInfo.getHostId(); + if (newNodes.containsKey(hostId)) { + LOG.warn( + "[{}] Found duplicate entries with host_id {} in system.peers, " + + "keeping only the first one {}", + logPrefix, + hostId, + newNodes.get(hostId)); } else { - LOG.debug("[{}] Copying contact point {}", logPrefix, node); - } - if (tokenFactory == null && nodeInfo.getPartitioner() != null) { - tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); + EndPoint endPoint = nodeInfo.getEndPoint(); + DefaultNode contactPointNode = findContactPointNode(endPoint); + DefaultNode node; + if (contactPointNode == null || matchedContactPoints.contains(endPoint)) { + node = new DefaultNode(endPoint, context); + addedNodes.add(node); + LOG.debug("[{}] Adding new node {}", logPrefix, node); + } else { + matchedContactPoints.add(contactPointNode.getEndPoint()); + node = contactPointNode; + LOG.debug("[{}] Copying contact point {}", logPrefix, node); + } + if (tokenMapEnabled && tokenFactory == null && nodeInfo.getPartitioner() != null) { + tokenFactory = tokenFactoryRegistry.tokenFactoryFor(nodeInfo.getPartitioner()); + } + copyInfos(nodeInfo, node, 
context); + newNodes.put(hostId, node); } - tokensChanged |= copyInfos(nodeInfo, node, tokenFactory, logPrefix); - newNodesBuilder.put(node.getHostId(), node); } - ImmutableMap newNodes = newNodesBuilder.build(); ImmutableList.Builder eventsBuilder = ImmutableList.builder(); - - for (DefaultNode newNode : newNodes.values()) { - if (!contactPoints.contains(newNode)) { - eventsBuilder.add(NodeStateEvent.added(newNode)); - } + for (DefaultNode addedNode : addedNodes) { + eventsBuilder.add(NodeStateEvent.added(addedNode)); } for (DefaultNode contactPoint : contactPoints) { - if (findIn(newNodes.values(), contactPoint.getEndPoint()) == null) { + if (!matchedContactPoints.contains(contactPoint.getEndPoint())) { eventsBuilder.add(NodeStateEvent.removed(contactPoint)); } } return new Result( oldMetadata.withNodes( - ImmutableMap.copyOf(newNodes), tokenMapEnabled, tokensChanged, tokenFactory, context), + ImmutableMap.copyOf(newNodes), tokenMapEnabled, true, tokenFactory, context), eventsBuilder.build()); } - private DefaultNode findIn(Iterable nodes, EndPoint endPoint) { - for (Node node : nodes) { + private DefaultNode findContactPointNode(EndPoint endPoint) { + for (DefaultNode node : contactPoints) { if (node.getEndPoint().equals(endPoint)) { - return (DefaultNode) node; + return node; } } return null; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java index bdf0c392e0c..5c8473a3b67 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,6 +38,7 @@ import java.util.Map; import java.util.Queue; import java.util.Set; +import java.util.WeakHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; @@ -84,7 +87,7 @@ private enum State { private final String logPrefix; private final ReplayingEventFilter eventFilter = new ReplayingEventFilter<>(this::processNodeStateEvent); - private AtomicReference stateRef = new AtomicReference<>(State.BEFORE_INIT); + private final AtomicReference stateRef = new AtomicReference<>(State.BEFORE_INIT); public LoadBalancingPolicyWrapper( @NonNull InternalDriverContext context, @@ -103,7 +106,7 @@ public LoadBalancingPolicyWrapper( // Just an alias to make the rest of the code more readable this.policies = reporters.keySet(); - this.distances = new HashMap<>(); + this.distances = new WeakHashMap<>(); this.logPrefix = context.getSessionName(); context.getEventBus().register(NodeStateEvent.class, this::onNodeStateEvent); @@ -170,6 +173,7 @@ private void onNodeStateEvent(NodeStateEvent event) { // once it has gone through the filter private void processNodeStateEvent(NodeStateEvent event) { + DefaultNode node = event.node; switch (stateRef.get()) { case BEFORE_INIT: case DURING_INIT: @@ -179,13 +183,13 @@ private void processNodeStateEvent(NodeStateEvent event) { case RUNNING: for (LoadBalancingPolicy policy : policies) { if (event.newState == NodeState.UP) { - policy.onUp(event.node); + policy.onUp(node); } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { - policy.onDown(event.node); + policy.onDown(node); } else if (event.newState == NodeState.UNKNOWN) { - policy.onAdd(event.node); + policy.onAdd(node); } else if (event.newState == null) { - policy.onRemove(event.node); + policy.onRemove(node); } else { LOG.warn("[{}] Unsupported event: {}", logPrefix, event); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java index 2c4dcfa7f3e..efb04bde5e1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataManager.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,6 +27,7 @@ import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.control.ControlConnection; import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; +import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter; import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; @@ -65,6 +68,7 @@ public class MetadataManager implements AsyncAutoCloseable { private volatile DefaultMetadata metadata; // only updated from adminExecutor private volatile boolean schemaEnabledInConfig; private volatile List refreshedKeyspaces; + private volatile KeyspaceFilter keyspaceFilter; private volatile Boolean schemaEnabledProgrammatically; private volatile boolean tokenMapEnabled; private volatile Set contactPoints; @@ -86,6 +90,7 @@ protected MetadataManager(InternalDriverContext context, DefaultMetadata initial this.refreshedKeyspaces = config.getStringList( DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); + this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); this.tokenMapEnabled = config.getBoolean(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED); context.getEventBus().register(ConfigChangeEvent.class, this::onConfigChanged); @@ -100,13 +105,25 @@ private void onConfigChanged(@SuppressWarnings("unused") ConfigChangeEvent event this.refreshedKeyspaces = config.getStringList( DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); + this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); this.tokenMapEnabled = config.getBoolean(DefaultDriverOption.METADATA_TOKEN_MAP_ENABLED); if ((!schemaEnabledBefore || !keyspacesBefore.equals(refreshedKeyspaces) || (!tokenMapEnabledBefore && tokenMapEnabled)) && isSchemaEnabled()) { - refreshSchema(null, false, true); + refreshSchema(null, false, true) + .whenComplete( + (metadata, error) -> { + if (error != null) { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error while refreshing schema after it was re-enabled " + + "in the configuration, keeping previous version", + logPrefix, + error); + } + }); } } @@ -163,7 +180,7 @@ public CompletionStage refreshNode(Node node) { maybeInfo -> { if (maybeInfo.isPresent()) { boolean tokensChanged = - NodesRefresh.copyInfos(maybeInfo.get(), (DefaultNode) node, null, logPrefix); + NodesRefresh.copyInfos(maybeInfo.get(), (DefaultNode) node, context); if (tokensChanged) { apply(new TokensChangedRefresh()); } @@ -210,15 +227,40 @@ public void removeNode(InetSocketAddress broadcastRpcAddress) { * @param flushNow bypass the debouncer and force an immediate refresh (used to avoid a delay at * startup) */ - public CompletionStage refreshSchema( + public CompletionStage refreshSchema( String keyspace, boolean evenIfDisabled, boolean flushNow) { - CompletableFuture future = new CompletableFuture<>(); + CompletableFuture future = new CompletableFuture<>(); RunOrSchedule.on( adminExecutor, () -> 
singleThreaded.refreshSchema(keyspace, evenIfDisabled, flushNow, future)); return future; } + public static class RefreshSchemaResult { + private final Metadata metadata; + private final boolean isSchemaInAgreement; + + public RefreshSchemaResult(Metadata metadata, boolean isSchemaInAgreement) { + this.metadata = metadata; + this.isSchemaInAgreement = isSchemaInAgreement; + } + + public RefreshSchemaResult(Metadata metadata) { + this( + metadata, + // This constructor is used in corner cases where agreement doesn't matter + true); + } + + public Metadata getMetadata() { + return metadata; + } + + public boolean isSchemaInAgreement() { + return isSchemaInAgreement; + } + } + public boolean isSchemaEnabled() { return (schemaEnabledProgrammatically != null) ? schemaEnabledProgrammatically @@ -229,20 +271,12 @@ public CompletionStage<Metadata> setSchemaEnabled(Boolean newValue) { boolean wasEnabledBefore = isSchemaEnabled(); schemaEnabledProgrammatically = newValue; if (!wasEnabledBefore && isSchemaEnabled()) { - return refreshSchema(null, false, true); + return refreshSchema(null, false, true).thenApply(RefreshSchemaResult::getMetadata); } else { return CompletableFuture.completedFuture(metadata); } } - /** - * Returns a future that completes after the first schema refresh attempt, whether that attempt - * succeeded or not (we wait for that refresh at init, but if it fails it's not fatal). - */ - public CompletionStage<Void> firstSchemaRefreshFuture() { - return singleThreaded.firstSchemaRefreshFuture; - } - @NonNull @Override public CompletionStage<Void> closeFuture() { @@ -266,21 +300,23 @@ private class SingleThreaded { private final CompletableFuture<Void> closeFuture = new CompletableFuture<>(); private boolean closeWasCalled; private final CompletableFuture<Void> firstSchemaRefreshFuture = new CompletableFuture<>(); - private final Debouncer<CompletableFuture<Metadata>, CompletableFuture<Metadata>> + private final Debouncer< + CompletableFuture<RefreshSchemaResult>, CompletableFuture<RefreshSchemaResult>> schemaRefreshDebouncer; private final SchemaQueriesFactory schemaQueriesFactory; private final SchemaParserFactory schemaParserFactory; // We don't allow concurrent schema refreshes. If one is already running, the next one is queued // (and the ones after that are merged with the queued one). - private CompletableFuture<Metadata> currentSchemaRefresh; - private CompletableFuture<Metadata> queuedSchemaRefresh; + private CompletableFuture<RefreshSchemaResult> currentSchemaRefresh; + private CompletableFuture<RefreshSchemaResult> queuedSchemaRefresh; private boolean didFirstNodeListRefresh; private SingleThreaded(InternalDriverContext context, DriverExecutionProfile config) { this.schemaRefreshDebouncer = new Debouncer<>( + logPrefix + "|metadata debouncer", adminExecutor, this::coalesceSchemaRequests, this::startSchemaRequest, @@ -333,32 +369,32 @@ private void refreshSchema( String keyspace, boolean evenIfDisabled, boolean flushNow, - CompletableFuture<Metadata> future) { + CompletableFuture<RefreshSchemaResult> future) { if (!didFirstNodeListRefresh) { // This happens if the control connection receives a schema event during init. We can't // refresh yet because we don't know the nodes' versions, simply ignore. - future.complete(metadata); + future.complete(new RefreshSchemaResult(metadata)); return; } // If this is an event, make sure it's not targeting a keyspace that we're ignoring. 
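Not part of the patch, but to make the new keyspace filtering concrete before the `keyspaceFilter.includes(keyspace)` check just below: a minimal standalone sketch. Only `KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces)` and `includes(keyspace)` are taken from this diff; the empty-list behavior is assumed to match the old `refreshedKeyspaces.isEmpty()` shortcut it replaces (include everything), and everything else is invented.

```java
import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter;
import java.util.Arrays;
import java.util.Collections;

public class KeyspaceFilterSketch {
  public static void main(String[] args) {
    // No configured keyspaces: assumed to include everything, like the old
    // refreshedKeyspaces.isEmpty() check.
    KeyspaceFilter all = KeyspaceFilter.newInstance("s0", Collections.emptyList());
    // An explicit list: only the listed keyspaces trigger schema refreshes.
    KeyspaceFilter some = KeyspaceFilter.newInstance("s0", Arrays.asList("ks1", "ks2"));
    System.out.println(all.includes("ks3"));  // expected: true
    System.out.println(some.includes("ks1")); // expected: true
    System.out.println(some.includes("ks3")); // expected: false
  }
}
```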
- boolean isRefreshedKeyspace = - keyspace == null || refreshedKeyspaces.isEmpty() || refreshedKeyspaces.contains(keyspace); + boolean isRefreshedKeyspace = keyspace == null || keyspaceFilter.includes(keyspace); if (isRefreshedKeyspace && (evenIfDisabled || isSchemaEnabled())) { acceptSchemaRequest(future, flushNow); } else { - future.complete(metadata); + future.complete(new RefreshSchemaResult(metadata)); singleThreaded.firstSchemaRefreshFuture.complete(null); } } // An external component has requested a schema refresh, feed it to the debouncer. - private void acceptSchemaRequest(CompletableFuture future, boolean flushNow) { + private void acceptSchemaRequest( + CompletableFuture future, boolean flushNow) { assert adminExecutor.inEventLoop(); if (closeWasCalled) { - future.complete(metadata); + future.complete(new RefreshSchemaResult(metadata)); } else { schemaRefreshDebouncer.receive(future); if (flushNow) { @@ -368,13 +404,13 @@ private void acceptSchemaRequest(CompletableFuture future, boolean flu } // Multiple requests have arrived within the debouncer window, coalesce them. - private CompletableFuture coalesceSchemaRequests( - List> futures) { + private CompletableFuture coalesceSchemaRequests( + List> futures) { assert adminExecutor.inEventLoop(); assert !futures.isEmpty(); // Keep only one, but ensure that the discarded ones will still be completed when we're done - CompletableFuture result = null; - for (CompletableFuture future : futures) { + CompletableFuture result = null; + for (CompletableFuture future : futures) { if (result == null) { result = future; } else { @@ -385,74 +421,82 @@ private CompletableFuture coalesceSchemaRequests( } // The debouncer has flushed, start the actual work. - private void startSchemaRequest(CompletableFuture future) { + private void startSchemaRequest(CompletableFuture refreshFuture) { assert adminExecutor.inEventLoop(); if (closeWasCalled) { - future.complete(metadata); + refreshFuture.complete(new RefreshSchemaResult(metadata)); return; } if (currentSchemaRefresh == null) { - currentSchemaRefresh = future; + currentSchemaRefresh = refreshFuture; LOG.debug("[{}] Starting schema refresh", logPrefix); - maybeInitControlConnection() + initControlConnectionForSchema() .thenCompose(v -> context.getTopologyMonitor().checkSchemaAgreement()) - // 1. Query system tables - .thenCompose(b -> schemaQueriesFactory.newInstance(future).execute()) - // 2. Parse the rows into metadata objects, put them in a MetadataRefresh - // 3. 
Apply the MetadataRefresh - .thenApplyAsync(this::parseAndApplySchemaRows, adminExecutor) .whenComplete( - (v, error) -> { - if (error != null) { - Loggers.warnWithException( - LOG, - "[{}] Unexpected error while refreshing schema, skipping", - logPrefix, - error); + (schemaInAgreement, agreementError) -> { + if (agreementError != null) { + refreshFuture.completeExceptionally(agreementError); + } else { + try { + schemaQueriesFactory + .newInstance() + .execute() + .thenApplyAsync(this::parseAndApplySchemaRows, adminExecutor) + .whenComplete( + (newMetadata, metadataError) -> { + if (metadataError != null) { + refreshFuture.completeExceptionally(metadataError); + } else { + refreshFuture.complete( + new RefreshSchemaResult(newMetadata, schemaInAgreement)); + } + + firstSchemaRefreshFuture.complete(null); + + currentSchemaRefresh = null; + // If another refresh was enqueued during this one, run it now + if (queuedSchemaRefresh != null) { + CompletableFuture<RefreshSchemaResult> tmp = + this.queuedSchemaRefresh; + this.queuedSchemaRefresh = null; + startSchemaRequest(tmp); + } + }); + } catch (Throwable t) { + LOG.debug("[{}] Exception getting new metadata", logPrefix, t); + refreshFuture.completeExceptionally(t); + } } - singleThreaded.firstSchemaRefreshFuture.complete(null); }); } else if (queuedSchemaRefresh == null) { - queuedSchemaRefresh = future; // wait for our turn + queuedSchemaRefresh = refreshFuture; // wait for our turn } else { - CompletableFutures.completeFrom(queuedSchemaRefresh, future); // join the queued request + CompletableFutures.completeFrom( + queuedSchemaRefresh, refreshFuture); // join the queued request } } - // The control connection may or may not have been initialized already by TopologyMonitor. - private CompletionStage<Void> maybeInitControlConnection() { + // To query schema tables, we need the control connection. + // Normally, the topology monitor has already initialized it to query node tables. But if a + // custom topology monitor is in place, it might not use the control connection at all. + private CompletionStage<Void> initControlConnectionForSchema() { if (firstSchemaRefreshFuture.isDone()) { - // Not the first schema refresh, so we know init was attempted already + // We tried to refresh the schema before, so we know we called init already. Don't call it + // again: returning the existing future is cheaper. 
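As an aside on the coalescing a bit further up: `coalesceSchemaRequests` keeps the first future as the one that will do the work and completes the discarded ones from it. Below is a self-contained sketch of that pattern with plain `CompletableFuture` (the driver uses its shaded `CompletableFutures.completeFrom` helper instead; the names here are invented).

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class CoalesceSketch {
  // Keep the first future as the "real" one; every discarded future completes
  // with the same value or failure as soon as the real one does.
  static <T> CompletableFuture<T> coalesce(List<CompletableFuture<T>> futures) {
    CompletableFuture<T> result = futures.get(0);
    for (CompletableFuture<T> discarded : futures.subList(1, futures.size())) {
      result.whenComplete(
          (value, error) -> {
            if (error != null) {
              discarded.completeExceptionally(error);
            } else {
              discarded.complete(value);
            }
          });
    }
    return result;
  }
}
```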
return firstSchemaRefreshFuture; } else { - controlConnection.init(false, true, false); - // The control connection might fail to connect and reattempt, but for the metadata refresh - // that led us here we only care about the first attempt (metadata is not vital, so if we - // can't get it right now it's OK to move on) - return controlConnection.firstConnectionAttemptFuture(); + // Trigger init (a no-op if the topology monitor has already done so) + return controlConnection.init(false, true, false); } } - private Void parseAndApplySchemaRows(SchemaRows schemaRows) { + private Metadata parseAndApplySchemaRows(SchemaRows schemaRows) { assert adminExecutor.inEventLoop(); - assert schemaRows.refreshFuture() == currentSchemaRefresh; - try { - SchemaRefresh schemaRefresh = schemaParserFactory.newInstance(schemaRows).parse(); - long start = System.nanoTime(); - apply(schemaRefresh); - currentSchemaRefresh.complete(metadata); - LOG.debug( - "[{}] Applying schema refresh took {}", logPrefix, NanoTime.formatTimeSince(start)); - } catch (Throwable t) { - currentSchemaRefresh.completeExceptionally(t); - } - currentSchemaRefresh = null; - if (queuedSchemaRefresh != null) { - CompletableFuture<Metadata> tmp = this.queuedSchemaRefresh; - this.queuedSchemaRefresh = null; - startSchemaRequest(tmp); - } - return null; + SchemaRefresh schemaRefresh = schemaParserFactory.newInstance(schemaRows).parse(); + long start = System.nanoTime(); + apply(schemaRefresh); + LOG.debug("[{}] Applying schema refresh took {}", logPrefix, NanoTime.formatTimeSince(start)); + return metadata; } private void close() { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java index aab77f2a756..fc31f317622 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MetadataRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java new file mode 100644 index 00000000000..8ee6d04bbae --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListener.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.internal.core.util.Loggers; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Combines multiple node state listeners into a single one. + * + *
<p>
      Any exception thrown by a child listener is caught and logged. + */ +@ThreadSafe +public class MultiplexingNodeStateListener implements NodeStateListener { + + private static final Logger LOG = LoggerFactory.getLogger(MultiplexingNodeStateListener.class); + + private final List listeners = new CopyOnWriteArrayList<>(); + + public MultiplexingNodeStateListener() {} + + public MultiplexingNodeStateListener(NodeStateListener... listeners) { + this(Arrays.asList(listeners)); + } + + public MultiplexingNodeStateListener(Collection listeners) { + addListeners(listeners); + } + + private void addListeners(Collection source) { + for (NodeStateListener listener : source) { + addListener(listener); + } + } + + private void addListener(NodeStateListener toAdd) { + Objects.requireNonNull(toAdd, "listener cannot be null"); + if (toAdd instanceof MultiplexingNodeStateListener) { + addListeners(((MultiplexingNodeStateListener) toAdd).listeners); + } else { + listeners.add(toAdd); + } + } + + public void register(@NonNull NodeStateListener listener) { + addListener(listener); + } + + @Override + public void onAdd(@NonNull Node node) { + invokeListeners(listener -> listener.onAdd(node), "onAdd"); + } + + @Override + public void onUp(@NonNull Node node) { + invokeListeners(listener -> listener.onUp(node), "onUp"); + } + + @Override + public void onDown(@NonNull Node node) { + invokeListeners(listener -> listener.onDown(node), "onDown"); + } + + @Override + public void onRemove(@NonNull Node node) { + invokeListeners(listener -> listener.onRemove(node), "onRemove"); + } + + @Override + public void onSessionReady(@NonNull Session session) { + invokeListeners(listener -> listener.onSessionReady(session), "onSessionReady"); + } + + @Override + public void close() throws Exception { + for (NodeStateListener listener : listeners) { + try { + listener.close(); + } catch (Exception e) { + Loggers.warnWithException( + LOG, "Unexpected error while closing node state listener {}.", listener, e); + } + } + } + + private void invokeListeners(@NonNull Consumer action, String event) { + for (NodeStateListener listener : listeners) { + try { + action.accept(listener); + } catch (Exception e) { + Loggers.warnWithException( + LOG, + "Unexpected error while notifying node state listener {} of an {} event.", + listener, + event, + e); + } + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java index 20aac59941a..6a9651d8376 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeInfo.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,7 +37,12 @@ */ public interface NodeInfo { - /** The endpoint that the driver will use to connect to the node. */ + /** + * The endpoint that the driver will use to connect to the node. + * + *
<p>
      This information is required; the driver will not function properly if this method returns + * {@code null}. + */ @NonNull EndPoint getEndPoint(); @@ -124,6 +131,9 @@ public interface NodeInfo { /** * An additional map of free-form properties, that can be used by custom implementations. They * will be copied as-is into {@link Node#getExtras()}. + * + *
<p>
      This is not required; if you don't have anything specific to report here, it can be null or + * empty. */ @Nullable Map getExtras(); @@ -138,7 +148,14 @@ public interface NodeInfo { @NonNull UUID getHostId(); - /** The current version that is associated with the nodes schema. */ + /** + * The current version that is associated with the node's schema. + * + *
<p>
      This is not required; the driver reports it in {@link Node#getSchemaVersion()}, but for + * informational purposes only. It is not used anywhere internally (schema agreement is checked + * with {@link TopologyMonitor#checkSchemaAgreement()}, which by default queries system tables + * directly, not this field). + */ @Nullable UUID getSchemaVersion(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java index 8a5d9e54f48..2f5c3c1d230 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,12 @@ import java.util.Objects; import net.jcip.annotations.Immutable; +/** + * The transition of a node from one {@linkplain NodeState state} to another. + * + *
<p>
      For simplicity, this is also used to represent a node addition ({@code oldState=null, + * newState=UNKNOWN}) or removal ({@code oldState=newState=null}). + */ @Immutable public class NodeStateEvent { public static NodeStateEvent changed(NodeState oldState, NodeState newState, DefaultNode node) { @@ -36,8 +44,15 @@ public static NodeStateEvent removed(DefaultNode node) { return new NodeStateEvent(null, null, node); } + /** The state before the change, or {@code null} if this is an addition or a removal. */ public final NodeState oldState; + + /** + * The state after the change ({@link NodeState#UNKNOWN} if the node was just added), or {@code + * null} if this is a removal. + */ public final NodeState newState; + public final DefaultNode node; private NodeStateEvent(NodeState oldState, NodeState newState, DefaultNode node) { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java index b2f264f30f9..c8a52e4fa00 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManager.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -103,6 +105,7 @@ private SingleThreaded(InternalDriverContext context) { DriverExecutionProfile config = context.getConfig().getDefaultProfile(); this.topologyEventDebouncer = new Debouncer<>( + logPrefix + "|topology debouncer", adminExecutor, this::coalesceTopologyEvents, this::flushTopologyEvents, @@ -124,7 +127,7 @@ private void markInitialized() { } // Updates to DefaultNode's volatile fields are confined to the admin thread - @SuppressWarnings("NonAtomicVolatileUpdate") + @SuppressWarnings({"NonAtomicVolatileUpdate", "NonAtomicOperationOnVolatileField"}) private void onChannelEvent(ChannelEvent event) { assert adminExecutor.inEventLoop(); if (closeWasCalled) { @@ -184,10 +187,10 @@ private void onDebouncedTopologyEvent(TopologyEvent event) { } } else { LOG.debug( - "[{}] Received UP event for unknown node {}, adding it", + "[{}] Received UP event for unknown node {}, refreshing node list", logPrefix, event.broadcastRpcAddress); - metadataManager.addNode(event.broadcastRpcAddress); + metadataManager.refreshNodes(); } break; case SUGGEST_DOWN: @@ -333,8 +336,8 @@ private void setState(DefaultNode node, NodeState newState, String reason) { (success, error) -> { try { if (error != null) { - LOG.debug( - "[{}] Error while refreshing info for {}", logPrefix, node, error); + Loggers.warnWithException( + LOG, "[{}] Error while refreshing info for {}", logPrefix, node, error); } // Fire the event whether the refresh succeeded or not eventBus.fire(NodeStateEvent.changed(oldState, newState, node)); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java index 664db194dcd..befb55e3740 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NodesRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +18,7 @@ package com.datastax.oss.driver.internal.core.metadata; import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import java.util.Collections; import java.util.Objects; @@ -34,7 +36,9 @@ abstract class NodesRefresh implements MetadataRefresh { * mutate the tokens in-place, so there is no way to check this after the fact). */ protected static boolean copyInfos( - NodeInfo nodeInfo, DefaultNode node, TokenFactory tokenFactory, String logPrefix) { + NodeInfo nodeInfo, DefaultNode node, InternalDriverContext context) { + + node.setEndPoint(nodeInfo.getEndPoint(), context); node.broadcastRpcAddress = nodeInfo.getBroadcastRpcAddress().orElse(null); node.broadcastAddress = nodeInfo.getBroadcastAddress().orElse(null); node.listenAddress = nodeInfo.getListenAddress().orElse(null); @@ -48,11 +52,11 @@ protected static boolean copyInfos( } catch (IllegalArgumentException e) { LOG.warn( "[{}] Error converting Cassandra version '{}' for {}", - logPrefix, + context.getSessionName(), versionString, node.getEndPoint()); } - boolean tokensChanged = tokenFactory != null && !node.rawTokens.equals(nodeInfo.getTokens()); + boolean tokensChanged = !node.rawTokens.equals(nodeInfo.getTokens()); if (tokensChanged) { node.rawTokens = nodeInfo.getTokens(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java index 2e70d8efb6a..b879e1f2104 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/NoopNodeStateListener.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,29 +18,12 @@ package com.datastax.oss.driver.internal.core.metadata; import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase; -import com.datastax.oss.driver.api.core.session.SessionBuilder; import net.jcip.annotations.ThreadSafe; /** - * Default node state listener implementation with empty methods. - * - *
<p>
      To activate this listener, modify the {@code advanced.node-state-listener} section in the - * driver configuration, for example: - * - *
<pre>
      - * datastax-java-driver {
      - *   advanced.node-state-listener {
      - *     class = NoopNodeStateListener
      - *   }
      - * }
- * </pre>
      - * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *
<p>
      Note that if a listener is specified programmatically with {@link - * SessionBuilder#withNodeStateListener(NodeStateListener)}, the configuration is ignored. + * Default node state listener implementation with empty methods. This implementation is used when + * no listeners were registered, neither programmatically nor through the configuration. */ @ThreadSafe public class NoopNodeStateListener extends NodeStateListenerBase { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java new file mode 100644 index 00000000000..4782d72abbb --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidator.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.datastax.oss.driver.internal.core.metadata; + +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class PeerRowValidator { + + /** Returns {@code true} if the given peer row is valid, and {@code false} otherwise. */ + public static boolean isValid(@NonNull AdminRow peerRow) { + + boolean hasPeersRpcAddress = !peerRow.isNull("rpc_address"); + boolean hasPeersV2RpcAddress = + !peerRow.isNull("native_address") && !peerRow.isNull("native_port"); + boolean hasRpcAddress = hasPeersRpcAddress || hasPeersV2RpcAddress; + + return hasRpcAddress + && !peerRow.isNull("host_id") + && !peerRow.isNull("data_center") + && !peerRow.isNull("rack") + && !peerRow.isNull("tokens") + && !peerRow.isNull("schema_version"); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java index 5135d04fda4..46de1989278 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java index 6f04d9244a6..c5935dba4bb 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementChecker.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,9 +32,9 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.time.Duration; -import java.util.Collections; import java.util.Iterator; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -106,8 +108,7 @@ private void sendQueries() { } else { CompletionStage localQuery = query("SELECT schema_version FROM system.local WHERE key='local'"); - CompletionStage peersQuery = - query("SELECT host_id, schema_version FROM system.peers"); + CompletionStage peersQuery = query("SELECT * FROM system.peers"); localQuery .thenCombine(peersQuery, this::extractSchemaVersions) @@ -143,31 +144,10 @@ private Set extractSchemaVersions(AdminResult controlNodeResult, AdminResu Map nodes = context.getMetadataManager().getMetadata().getNodes(); for (AdminRow peerRow : peersResult) { - UUID hostId = peerRow.getUuid("host_id"); - if (hostId == null) { - LOG.warn( - "[{}] Missing host_id in system.peers row, excluding from schema agreement check", - logPrefix); - continue; - } - UUID schemaVersion = peerRow.getUuid("schema_version"); - if (schemaVersion == null) { - LOG.warn( - "[{}] Missing schema_version in system.peers row for {}, " - + "excluding from schema agreement check", - logPrefix, - hostId); - continue; - } - Node node = nodes.get(hostId); - if (node == null) { - LOG.warn("[{}] Unknown peer {}, excluding from schema agreement check", logPrefix, hostId); - continue; - } else if (node.getState() != NodeState.UP) { - LOG.debug("[{}] Peer {} is down, excluding from schema agreement check", logPrefix, hostId); - continue; + if (isPeerValid(peerRow, nodes)) { + UUID schemaVersion = Objects.requireNonNull(peerRow.getUuid("schema_version")); + schemaVersions.add(schemaVersion); } - schemaVersions.add(schemaVersion); } return schemaVersions.build(); } @@ -205,12 +185,28 @@ private void 
completeOrReschedule(Set uuids, Throwable error) { @VisibleForTesting protected CompletionStage query(String queryString) { return AdminRequestHandler.query( - channel, - queryString, - Collections.emptyMap(), - queryTimeout, - INFINITE_PAGE_SIZE, - logPrefix) + channel, queryString, queryTimeout, INFINITE_PAGE_SIZE, logPrefix) .start(); } + + protected boolean isPeerValid(AdminRow peerRow, Map nodes) { + if (PeerRowValidator.isValid(peerRow)) { + UUID hostId = peerRow.getUuid("host_id"); + Node node = nodes.get(hostId); + if (node == null) { + LOG.warn("[{}] Unknown peer {}, excluding from schema agreement check", logPrefix, hostId); + return false; + } else if (node.getState() != NodeState.UP) { + LOG.debug("[{}] Peer {} is down, excluding from schema agreement check", logPrefix, hostId); + return false; + } + return true; + } else { + LOG.warn( + "[{}] Found invalid system.peers row for peer: {}, excluding from schema agreement check.", + logPrefix, + peerRow.getInetAddress("peer")); + return false; + } + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java new file mode 100644 index 00000000000..d1ab8eec98d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/SniEndPoint.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; + +public class SniEndPoint implements EndPoint { + private static final AtomicInteger OFFSET = new AtomicInteger(); + + private final InetSocketAddress proxyAddress; + private final String serverName; + + /** + * @param proxyAddress the address of the proxy. If it is {@linkplain + * InetSocketAddress#isUnresolved() unresolved}, each call to {@link #resolve()} will + * re-resolve it, fetch all of its A-records, and if there are more than 1 pick one in a + * round-robin fashion. + * @param serverName the SNI server name. In the context of Cloud, this is the string + * representation of the host id. 
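A hypothetical usage sketch of the class above (the server name is invented, and `localhost` is used so that `resolve()` can actually succeed): with an unresolved proxy address, every `resolve()` call re-queries DNS and rotates through the returned A-records.

```java
import com.datastax.oss.driver.internal.core.metadata.SniEndPoint;
import java.net.InetSocketAddress;

public class SniEndPointSketch {
  public static void main(String[] args) {
    // Deliberately unresolved: resolution is deferred to each resolve() call.
    InetSocketAddress proxy = InetSocketAddress.createUnresolved("localhost", 9042);
    SniEndPoint endPoint = new SniEndPoint(proxy, "hypothetical-host-id");
    // With several A-records, successive calls may return different IPs.
    System.out.println(endPoint.resolve());
    System.out.println(endPoint.resolve());
  }
}
```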
+ */ + public SniEndPoint(InetSocketAddress proxyAddress, String serverName) { + this.proxyAddress = Objects.requireNonNull(proxyAddress, "SNI address cannot be null"); + this.serverName = Objects.requireNonNull(serverName, "SNI Server name cannot be null"); + } + + public String getServerName() { + return serverName; + } + + @NonNull + @Override + public InetSocketAddress resolve() { + try { + InetAddress[] aRecords = InetAddress.getAllByName(proxyAddress.getHostName()); + if (aRecords.length == 0) { + // Probably never happens, but the JDK docs don't explicitly say so + throw new IllegalArgumentException( + "Could not resolve proxy address " + proxyAddress.getHostName()); + } + // The order of the returned address is unspecified. Sort by IP to make sure we get a true + // round-robin + Arrays.sort(aRecords, IP_COMPARATOR); + int index = + (aRecords.length == 1) + ? 0 + : OFFSET.getAndUpdate(x -> x == Integer.MAX_VALUE ? 0 : x + 1) % aRecords.length; + return new InetSocketAddress(aRecords[index], proxyAddress.getPort()); + } catch (UnknownHostException e) { + throw new IllegalArgumentException( + "Could not resolve proxy address " + proxyAddress.getHostName(), e); + } + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof SniEndPoint) { + SniEndPoint that = (SniEndPoint) other; + return this.proxyAddress.equals(that.proxyAddress) && this.serverName.equals(that.serverName); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(proxyAddress, serverName); + } + + @Override + public String toString() { + // Note that this uses the original proxy address, so if there are multiple A-records it won't + // show which one was selected. If that turns out to be a problem for debugging, we might need + // to store the result of resolve() in Connection and log that instead of the endpoint. + return proxyAddress.toString() + ":" + serverName; + } + + @NonNull + @Override + public String asMetricPrefix() { + String hostString = proxyAddress.getHostString(); + if (hostString == null) { + throw new IllegalArgumentException( + "Could not extract a host string from provided proxy address " + proxyAddress); + } + return hostString.replace('.', '_') + ':' + proxyAddress.getPort() + '_' + serverName; + } + + @SuppressWarnings("UnnecessaryLambda") + private static final Comparator IP_COMPARATOR = + (InetAddress address1, InetAddress address2) -> + UnsignedBytes.lexicographicalComparator() + .compare(address1.getAddress(), address2.getAddress()); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java index 174ed029d7a..6f60e9a790b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TokensChangedRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java index 4f11fa4b182..c7ea8c93088 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java index d01ae3d954f..e7741f11196 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/TopologyMonitor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,7 +46,10 @@ public interface TopologyMonitor extends AsyncAutoCloseable { * *

      The completion of the future returned by this method marks the point when the driver * considers itself "connected" to the cluster, and proceeds with the rest of the initialization: - * refreshing the list of nodes and the metadata, opening connection pools, etc. + * refreshing the list of nodes and the metadata, opening connection pools, etc. By then, the + * topology monitor should be ready to accept calls to its other methods; in particular, {@link + * #refreshNodeList()} will be called shortly after the completion of the future, to load the + * initial list of nodes to connect to. * *
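The guarantee added to this javadoc (the future returned by `init()` completes only once the monitor can serve its other methods, after which the driver immediately asks for the initial node list) is essentially a chained `CompletionStage`. A hedged sketch of that ordering with plain JDK types; `InitThenRefresh` and its node list are illustrative stand-ins, not the driver's `TopologyMonitor` interface:

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

public class InitThenRefresh {
  private volatile List<String> nodes; // stand-in for the monitor's internal state

  /** Completes only once the other methods can be served. */
  CompletionStage<Void> init() {
    return CompletableFuture.runAsync(
        () -> nodes = Arrays.asList("10.0.0.1:9042", "10.0.0.2:9042"));
  }

  /** Per the contract above, called by the driver shortly after init() completes. */
  CompletionStage<List<String>> refreshNodeList() {
    return CompletableFuture.completedFuture(nodes);
  }

  public static void main(String[] args) {
    InitThenRefresh monitor = new InitThenRefresh();
    // Chaining the refresh after init() means refreshNodeList() never
    // observes uninitialized state:
    monitor
        .init()
        .thenCompose(v -> monitor.refreshNodeList())
        .thenAccept(System.out::println)
        .toCompletableFuture()
        .join();
  }
}
```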

      If {@code advanced.reconnect-on-init = true} in the configuration, this method is * responsible for handling reconnection. That is, if the initial attempt to "connect" to the diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java index c1d62ca0d1d..669f925af65 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultAggregateMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,6 +24,7 @@ import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; import java.util.Objects; import java.util.Optional; import net.jcip.annotations.Immutable; @@ -29,18 +32,20 @@ import org.slf4j.LoggerFactory; @Immutable -public class DefaultAggregateMetadata implements AggregateMetadata { +public class DefaultAggregateMetadata implements AggregateMetadata, Serializable { private static final Logger LOG = LoggerFactory.getLogger(DefaultAggregateMetadata.class); + private static final long serialVersionUID = 1; + @NonNull private final CqlIdentifier keyspace; @NonNull private final FunctionSignature signature; @Nullable private final FunctionSignature finalFuncSignature; @Nullable private final Object initCond; + @Nullable private final String formattedInitCond; @NonNull private final DataType returnType; @NonNull private final FunctionSignature stateFuncSignature; @NonNull private final DataType stateType; - @NonNull private final TypeCodec stateTypeCodec; public DefaultAggregateMetadata( @NonNull CqlIdentifier keyspace, @@ -55,10 +60,10 @@ public DefaultAggregateMetadata( this.signature = signature; this.finalFuncSignature = finalFuncSignature; this.initCond = initCond; + this.formattedInitCond = computeFormattedInitCond(initCond, stateTypeCodec); this.returnType = returnType; this.stateFuncSignature = stateFuncSignature; this.stateType = stateType; - this.stateTypeCodec = stateTypeCodec; } @NonNull @@ -106,18 +111,7 @@ public DataType getStateType() { @NonNull @Override public Optional formatInitCond() { - if (initCond == null) { - return Optional.empty(); - } - try { - return Optional.of(stateTypeCodec.format(initCond)); - } catch (Throwable t) { - LOG.warn( - String.format( - "Failed to format INITCOND for %s.%s, using toString instead", - keyspace.asInternal(), signature.getName().asInternal())); - return 
Optional.of(initCond.toString()); - } + return Optional.ofNullable(this.formattedInitCond); } @Override @@ -160,4 +154,22 @@ public String toString() { + signature + ")"; } + + @Nullable + private String computeFormattedInitCond( + @Nullable Object initCond, @NonNull TypeCodec stateTypeCodec) { + + if (initCond == null) { + return null; + } + try { + return stateTypeCodec.format(initCond); + } catch (Throwable t) { + LOG.warn( + String.format( + "Failed to format INITCOND for %s.%s, using toString instead", + keyspace.asInternal(), signature.getName().asInternal())); + return initCond.toString(); + } + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java index aecb40e7329..3d0c6209880 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultColumnMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,11 +21,15 @@ import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; import com.datastax.oss.driver.api.core.type.DataType; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.util.Objects; import net.jcip.annotations.Immutable; @Immutable -public class DefaultColumnMetadata implements ColumnMetadata { +public class DefaultColumnMetadata implements ColumnMetadata, Serializable { + + private static final long serialVersionUID = 1; + @NonNull private final CqlIdentifier keyspace; @NonNull private final CqlIdentifier parent; @NonNull private final CqlIdentifier name; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java index bfed800046d..75b343d77b1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultFunctionMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
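The `DefaultAggregateMetadata` change above is not just a refactoring: formatting the INITCOND once in the constructor lets the class drop its `TypeCodec` field, which is what makes the new `Serializable` marker safe (the codec is not guaranteed to be serializable). A minimal sketch of the pattern, with invented types:

```java
import java.io.Serializable;
import java.util.Optional;
import java.util.function.Function;

// Illustrative only: precompute a derived string at construction time instead
// of keeping the (non-serializable) formatter as a field.
public class PrecomputedField implements Serializable {
  private static final long serialVersionUID = 1;

  private final String formatted; // serializable derived value

  public PrecomputedField(Object value, Function<Object, String> formatter) {
    // The formatter runs once here and is never stored, so it does not have
    // to be serializable itself.
    this.formatted = (value == null) ? null : formatter.apply(value);
  }

  public Optional<String> formatted() {
    return Optional.ofNullable(formatted);
  }
}
```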
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,12 +23,15 @@ import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.util.List; import java.util.Objects; import net.jcip.annotations.Immutable; @Immutable -public class DefaultFunctionMetadata implements FunctionMetadata { +public class DefaultFunctionMetadata implements FunctionMetadata, Serializable { + + private static final long serialVersionUID = 1; @NonNull private final CqlIdentifier keyspace; @NonNull private final FunctionSignature signature; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java index 3fbaeff34b6..8ff0263fcc8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultIndexMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,12 +21,15 @@ import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.util.Map; import java.util.Objects; import net.jcip.annotations.Immutable; @Immutable -public class DefaultIndexMetadata implements IndexMetadata { +public class DefaultIndexMetadata implements IndexMetadata, Serializable { + + private static final long serialVersionUID = 1; @NonNull private final CqlIdentifier keyspace; @NonNull private final CqlIdentifier table; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java index cb354b583ed..3d443dd8c16 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultKeyspaceMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,12 +26,15 @@ import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; import com.datastax.oss.driver.api.core.type.UserDefinedType; import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.Serializable; import java.util.Map; import java.util.Objects; import net.jcip.annotations.Immutable; @Immutable -public class DefaultKeyspaceMetadata implements KeyspaceMetadata { +public class DefaultKeyspaceMetadata implements KeyspaceMetadata, Serializable { + + private static final long serialVersionUID = 1; @NonNull private final CqlIdentifier name; private final boolean durableWrites; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java index 479067ce0ba..4c339f89299 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultTableMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,6 +24,7 @@ import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; import java.util.List; import java.util.Map; import java.util.Objects; @@ -30,7 +33,9 @@ import net.jcip.annotations.Immutable; @Immutable -public class DefaultTableMetadata implements TableMetadata { +public class DefaultTableMetadata implements TableMetadata, Serializable { + + private static final long serialVersionUID = 1; @NonNull private final CqlIdentifier keyspace; @NonNull private final CqlIdentifier name; @@ -139,7 +144,8 @@ public boolean equals(Object other) { && Objects.equals(this.partitionKey, that.getPartitionKey()) && Objects.equals(this.clusteringColumns, that.getClusteringColumns()) && Objects.equals(this.columns, that.getColumns()) - && Objects.equals(this.indexes, that.getIndexes()); + && Objects.equals(this.indexes, that.getIndexes()) + && Objects.equals(this.options, that.getOptions()); } else { return false; } @@ -156,7 +162,8 @@ public int hashCode() { partitionKey, clusteringColumns, columns, - indexes); + indexes, + options); } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java index 53d50931546..2c5e5a9603e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/DefaultViewMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
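The `DefaultTableMetadata` hunk above also fixes an `equals`/`hashCode` asymmetry: `options` now participates in both methods, preserving the contract that equal objects produce equal hash codes. The shape of the fix, reduced to a toy class invented for illustration:

```java
import java.util.Map;
import java.util.Objects;

final class TableLike {
  final String name;
  final Map<String, Object> options;

  TableLike(String name, Map<String, Object> options) {
    this.name = name;
    this.options = options;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof TableLike)) {
      return false;
    }
    TableLike that = (TableLike) other;
    // Every field compared here must also feed hashCode() below
    return Objects.equals(this.name, that.name) && Objects.equals(this.options, that.options);
  }

  @Override
  public int hashCode() {
    return Objects.hash(name, options);
  }
}
```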
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.io.Serializable; import java.util.List; import java.util.Map; import java.util.Objects; @@ -31,7 +34,9 @@ import net.jcip.annotations.Immutable; @Immutable -public class DefaultViewMetadata implements ViewMetadata { +public class DefaultViewMetadata implements ViewMetadata, Serializable { + + private static final long serialVersionUID = 1; @NonNull private final CqlIdentifier keyspace; @NonNull private final CqlIdentifier name; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java new file mode 100644 index 00000000000..eebe16364d1 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListener.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata.schema; + +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.util.Loggers; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Combines multiple schema change listeners into a single one. + * + *

      Any exception thrown by a child listener is caught and logged. + */ +@ThreadSafe +public class MultiplexingSchemaChangeListener implements SchemaChangeListener { + + private static final Logger LOG = LoggerFactory.getLogger(MultiplexingSchemaChangeListener.class); + + private final List listeners = new CopyOnWriteArrayList<>(); + + public MultiplexingSchemaChangeListener() {} + + public MultiplexingSchemaChangeListener(SchemaChangeListener... listeners) { + this(Arrays.asList(listeners)); + } + + public MultiplexingSchemaChangeListener(Collection listeners) { + addListeners(listeners); + } + + private void addListeners(Collection source) { + for (SchemaChangeListener listener : source) { + addListener(listener); + } + } + + private void addListener(SchemaChangeListener toAdd) { + Objects.requireNonNull(toAdd, "listener cannot be null"); + if (toAdd instanceof MultiplexingSchemaChangeListener) { + addListeners(((MultiplexingSchemaChangeListener) toAdd).listeners); + } else { + listeners.add(toAdd); + } + } + + public void register(@NonNull SchemaChangeListener listener) { + addListener(listener); + } + + @Override + public void onKeyspaceCreated(@NonNull KeyspaceMetadata keyspace) { + invokeListeners(listener -> listener.onKeyspaceCreated(keyspace), "onKeyspaceCreated"); + } + + @Override + public void onKeyspaceDropped(@NonNull KeyspaceMetadata keyspace) { + invokeListeners(listener -> listener.onKeyspaceDropped(keyspace), "onKeyspaceDropped"); + } + + @Override + public void onKeyspaceUpdated( + @NonNull KeyspaceMetadata current, @NonNull KeyspaceMetadata previous) { + invokeListeners(listener -> listener.onKeyspaceUpdated(current, previous), "onKeyspaceUpdated"); + } + + @Override + public void onTableCreated(@NonNull TableMetadata table) { + invokeListeners(listener -> listener.onTableCreated(table), "onTableCreated"); + } + + @Override + public void onTableDropped(@NonNull TableMetadata table) { + invokeListeners(listener -> listener.onTableDropped(table), "onTableDropped"); + } + + @Override + public void onTableUpdated(@NonNull TableMetadata current, @NonNull TableMetadata previous) { + invokeListeners(listener -> listener.onTableUpdated(current, previous), "onTableUpdated"); + } + + @Override + public void onUserDefinedTypeCreated(@NonNull UserDefinedType type) { + invokeListeners( + listener -> listener.onUserDefinedTypeCreated(type), "onUserDefinedTypeCreated"); + } + + @Override + public void onUserDefinedTypeDropped(@NonNull UserDefinedType type) { + invokeListeners( + listener -> listener.onUserDefinedTypeDropped(type), "onUserDefinedTypeDropped"); + } + + @Override + public void onUserDefinedTypeUpdated( + @NonNull UserDefinedType current, @NonNull UserDefinedType previous) { + invokeListeners( + listener -> listener.onUserDefinedTypeUpdated(current, previous), + "onUserDefinedTypeUpdated"); + } + + @Override + public void onFunctionCreated(@NonNull FunctionMetadata function) { + invokeListeners(listener -> listener.onFunctionCreated(function), "onFunctionCreated"); + } + + @Override + public void onFunctionDropped(@NonNull FunctionMetadata function) { + invokeListeners(listener -> listener.onFunctionDropped(function), "onFunctionDropped"); + } + + @Override + public void onFunctionUpdated( + @NonNull FunctionMetadata current, @NonNull FunctionMetadata previous) { + invokeListeners(listener -> listener.onFunctionUpdated(current, previous), "onFunctionUpdated"); + } + + @Override + public void onAggregateCreated(@NonNull AggregateMetadata aggregate) { + 
invokeListeners(listener -> listener.onAggregateCreated(aggregate), "onAggregateCreated"); + } + + @Override + public void onAggregateDropped(@NonNull AggregateMetadata aggregate) { + invokeListeners(listener -> listener.onAggregateDropped(aggregate), "onAggregateDropped"); + } + + @Override + public void onAggregateUpdated( + @NonNull AggregateMetadata current, @NonNull AggregateMetadata previous) { + invokeListeners( + listener -> listener.onAggregateUpdated(current, previous), "onAggregateUpdated"); + } + + @Override + public void onViewCreated(@NonNull ViewMetadata view) { + invokeListeners(listener -> listener.onViewCreated(view), "onViewCreated"); + } + + @Override + public void onViewDropped(@NonNull ViewMetadata view) { + invokeListeners(listener -> listener.onViewDropped(view), "onViewDropped"); + } + + @Override + public void onViewUpdated(@NonNull ViewMetadata current, @NonNull ViewMetadata previous) { + invokeListeners(listener -> listener.onViewUpdated(current, previous), "onViewUpdated"); + } + + @Override + public void onSessionReady(@NonNull Session session) { + invokeListeners(listener -> listener.onSessionReady(session), "onSessionReady"); + } + + @Override + public void close() throws Exception { + for (SchemaChangeListener listener : listeners) { + try { + listener.close(); + } catch (Exception e) { + Loggers.warnWithException( + LOG, "Unexpected error while closing schema change listener {}.", listener, e); + } + } + } + + private void invokeListeners(@NonNull Consumer action, String event) { + for (SchemaChangeListener listener : listeners) { + try { + action.accept(listener); + } catch (Exception e) { + Loggers.warnWithException( + LOG, + "Unexpected error while notifying schema change listener {} of an {} event.", + listener, + event, + e); + } + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java index 2df3935a80f..76fed2e5d24 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/NoopSchemaChangeListener.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
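Wiring several listeners through the new multiplexer might look like the following sketch. The two anonymous listeners are invented for illustration; `MultiplexingSchemaChangeListener` and `SchemaChangeListenerBase` are the types shown in the diff. A failing child does not prevent the others from being notified, and nested multiplexers are flattened on registration:

```java
import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListenerBase;
import com.datastax.oss.driver.internal.core.metadata.schema.MultiplexingSchemaChangeListener;

public class MultiplexingExample {
  public static void main(String[] args) {
    SchemaChangeListenerBase quiet =
        new SchemaChangeListenerBase() {
          @Override
          public void onKeyspaceCreated(KeyspaceMetadata keyspace) {
            System.out.println("keyspace created: " + keyspace.getName());
          }
        };
    SchemaChangeListenerBase faulty =
        new SchemaChangeListenerBase() {
          @Override
          public void onKeyspaceCreated(KeyspaceMetadata keyspace) {
            // Caught and logged by the multiplexer; 'quiet' is still notified
            throw new RuntimeException("boom");
          }
        };
    MultiplexingSchemaChangeListener all =
        new MultiplexingSchemaChangeListener(faulty, quiet);
    // Registering another multiplexer merges its children instead of nesting:
    all.register(new MultiplexingSchemaChangeListener());
  }
}
```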
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,29 +18,12 @@ package com.datastax.oss.driver.internal.core.metadata.schema; import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListenerBase; -import com.datastax.oss.driver.api.core.session.SessionBuilder; import net.jcip.annotations.ThreadSafe; /** - * Default schema change listener implementation with empty methods. - * - *

      To activate this listener, modify the {@code advanced.schema-change-listener} section in the - * driver configuration, for example: - * - *

      - * datastax-java-driver {
      - *   advanced.schema-change-listener {
      - *     class = NoopSchemaChangeListener
      - *   }
      - * }
      - * 
      - * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *

Note that if a listener is specified programmatically with {@link - * SessionBuilder#withSchemaChangeListener(SchemaChangeListener)}, the configuration is ignored. + * Default schema change listener implementation with empty methods. This implementation is used + * when no listener has been registered, either programmatically or through the configuration. */ @ThreadSafe public class NoopSchemaChangeListener extends SchemaChangeListenerBase { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java index f27d68923f6..5f01d019ee0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/SchemaChangeType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java index d3c9114b740..b762f35b885 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ScriptBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java index 21d8124c3de..069ce3752b2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/ShallowUserDefinedType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.api.core.detach.AttachmentPoint; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.metadata.schema.parsing.UserDefinedTypeParser; import com.datastax.oss.driver.internal.core.type.DefaultUserDefinedType; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; @@ -33,11 +36,16 @@ /** * A temporary UDT implementation that only contains the keyspace and name. * - *

      When we process a schema refresh that spans multiple UDTs, we can't fully materialize them - * right away, because they might depend on each other and the system table query does not return - * them in topological order. So we do a first pass where UDT field that are also UDTs are resolved - * as instances of this class, then a topological sort, then a second pass to replace all shallow - * definitions by the actual instance (which will be a {@link DefaultUserDefinedType}). + *

When we refresh a keyspace's UDTs, we can't fully materialize them right away, because they + * might depend on each other and the system table query does not return them in topological order. + * So we do a first pass where UDTs that are nested into other UDTs are resolved as instances of this + * class, then a topological sort, then a second pass to replace all shallow definitions by the + * actual instance (which will be a {@link DefaultUserDefinedType}). + * + *

      This type is also used in the schema builder's internal representation: the keyspace, name and + * frozen-ness are the only things we need to generate a query string. + * + * @see UserDefinedTypeParser */ @Immutable public class ShallowUserDefinedType implements UserDefinedType, Serializable { @@ -78,14 +86,28 @@ public List getFieldNames() { "This implementation should only be used internally, this is likely a driver bug"); } + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + throw new UnsupportedOperationException( + "This implementation should only be used internally, this is likely a driver bug"); + } + + @Override + public int firstIndexOf(@NonNull CqlIdentifier id) { + throw new UnsupportedOperationException( + "This implementation should only be used internally, this is likely a driver bug"); + } + + @NonNull @Override - public int firstIndexOf(CqlIdentifier id) { + public List allIndicesOf(@NonNull String name) { throw new UnsupportedOperationException( "This implementation should only be used internally, this is likely a driver bug"); } @Override - public int firstIndexOf(String name) { + public int firstIndexOf(@NonNull String name) { throw new UnsupportedOperationException( "This implementation should only be used internally, this is likely a driver bug"); } @@ -137,12 +159,12 @@ public void attach(@NonNull AttachmentPoint attachmentPoint) { "This implementation should only be used internally, this is likely a driver bug"); } - private void readObject(ObjectInputStream s) throws IOException { + private void readObject(@SuppressWarnings("unused") ObjectInputStream s) throws IOException { throw new UnsupportedOperationException( "This implementation should only be used internally, this is likely a driver bug"); } - private void writeObject(ObjectOutputStream s) throws IOException { + private void writeObject(@SuppressWarnings("unused") ObjectOutputStream s) throws IOException { throw new UnsupportedOperationException( "This implementation should only be used internally, this is likely a driver bug"); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java index a3d682f72df..fe175a98579 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/AggregateChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
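The shallow-then-deep scheme described in the javadoc above boils down to a dependency-first traversal: placeholder references stand in for nested UDTs during the first pass, and the second pass materializes definitions in topological order. A toy model of that second pass; the names and "deep" definitions here are invented, none of this is driver code:

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TwoPassResolve {

  // First pass (simulated): each UDT is known only by name, plus the names of
  // the UDTs nested inside it -- the "shallow" view.
  static final Map<String, List<String>> SHALLOW =
      Map.of(
          "address", List.of(), // no dependencies
          "user", List.of("address"), // user nests address
          "order", List.of("user")); // order nests user

  public static void main(String[] args) {
    // Second pass: materialize dependencies first, so every nested type is
    // already "deep" by the time its parent is built.
    Map<String, String> materialized = new LinkedHashMap<>();
    for (String name : SHALLOW.keySet()) {
      materialize(name, materialized);
    }
    System.out.println(materialized.keySet()); // always [address, user, order]
  }

  static void materialize(String name, Map<String, String> done) {
    if (done.containsKey(name)) {
      return; // already materialized
    }
    for (String dependency : SHALLOW.get(name)) {
      materialize(dependency, done);
    }
    done.put(name, "deep " + name);
  }
}
```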
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java index 8c5da87a30f..4ab4f0946ec 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/FunctionChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java index ef448934ddd..0bd2a9d75af 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/KeyspaceChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java index 16d67b8f06b..0902cf4e5b8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TableChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java index 2575d2e6b45..f8048570ac2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/TypeChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java index 84f6a4d1e61..91e59d287f1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/events/ViewChangeEvent.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java index 1e74a1ee9a7..d1f8640a744 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java index 04b6f69edec..9749a921aae 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/CassandraSchemaParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java index 225004ca9ab..1037ccda1ae 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameCompositeParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java index d2470c0d48a..bf252d0bc57 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.UserDefinedType; @@ -31,6 +34,7 @@ import com.datastax.oss.protocol.internal.util.Bytes; import java.util.ArrayList; import java.util.Collections; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -60,6 +64,30 @@ public DataType parse( String toParse, Map userTypes, InternalDriverContext context) { + // We take keyspaceId as a parameter because of the parent interface, but it's actually unused + // by this implementation. + return parse(toParse, userTypes, context, context.getSessionName()); + } + + /** + * Simplified parse method for external use. + * + *

      This is intended for use in Cassandra's UDF implementation (the current version uses the + * similar method from driver 3). + */ + public DataType parse(String toParse, AttachmentPoint attachmentPoint) { + return parse( + toParse, + null, // No caching of user types: nested types will always be fully re-parsed + attachmentPoint, + "parser"); + } + + private DataType parse( + String toParse, + Map userTypes, + AttachmentPoint attachmentPoint, + String logPrefix) { boolean frozen = false; if (isReversed(toParse)) { // Just skip the ReversedType part, we don't care @@ -74,20 +102,20 @@ public DataType parse( if (next.startsWith("org.apache.cassandra.db.marshal.ListType")) { DataType elementType = - parse(keyspaceId, parser.getTypeParameters().get(0), userTypes, context); + parse(parser.getTypeParameters().get(0), userTypes, attachmentPoint, logPrefix); return DataTypes.listOf(elementType, frozen); } if (next.startsWith("org.apache.cassandra.db.marshal.SetType")) { DataType elementType = - parse(keyspaceId, parser.getTypeParameters().get(0), userTypes, context); + parse(parser.getTypeParameters().get(0), userTypes, attachmentPoint, logPrefix); return DataTypes.setOf(elementType, frozen); } if (next.startsWith("org.apache.cassandra.db.marshal.MapType")) { List parameters = parser.getTypeParameters(); - DataType keyType = parse(keyspaceId, parameters.get(0), userTypes, context); - DataType valueType = parse(keyspaceId, parameters.get(1), userTypes, context); + DataType keyType = parse(parameters.get(0), userTypes, attachmentPoint, logPrefix); + DataType valueType = parse(parameters.get(1), userTypes, attachmentPoint, logPrefix); return DataTypes.mapOf(keyType, valueType, frozen); } @@ -95,7 +123,7 @@ public DataType parse( LOG.warn( "[{}] Got o.a.c.db.marshal.FrozenType for something else than a collection, " + "this driver version might be too old for your version of Cassandra", - context.getSessionName()); + logPrefix); if (next.startsWith("org.apache.cassandra.db.marshal.UserType")) { ++parser.idx; // skipping '(' @@ -104,7 +132,7 @@ public DataType parse( parser.skipBlankAndComma(); String typeName = TypeCodecs.TEXT.decode( - Bytes.fromHexString("0x" + parser.readOne()), context.getProtocolVersion()); + Bytes.fromHexString("0x" + parser.readOne()), attachmentPoint.getProtocolVersion()); if (typeName == null) { throw new AssertionError("Type name cannot be null, this is a server bug"); } @@ -120,11 +148,11 @@ public DataType parse( parser.skipBlankAndComma(); for (Map.Entry entry : nameAndTypeParameters.entrySet()) { CqlIdentifier fieldName = CqlIdentifier.fromInternal(entry.getKey()); - DataType fieldType = parse(keyspaceId, entry.getValue(), userTypes, context); + DataType fieldType = parse(entry.getValue(), userTypes, attachmentPoint, logPrefix); builder.withField(fieldName, fieldType); } - // create a frozen UserType since C* 2.x UDTs are always frozen. - return builder.frozen().build(); + // Create a frozen UserType since C* 2.x UDTs are always frozen. 
+ return builder.frozen().withAttachmentPoint(attachmentPoint).build(); } } @@ -132,9 +160,16 @@ public DataType parse( List rawTypes = parser.getTypeParameters(); ImmutableList.Builder componentTypesBuilder = ImmutableList.builder(); for (String rawType : rawTypes) { - componentTypesBuilder.add(parse(keyspaceId, rawType, userTypes, context)); + componentTypesBuilder.add(parse(rawType, userTypes, attachmentPoint, logPrefix)); } - return new DefaultTupleType(componentTypesBuilder.build(), context); + return new DefaultTupleType(componentTypesBuilder.build(), attachmentPoint); + } + + if (next.startsWith("org.apache.cassandra.db.marshal.VectorType")) { + Iterator rawTypes = parser.getTypeParameters().iterator(); + DataType subtype = parse(rawTypes.next(), userTypes, attachmentPoint, logPrefix); + int dimensions = Integer.parseInt(rawTypes.next()); + return DataTypes.vectorOf(subtype, dimensions); } DataType type = NATIVE_TYPES_BY_CLASS_NAME.get(next); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java index 21c492a1231..8d5e068b431 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
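Assuming the new two-argument entry point behaves as its javadoc describes, calling it from outside the driver could look like this sketch. The no-arg parser constructor and `AttachmentPoint.NONE` are assumptions on our part, not confirmed by the diff:

```java
import com.datastax.oss.driver.api.core.detach.AttachmentPoint;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser;

public class ClassNameParseExample {
  public static void main(String[] args) {
    DataTypeClassNameParser parser = new DataTypeClassNameParser(); // assumed no-arg constructor
    // A list<text>, in Cassandra's internal "marshal" notation:
    DataType type =
        parser.parse(
            "org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UTF8Type)",
            AttachmentPoint.NONE); // assumed stand-in attachment point
    System.out.println(type); // expected to render as list<text>
  }
}
```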
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,12 +24,14 @@ import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; import com.datastax.oss.driver.internal.core.type.DefaultTupleType; +import com.datastax.oss.driver.internal.core.type.DefaultVectorType; import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Map; import net.jcip.annotations.ThreadSafe; @@ -64,7 +68,11 @@ private DataType parse( Parser parser = new Parser(toParse, 0); String type = parser.parseTypeName(); - DataType nativeType = NATIVE_TYPES_BY_NAME.get(type.toLowerCase()); + if (type.equalsIgnoreCase(RawColumn.THRIFT_EMPTY_TYPE)) { + return DataTypes.custom(type); + } + + DataType nativeType = NATIVE_TYPES_BY_NAME.get(type.toLowerCase(Locale.ROOT)); if (nativeType != null) { return nativeType; } @@ -132,6 +140,16 @@ private DataType parse( return new DefaultTupleType(componentTypesBuilder.build(), context); } + if (type.equalsIgnoreCase("vector")) { + if (parameters.size() != 2) { + throw new IllegalArgumentException( + String.format("Expecting two parameters for vector custom type, got %s", parameters)); + } + DataType subType = parse(parameters.get(0), keyspaceId, false, userTypes, context); + int dimensions = Integer.parseInt(parameters.get(1)); + return new DefaultVectorType(subType, dimensions); + } + throw new IllegalArgumentException("Could not parse type name " + toParse); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java index 42ee4c37b05..0f191d08a53 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
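The two parsers above gain matching branches for the new vector type: the class-name parser recognizes `org.apache.cassandra.db.marshal.VectorType`, and the CQL-name parser recognizes a `vector` custom type with exactly two parameters. The following standalone sketch mirrors that shared arity check and dimension parsing; `VectorSpec` is a hypothetical stand-in for the driver's `DefaultVectorType`, and the input is an already-split parameter list like the one the surrounding parsers produce.

```java
import java.util.Arrays;
import java.util.List;

public class VectorParseSketch {

  // Hypothetical stand-in for DefaultVectorType: records subtype and dimensions.
  static final class VectorSpec {
    final String subtype;
    final int dimensions;

    VectorSpec(String subtype, int dimensions) {
      this.subtype = subtype;
      this.dimensions = dimensions;
    }

    @Override
    public String toString() {
      return "vector<" + subtype + ", " + dimensions + ">";
    }
  }

  // Mirrors the validation in the patch: a vector takes exactly two parameters,
  // the element type and the integer dimension count.
  static VectorSpec parseVector(List<String> parameters) {
    if (parameters.size() != 2) {
      throw new IllegalArgumentException(
          String.format("Expecting two parameters for vector custom type, got %s", parameters));
    }
    return new VectorSpec(parameters.get(0), Integer.parseInt(parameters.get(1)));
  }

  public static void main(String[] args) {
    // Prints: vector<float, 3>
    System.out.println(parseVector(Arrays.asList("float", "3")));
  }
}
```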
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java index 9a4a5bf148a..5fa64027be5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DefaultSchemaParserFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,8 @@ */ package com.datastax.oss.driver.internal.core.metadata.schema.parsing; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.dse.driver.internal.core.metadata.schema.parsing.DseSchemaParser; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; import net.jcip.annotations.ThreadSafe; @@ -30,6 +34,7 @@ public DefaultSchemaParserFactory(InternalDriverContext context) { @Override public SchemaParser newInstance(SchemaRows rows) { - return new CassandraSchemaParser(rows, context); + boolean isDse = rows.getNode().getExtras().containsKey(DseNodeProperties.DSE_VERSION); + return isDse ? new DseSchemaParser(rows, context) : new CassandraSchemaParser(rows, context); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java index edd5a6bfe8f..54786e999ac 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
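The `DefaultSchemaParserFactory` change above dispatches on whether the control node's extras map carries a DSE version. A minimal sketch of that decision, assuming nothing but the standard library; the string key and parser labels stand in for `DseNodeProperties.DSE_VERSION` and the real parser classes.

```java
import java.util.Collections;
import java.util.Map;

public class SchemaParserDispatchSketch {

  // Stand-in for DseNodeProperties.DSE_VERSION; the real key lives in the driver.
  static final String DSE_VERSION = "DSE_VERSION";

  static String parserFor(Map<String, Object> extras) {
    // DSE nodes advertise their version in the extras map; plain Cassandra nodes do not.
    return extras.containsKey(DSE_VERSION) ? "DseSchemaParser" : "CassandraSchemaParser";
  }

  public static void main(String[] args) {
    System.out.println(parserFor(Collections.singletonMap(DSE_VERSION, "6.8.0"))); // DseSchemaParser
    System.out.println(parserFor(Collections.emptyMap())); // CassandraSchemaParser
  }
}
```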
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,6 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.shaded.guava.common.collect.Lists; import com.datastax.oss.driver.shaded.guava.common.primitives.Ints; @@ -42,6 +43,14 @@ public class RawColumn implements Comparable { public static final String KIND_COMPACT_VALUE = "compact_value"; public static final String KIND_STATIC = "static"; + /** + * Upon migration from thrift to CQL, Cassandra internally creates a surrogate column "value" of + * type {@code EmptyType} for dense tables. {@code EmptyType} resolves to this CQL type name. + * + *
      This column shouldn't be exposed to the user but is currently exposed in system tables. + */ + public static final String THRIFT_EMPTY_TYPE = "empty"; + public final CqlIdentifier name; public String kind; public final int position; @@ -51,8 +60,7 @@ public class RawColumn implements Comparable { public final String indexType; public final Map indexOptions; - private RawColumn( - AdminRow row, CqlIdentifier keyspaceId, Map userTypes) { + private RawColumn(AdminRow row) { // Cassandra < 3.0: // CREATE TABLE system.schema_columns ( // keyspace_name text, @@ -139,17 +147,15 @@ private static int rank(String kind) { } } - public static List toRawColumns( - Collection rows, - CqlIdentifier keyspaceId, - Map userTypes) { + @SuppressWarnings("MixedMutabilityReturnType") + public static List toRawColumns(Collection rows) { if (rows.isEmpty()) { return Collections.emptyList(); } else { // Use a mutable list, we might remove some elements later List result = Lists.newArrayListWithExpectedSize(rows.size()); for (AdminRow row : rows) { - result.add(new RawColumn(row, keyspaceId, userTypes)); + result.add(new RawColumn(row)); } return result; } @@ -158,7 +164,7 @@ public static List toRawColumns( /** * Helper method to filter columns while parsing a table's metadata. * - *
      Upon migration from thrift to CQL, we internally create a pair of surrogate + *
      Upon migration from thrift to CQL, Cassandra internally creates a pair of surrogate * clustering/regular columns for compact static tables. These columns shouldn't be exposed to the * user but are currently returned by C*. We also need to remove the static keyword for all other * columns in the table. @@ -181,18 +187,12 @@ public static void pruneStaticCompactTableColumns(List columns) { } } - /** - * Helper method to filter columns while parsing a table's metadata. - * - *
      Upon migration from thrift to CQL, we internally create a surrogate column "value" of type - * EmptyType for dense tables. This column shouldn't be exposed to the user but is currently - * returned by C*. - */ + /** Helper method to filter columns while parsing a table's metadata. */ public static void pruneDenseTableColumnsV3(List columns) { ListIterator iterator = columns.listIterator(); while (iterator.hasNext()) { RawColumn column = iterator.next(); - if (column.kind.equals(KIND_REGULAR) && "empty".equals(column.dataType)) { + if (column.kind.equals(KIND_REGULAR) && THRIFT_EMPTY_TYPE.equals(column.dataType)) { iterator.remove(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java index 43b942b1669..86c914459d7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/RelationParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,11 @@ */ package com.datastax.oss.driver.internal.core.metadata.schema.parsing; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.schema.Describable; import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; @@ -72,7 +78,7 @@ protected Map parseOptions(AdminRow row) { CqlIdentifier.fromInternal("compression"), ImmutableMap.copyOf(SimpleJsonParser.parseStringMap(row.getString(name)))); } - } else { + } else if (!isDeprecatedInCassandra4(name)) { // Default case, read the value in a generic fashion Object value = row.get(name, codec); if (value != null) { @@ -83,6 +89,26 @@ protected Map parseOptions(AdminRow row) { return builder.build(); } + /** + * Handle a few oddities in Cassandra 4: some options still appear in system_schema.tables, but + * they are not valid in CREATE statements anymore. We need to exclude them from our metadata, + * otherwise {@link Describable#describe(boolean)} will generate invalid CQL. 
+ */ + private boolean isDeprecatedInCassandra4(String name) { + return isCassandra4OrAbove() + && (name.equals("read_repair_chance") + || name.equals("dclocal_read_repair_chance") + // default_time_to_live is not allowed in CREATE MATERIALIZED VIEW statements + || (name.equals("default_time_to_live") && (this instanceof ViewParser))); + } + + private boolean isCassandra4OrAbove() { + Node node = rows.getNode(); + return !node.getExtras().containsKey(DseNodeProperties.DSE_VERSION) + && node.getCassandraVersion() != null + && node.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) >= 0; + } + public static void appendOptions(Map options, ScriptBuilder builder) { for (Map.Entry entry : options.entrySet()) { CqlIdentifier name = entry.getKey(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java index beb02894a0d..109ebea45c1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java index c61e8933c89..93db1472e4d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
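As a concrete illustration of the exclusion above, the toy sketch below drops the deprecated names from a fabricated options map, which is what keeps a regenerated `CREATE TABLE` statement valid on Cassandra 4 (the patch additionally drops `default_time_to_live` for materialized views, omitted here).

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class DeprecatedOptionsSketch {

  // The table options the patch special-cases for Cassandra 4.
  static final Set<String> DEPRECATED_IN_C4 =
      new HashSet<>(Arrays.asList("read_repair_chance", "dclocal_read_repair_chance"));

  public static void main(String[] args) {
    Map<String, Object> options = new LinkedHashMap<>(); // fabricated sample values
    options.put("read_repair_chance", 0.0);
    options.put("gc_grace_seconds", 864000);
    options.keySet().removeAll(DEPRECATED_IN_C4);
    // Prints {gc_grace_seconds=864000}: only options still valid in CREATE statements remain.
    System.out.println(options);
  }
}
```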
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java index 8da63a9018c..e979a8fd822 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SimpleJsonParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,6 +47,7 @@ private SimpleJsonParser(String input) { this.input = input; } + @SuppressWarnings("MixedMutabilityReturnType") public static List parseStringList(String input) { if (input == null || input.isEmpty()) { return Collections.emptyList(); @@ -73,6 +76,7 @@ public static List parseStringList(String input) { } } + @SuppressWarnings("MixedMutabilityReturnType") public static Map parseStringMap(String input) { if (input == null || input.isEmpty()) { return Collections.emptyMap(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java index d7b09b7b11f..a3bda428ef3 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -121,13 +123,11 @@ public TableMetadata parseTable( tableRow.getString( tableRow.contains("table_name") ? "table_name" : "columnfamily_name")); - UUID uuid = (tableRow.contains("id")) ? tableRow.getUuid("id") : tableRow.getUuid("cf_id"); + UUID uuid = tableRow.contains("id") ? tableRow.getUuid("id") : tableRow.getUuid("cf_id"); List rawColumns = RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId), - keyspaceId, - userTypes); + rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); if (rawColumns.isEmpty()) { LOG.warn( "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", @@ -234,9 +234,7 @@ TableMetadata parseVirtualTable( List rawColumns = RawColumn.toRawColumns( - rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId), - keyspaceId, - userTypes); + rows.virtualColumns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(tableId)); if (rawColumns.isEmpty()) { LOG.warn( "[{}] Processing TABLE refresh for {}.{} but found no matching rows, skipping", diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java index f04b6c9a807..442f46ee432 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java index 48bdac0a07e..52773ea1c45 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParser.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -87,9 +89,7 @@ public ViewMetadata parseView( List rawColumns = RawColumn.toRawColumns( - rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId), - keyspaceId, - userTypes); + rows.columns().getOrDefault(keyspaceId, ImmutableMultimap.of()).get(viewId)); if (rawColumns.isEmpty()) { LOG.warn( "[{}] Processing VIEW refresh for {}.{} but found no matching rows, skipping", diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java index 556c9c58b6b..7577fd1bb92 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueries.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,20 +18,16 @@ package com.datastax.oss.driver.internal.core.metadata.schema.queries; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import java.util.Optional; -import java.util.concurrent.CompletableFuture; import net.jcip.annotations.ThreadSafe; @ThreadSafe public class Cassandra21SchemaQueries extends CassandraSchemaQueries { public Cassandra21SchemaQueries( - DriverChannel channel, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, false, refreshFuture, config, logPrefix); + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); } @Override @@ -86,4 +84,14 @@ protected Optional selectVirtualTablesQuery() { protected Optional selectVirtualColumnsQuery() { return Optional.empty(); } + + @Override + protected Optional selectEdgesQuery() { + return Optional.empty(); + } + + @Override + protected Optional selectVerticiesQuery() { + return Optional.empty(); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java index 130599b86e2..ff09917b3c7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueries.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
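Each per-version subclass answers the new edge/vertex hooks with either a query string or `Optional.empty()`, and absent queries are simply skipped by the caller. A compact sketch of that pattern follows; the boolean flag is illustrative, since in the driver the decision is fixed per subclass.

```java
import java.util.Optional;

public class OptionalQuerySketch {

  static Optional<String> selectEdgesQuery(boolean graphSchemaSupported) {
    return graphSchemaSupported
        ? Optional.of("SELECT * FROM system_schema.edges")
        : Optional.empty();
  }

  public static void main(String[] args) {
    // Prints nothing: the query is skipped entirely.
    selectEdgesQuery(false).ifPresent(q -> System.out.println("would run: " + q));
    // Prints: would run: SELECT * FROM system_schema.edges
    selectEdgesQuery(true).ifPresent(q -> System.out.println("would run: " + q));
  }
}
```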
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,20 +18,16 @@ package com.datastax.oss.driver.internal.core.metadata.schema.queries; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import java.util.Optional; -import java.util.concurrent.CompletableFuture; import net.jcip.annotations.ThreadSafe; @ThreadSafe public class Cassandra22SchemaQueries extends CassandraSchemaQueries { public Cassandra22SchemaQueries( - DriverChannel channel, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, false, refreshFuture, config, logPrefix); + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); } @Override @@ -86,4 +84,14 @@ protected Optional selectVirtualTablesQuery() { protected Optional selectVirtualColumnsQuery() { return Optional.empty(); } + + @Override + protected Optional selectEdgesQuery() { + return Optional.empty(); + } + + @Override + protected Optional selectVerticiesQuery() { + return Optional.empty(); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java index c2c97873624..8c36d0f4217 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueries.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,20 +18,16 @@ package com.datastax.oss.driver.internal.core.metadata.schema.queries; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import java.util.Optional; -import java.util.concurrent.CompletableFuture; import net.jcip.annotations.ThreadSafe; @ThreadSafe public class Cassandra3SchemaQueries extends CassandraSchemaQueries { public Cassandra3SchemaQueries( - DriverChannel channel, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, true, refreshFuture, config, logPrefix); + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); } @Override @@ -86,4 +84,14 @@ protected Optional selectVirtualTablesQuery() { protected Optional selectVirtualColumnsQuery() { return Optional.empty(); } + + @Override + protected Optional selectEdgesQuery() { + return Optional.empty(); + } + + @Override + protected Optional selectVerticiesQuery() { + return Optional.empty(); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java index 641a97119b9..e2de0b419ed 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra4SchemaQueries.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,20 +18,16 @@ package com.datastax.oss.driver.internal.core.metadata.schema.queries; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import java.util.Optional; -import java.util.concurrent.CompletableFuture; import net.jcip.annotations.ThreadSafe; @ThreadSafe public class Cassandra4SchemaQueries extends Cassandra3SchemaQueries { public Cassandra4SchemaQueries( - DriverChannel channel, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, refreshFuture, config, logPrefix); + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java index 8aa4ebe8f83..92ab2501c12 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaQueries.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +19,11 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.internal.core.channel.DriverChannel; -import com.datastax.oss.driver.internal.core.util.Loggers; import com.datastax.oss.driver.internal.core.util.NanoTime; import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; @@ -45,16 +46,13 @@ public abstract class CassandraSchemaQueries implements SchemaQueries { private final DriverChannel channel; private final EventExecutor adminExecutor; - private final boolean isCassandraV3; + private final Node node; private final String logPrefix; private final Duration timeout; private final int pageSize; - private final String whereClause; + private final KeyspaceFilter keyspaceFilter; // The future we return from execute, completes when all the queries are done. private final CompletableFuture schemaRowsFuture = new CompletableFuture<>(); - // A future that completes later, when the whole refresh is done. We just store it here to pass it - // down to the next step. 
- public final CompletableFuture refreshFuture; private final long startTimeNs = System.nanoTime(); // All non-final fields are accessed exclusively on adminExecutor @@ -62,15 +60,10 @@ public abstract class CassandraSchemaQueries implements SchemaQueries { private int pendingQueries; protected CassandraSchemaQueries( - DriverChannel channel, - boolean isCassandraV3, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { this.channel = channel; this.adminExecutor = channel.eventLoop(); - this.isCassandraV3 = isCassandraV3; - this.refreshFuture = refreshFuture; + this.node = node; this.logPrefix = logPrefix; this.timeout = config.getDuration(DefaultDriverOption.METADATA_SCHEMA_REQUEST_TIMEOUT); this.pageSize = config.getInt(DefaultDriverOption.METADATA_SCHEMA_REQUEST_PAGE_SIZE); @@ -78,25 +71,8 @@ protected CassandraSchemaQueries( List refreshedKeyspaces = config.getStringList( DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList()); - this.whereClause = buildWhereClause(refreshedKeyspaces); - } - - private static String buildWhereClause(List refreshedKeyspaces) { - if (refreshedKeyspaces.isEmpty()) { - return ""; - } else { - StringBuilder builder = new StringBuilder(" WHERE keyspace_name in ("); - boolean first = true; - for (String keyspace : refreshedKeyspaces) { - if (first) { - first = false; - } else { - builder.append(","); - } - builder.append('\'').append(keyspace).append('\''); - } - return builder.append(")").toString(); - } + assert refreshedKeyspaces != null; // per the default value + this.keyspaceFilter = KeyspaceFilter.newInstance(logPrefix, refreshedKeyspaces); } protected abstract String selectKeyspacesQuery(); @@ -121,6 +97,10 @@ private static String buildWhereClause(List refreshedKeyspaces) { protected abstract Optional selectAggregatesQuery(); + protected abstract Optional selectEdgesQuery(); + + protected abstract Optional selectVerticiesQuery(); + @Override public CompletionStage execute() { RunOrSchedule.on(adminExecutor, this::executeOnAdminExecutor); @@ -130,42 +110,42 @@ public CompletionStage execute() { private void executeOnAdminExecutor() { assert adminExecutor.inEventLoop(); - schemaRowsBuilder = new CassandraSchemaRows.Builder(isCassandraV3, refreshFuture, logPrefix); + schemaRowsBuilder = new CassandraSchemaRows.Builder(node, keyspaceFilter, logPrefix); + String whereClause = keyspaceFilter.getWhereClause(); - query(selectKeyspacesQuery() + whereClause, schemaRowsBuilder::withKeyspaces, true); - query(selectTypesQuery() + whereClause, schemaRowsBuilder::withTypes, true); - query(selectTablesQuery() + whereClause, schemaRowsBuilder::withTables, true); - query(selectColumnsQuery() + whereClause, schemaRowsBuilder::withColumns, true); + query(selectKeyspacesQuery() + whereClause, schemaRowsBuilder::withKeyspaces); + query(selectTypesQuery() + whereClause, schemaRowsBuilder::withTypes); + query(selectTablesQuery() + whereClause, schemaRowsBuilder::withTables); + query(selectColumnsQuery() + whereClause, schemaRowsBuilder::withColumns); selectIndexesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withIndexes, true)); + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withIndexes)); selectViewsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withViews, true)); + .ifPresent(select -> query(select + whereClause, 
schemaRowsBuilder::withViews)); selectFunctionsQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withFunctions, true)); + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withFunctions)); selectAggregatesQuery() - .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withAggregates, true)); + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withAggregates)); selectVirtualKeyspacesQuery() - .ifPresent( - select -> query(select + whereClause, schemaRowsBuilder::withVirtualKeyspaces, false)); + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualKeyspaces)); selectVirtualTablesQuery() - .ifPresent( - select -> query(select + whereClause, schemaRowsBuilder::withVirtualTables, false)); + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualTables)); selectVirtualColumnsQuery() - .ifPresent( - select -> query(select + whereClause, schemaRowsBuilder::withVirtualColumns, false)); + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVirtualColumns)); + selectEdgesQuery() + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withEdges)); + selectVerticiesQuery() + .ifPresent(select -> query(select + whereClause, schemaRowsBuilder::withVertices)); } private void query( String queryString, - Function, CassandraSchemaRows.Builder> builderUpdater, - boolean warnIfMissing) { + Function, CassandraSchemaRows.Builder> builderUpdater) { assert adminExecutor.inEventLoop(); pendingQueries += 1; query(queryString) .whenCompleteAsync( - (result, error) -> handleResult(result, error, builderUpdater, warnIfMissing), - adminExecutor); + (result, error) -> handleResult(result, error, builderUpdater), adminExecutor); } @VisibleForTesting @@ -173,27 +153,18 @@ protected CompletionStage query(String query) { return AdminRequestHandler.query(channel, query, timeout, pageSize, logPrefix).start(); } - /** - * @param warnIfMissing whether to log a warning if the queried table does not exist: some DDAC - * versions report release_version > 4, but don't have a system_virtual_schema keyspace, so we - * want to ignore those errors silently. 
- */ private void handleResult( AdminResult result, Throwable error, - Function, CassandraSchemaRows.Builder> builderUpdater, - boolean warnIfMissing) { + Function, CassandraSchemaRows.Builder> builderUpdater) { + + // If another query already failed, we've already propagated the failure so just ignore this one + if (schemaRowsFuture.isCompletedExceptionally()) { + return; + } + if (error != null) { - if (warnIfMissing || !error.getMessage().contains("does not exist")) { - Loggers.warnWithException( - LOG, - "[{}] Error during schema refresh, new metadata might be incomplete", - logPrefix, - error); - } - // Proceed without the results of this query, the rest of the schema refresh will run on a - // "best effort" basis - markQueryComplete(); + schemaRowsFuture.completeExceptionally(error); } else { // Store the rows of the current page in the builder schemaRowsBuilder = builderUpdater.apply(result); @@ -201,20 +172,16 @@ private void handleResult( result .nextPage() .whenCompleteAsync( - (nextResult, nextError) -> - handleResult(nextResult, nextError, builderUpdater, warnIfMissing), + (nextResult, nextError) -> handleResult(nextResult, nextError, builderUpdater), adminExecutor); } else { - markQueryComplete(); + pendingQueries -= 1; + if (pendingQueries == 0) { + LOG.debug( + "[{}] Schema queries took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); + schemaRowsFuture.complete(schemaRowsBuilder.build()); + } } } } - - private void markQueryComplete() { - pendingQueries -= 1; - if (pendingQueries == 0) { - LOG.debug("[{}] Schema queries took {}", logPrefix, NanoTime.formatTimeSince(startTimeNs)); - schemaRowsFuture.complete(schemaRowsBuilder.build()); - } - } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java index 49a49764021..95af0739300 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/CassandraSchemaRows.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
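The refactored `handleResult` above replaces the old best-effort path (log a warning and continue) with fail-fast propagation: the first error completes the shared future exceptionally, and results that arrive afterwards are ignored. A self-contained sketch of that pattern, with illustrative names rather than the driver API:

```java
import java.util.concurrent.CompletableFuture;

public class FailFastSketch {

  private final CompletableFuture<String> schemaRows = new CompletableFuture<>();
  private int pendingQueries = 2;

  // The driver runs this on a single admin executor; synchronized here only to
  // keep the standalone sketch thread-safe.
  synchronized void handleResult(String rows, Throwable error) {
    if (schemaRows.isCompletedExceptionally()) {
      return; // another query already failed, ignore this outcome
    }
    if (error != null) {
      schemaRows.completeExceptionally(error); // fail fast instead of best effort
    } else if (--pendingQueries == 0) {
      schemaRows.complete("all rows collected, last batch: " + rows);
    }
  }

  public static void main(String[] args) throws Exception {
    FailFastSketch queries = new FailFastSketch();
    queries.handleResult("keyspaces", null);
    queries.handleResult("tables", null);
    // Prints: all rows collected, last batch: tables
    System.out.println(queries.schemaRows.get());
  }
}
```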
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,8 +17,10 @@ */ package com.datastax.oss.driver.internal.core.metadata.schema.queries; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeClassNameParser; import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeCqlNameParser; @@ -26,10 +30,10 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMultimap; import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; +import edu.umd.cs.findbugs.annotations.NonNull; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import net.jcip.annotations.Immutable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,8 +41,8 @@ @Immutable public class CassandraSchemaRows implements SchemaRows { + private final Node node; private final DataTypeParser dataTypeParser; - private final CompletableFuture refreshFuture; private final List keyspaces; private final List virtualKeyspaces; private final Multimap tables; @@ -50,10 +54,12 @@ public class CassandraSchemaRows implements SchemaRows { private final Map> columns; private final Map> virtualColumns; private final Map> indexes; + private final Map> vertices; + private final Map> edges; private CassandraSchemaRows( - boolean isCassandraV3, - CompletableFuture refreshFuture, + Node node, + DataTypeParser dataTypeParser, List keyspaces, List virtualKeyspaces, Multimap tables, @@ -64,10 +70,11 @@ private CassandraSchemaRows( Map> indexes, Multimap types, Multimap functions, - Multimap aggregates) { - this.dataTypeParser = - isCassandraV3 ? 
new DataTypeCqlNameParser() : new DataTypeClassNameParser(); - this.refreshFuture = refreshFuture; + Multimap aggregates, + Map> vertices, + Map> edges) { + this.node = node; + this.dataTypeParser = dataTypeParser; this.keyspaces = keyspaces; this.virtualKeyspaces = virtualKeyspaces; this.tables = tables; @@ -79,16 +86,19 @@ private CassandraSchemaRows( this.types = types; this.functions = functions; this.aggregates = aggregates; + this.vertices = vertices; + this.edges = edges; } + @NonNull @Override - public DataTypeParser dataTypeParser() { - return dataTypeParser; + public Node getNode() { + return node; } @Override - public CompletableFuture refreshFuture() { - return refreshFuture; + public DataTypeParser dataTypeParser() { + return dataTypeParser; } @Override @@ -146,12 +156,23 @@ public Map> indexes() { return indexes; } + @Override + public Map> vertices() { + return vertices; + } + + @Override + public Map> edges() { + return edges; + } + public static class Builder { private static final Logger LOG = LoggerFactory.getLogger(Builder.class); - private final boolean isCassandraV3; - private final CompletableFuture refreshFuture; + private final Node node; + private final DataTypeParser dataTypeParser; private final String tableNameColumn; + private final KeyspaceFilter keyspaceFilter; private final String logPrefix; private final ImmutableList.Builder keyspacesBuilder = ImmutableList.builder(); private final ImmutableList.Builder virtualKeyspacesBuilder = ImmutableList.builder(); @@ -173,22 +194,55 @@ public static class Builder { virtualColumnsBuilders = new LinkedHashMap<>(); private final Map> indexesBuilders = new LinkedHashMap<>(); + private final Map> + verticesBuilders = new LinkedHashMap<>(); + private final Map> + edgesBuilders = new LinkedHashMap<>(); - public Builder( - boolean isCassandraV3, CompletableFuture refreshFuture, String logPrefix) { - this.isCassandraV3 = isCassandraV3; - this.refreshFuture = refreshFuture; + public Builder(Node node, KeyspaceFilter keyspaceFilter, String logPrefix) { + this.node = node; + this.keyspaceFilter = keyspaceFilter; this.logPrefix = logPrefix; - this.tableNameColumn = isCassandraV3 ? "table_name" : "columnfamily_name"; + if (isCassandraV3OrAbove(node)) { + this.tableNameColumn = "table_name"; + this.dataTypeParser = new DataTypeCqlNameParser(); + } else { + this.tableNameColumn = "columnfamily_name"; + this.dataTypeParser = new DataTypeClassNameParser(); + } + } + + private static boolean isCassandraV3OrAbove(Node node) { + // We already did those checks in DefaultSchemaQueriesFactory. + // We could pass along booleans (isCassandraV3, isDse...), but passing the whole Node is + // better for maintainability, in case we need to do more checks in downstream components in + // the future. 
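The `nextStable()` calls in the version checks above mean that a pre-release such as `4.0.0-beta1` is gated like the stable release it will become. The sketch below captures that idea in plain Java; it is not the driver's `Version` API and only handles `major.minor` plus an optional pre-release label.

```java
public class VersionGateSketch {

  // Drop any pre-release label (e.g. "-beta1"), then read major and minor.
  static int[] nextStable(String version) {
    String[] parts = version.split("-")[0].split("\\.");
    return new int[] {Integer.parseInt(parts[0]), Integer.parseInt(parts[1])};
  }

  static boolean isAtLeast(String version, int major, int minor) {
    int[] v = nextStable(version);
    return v[0] > major || (v[0] == major && v[1] >= minor);
  }

  public static void main(String[] args) {
    System.out.println(isAtLeast("4.0.0-beta1", 4, 0)); // true: the beta is gated as 4.0
    System.out.println(isAtLeast("3.11.4", 4, 0)); // false
  }
}
```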
+ Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); + if (dseVersion != null) { + dseVersion = dseVersion.nextStable(); + return dseVersion.compareTo(Version.V5_0_0) >= 0; + } else { + Version cassandraVersion = node.getCassandraVersion(); + if (cassandraVersion == null) { + cassandraVersion = Version.V3_0_0; + } else { + cassandraVersion = cassandraVersion.nextStable(); + } + return cassandraVersion.compareTo(Version.V3_0_0) >= 0; + } } public Builder withKeyspaces(Iterable rows) { - keyspacesBuilder.addAll(rows); + for (AdminRow row : rows) { + put(keyspacesBuilder, row); + } return this; } public Builder withVirtualKeyspaces(Iterable rows) { - virtualKeyspacesBuilder.addAll(rows); + for (AdminRow row : rows) { + put(virtualKeyspacesBuilder, row); + } return this; } @@ -255,12 +309,35 @@ public Builder withIndexes(Iterable rows) { return this; } + public Builder withVertices(Iterable rows) { + for (AdminRow row : rows) { + putByKeyspaceAndTable(row, verticesBuilders); + } + return this; + } + + public Builder withEdges(Iterable rows) { + for (AdminRow row : rows) { + putByKeyspaceAndTable(row, edgesBuilders); + } + return this; + } + + private void put(ImmutableList.Builder builder, AdminRow row) { + String keyspace = row.getString("keyspace_name"); + if (keyspace == null) { + LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); + } else if (keyspaceFilter.includes(keyspace)) { + builder.add(row); + } + } + private void putByKeyspace( AdminRow row, ImmutableMultimap.Builder builder) { String keyspace = row.getString("keyspace_name"); if (keyspace == null) { LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); - } else { + } else if (keyspaceFilter.includes(keyspace)) { builder.put(CqlIdentifier.fromInternal(keyspace), row); } } @@ -274,7 +351,7 @@ private void putByKeyspaceAndTable( LOG.warn("[{}] Skipping system row with missing keyspace name", logPrefix); } else if (table == null) { LOG.warn("[{}] Skipping system row with missing table name", logPrefix); - } else { + } else if (keyspaceFilter.includes(keyspace)) { ImmutableMultimap.Builder builder = builders.computeIfAbsent( CqlIdentifier.fromInternal(keyspace), s -> ImmutableListMultimap.builder()); @@ -284,8 +361,8 @@ private void putByKeyspaceAndTable( public CassandraSchemaRows build() { return new CassandraSchemaRows( - isCassandraV3, - refreshFuture, + node, + dataTypeParser, keyspacesBuilder.build(), virtualKeyspacesBuilder.build(), tablesBuilder.build(), @@ -296,15 +373,20 @@ public CassandraSchemaRows build() { build(indexesBuilders), typesBuilder.build(), functionsBuilder.build(), - aggregatesBuilder.build()); + aggregatesBuilder.build(), + build(verticesBuilders), + build(edgesBuilders)); } private static Map> build( Map> builders) { ImmutableMap.Builder> builder = ImmutableMap.builder(); - for (Map.Entry> entry : builders.entrySet()) { - builder.put(entry.getKey(), entry.getValue().build()); - } + builders + .entrySet() + .forEach( + (entry) -> { + builder.put(entry.getKey(), entry.getValue().build()); + }); return builder.build(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java index aee7ccaa5cb..e537475ed7b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java +++ 
b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +17,12 @@ */ package com.datastax.oss.driver.internal.core.metadata.schema.queries; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import java.util.concurrent.CompletableFuture; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +41,7 @@ public DefaultSchemaQueriesFactory(InternalDriverContext context) { } @Override - public SchemaQueries newInstance(CompletableFuture refreshFuture) { + public SchemaQueries newInstance() { DriverChannel channel = context.getControlConnection().channel(); if (channel == null || channel.closeFuture().isDone()) { throw new IllegalStateException("Control channel not available, aborting schema refresh"); @@ -56,32 +57,55 @@ public SchemaQueries newInstance(CompletableFuture refreshFuture) { "Could not find control node metadata " + channel.getEndPoint() + ", aborting schema refresh")); - return newInstance(node, channel, refreshFuture); + return newInstance(node, channel); } - protected SchemaQueries newInstance( - Node node, DriverChannel channel, CompletableFuture refreshFuture) { - Version version = node.getCassandraVersion(); - if (version == null) { - LOG.warn( - "[{}] Cassandra version missing for {}, defaulting to {}", - logPrefix, - node, - Version.V3_0_0); - version = Version.V3_0_0; - } else { - version = version.nextStable(); - } + protected SchemaQueries newInstance(Node node, DriverChannel channel) { + DriverExecutionProfile config = context.getConfig().getDefaultProfile(); - LOG.debug("[{}] Sending schema queries to {} with version {}", logPrefix, node, version); - if (version.compareTo(Version.V2_2_0) < 0) { - return new Cassandra21SchemaQueries(channel, refreshFuture, config, logPrefix); - } else if (version.compareTo(Version.V3_0_0) < 0) { - return new Cassandra22SchemaQueries(channel, refreshFuture, config, logPrefix); - } else if (version.compareTo(Version.V4_0_0) < 0) { - return new Cassandra3SchemaQueries(channel, refreshFuture, config, logPrefix); + + Version dseVersion = (Version) node.getExtras().get(DseNodeProperties.DSE_VERSION); + if (dseVersion != null) { + dseVersion = 
dseVersion.nextStable(); + + LOG.debug( + "[{}] Sending schema queries to {} with DSE version {}", logPrefix, node, dseVersion); + // 4.8 is the oldest version supported, which uses C* 2.1 schema + if (dseVersion.compareTo(Version.V5_0_0) < 0) { + return new Cassandra21SchemaQueries(channel, node, config, logPrefix); + } else if (dseVersion.compareTo(Version.V6_7_0) < 0) { + // 5.0 - 6.7 uses C* 3.0 schema + return new Cassandra3SchemaQueries(channel, node, config, logPrefix); + } else if (dseVersion.compareTo(Version.V6_8_0) < 0) { + // 6.7 uses C* 4.0 schema + return new Cassandra4SchemaQueries(channel, node, config, logPrefix); + } else { + // 6.8+ uses DSE 6.8 schema (C* 4.0 schema with graph metadata) (JAVA-1898) + return new Dse68SchemaQueries(channel, node, config, logPrefix); + } } else { - return new Cassandra4SchemaQueries(channel, refreshFuture, config, logPrefix); + Version cassandraVersion = node.getCassandraVersion(); + if (cassandraVersion == null) { + LOG.warn( + "[{}] Cassandra version missing for {}, defaulting to {}", + logPrefix, + node, + Version.V3_0_0); + cassandraVersion = Version.V3_0_0; + } else { + cassandraVersion = cassandraVersion.nextStable(); + } + LOG.debug( + "[{}] Sending schema queries to {} with version {}", logPrefix, node, cassandraVersion); + if (cassandraVersion.compareTo(Version.V2_2_0) < 0) { + return new Cassandra21SchemaQueries(channel, node, config, logPrefix); + } else if (cassandraVersion.compareTo(Version.V3_0_0) < 0) { + return new Cassandra22SchemaQueries(channel, node, config, logPrefix); + } else if (cassandraVersion.compareTo(Version.V4_0_0) < 0) { + return new Cassandra3SchemaQueries(channel, node, config, logPrefix); + } else { + return new Cassandra4SchemaQueries(channel, node, config, logPrefix); + } } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java new file mode 100644 index 00000000000..460df8b59e5 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Dse68SchemaQueries.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata.schema.queries; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.channel.DriverChannel; +import java.util.Optional; + +/** + * The system table queries to refresh the schema in DSE 6.8. + * + *
<p>There are two additional tables for per-table graph metadata. + */ +public class Dse68SchemaQueries extends Cassandra4SchemaQueries { + + public Dse68SchemaQueries( + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); + } + + @Override + protected Optional<String> selectEdgesQuery() { + return Optional.of("SELECT * FROM system_schema.edges"); + } + + @Override + protected Optional<String> selectVerticesQuery() { + return Optional.of("SELECT * FROM system_schema.vertices"); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java new file mode 100644 index 00000000000..a483a904f6e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilter.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata.schema.queries; + +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; + +/** + * Filters keyspaces during schema metadata queries. + * + *
<p>
      Depending on the circumstances, we do it either on the server side with a WHERE IN clause that + * will be appended to every query, or on the client side with a predicate that will be applied to + * every fetched row. + */ +public interface KeyspaceFilter { + + static KeyspaceFilter newInstance(@NonNull String logPrefix, @NonNull List specs) { + if (specs.isEmpty()) { + return INCLUDE_ALL; + } else { + return new RuleBasedKeyspaceFilter(logPrefix, specs); + } + } + + /** The WHERE IN clause, or an empty string if there is no server-side filtering. */ + @NonNull + String getWhereClause(); + + /** The predicate that will be invoked for client-side filtering. */ + boolean includes(@NonNull String keyspace); + + KeyspaceFilter INCLUDE_ALL = + new KeyspaceFilter() { + @NonNull + @Override + public String getWhereClause() { + return ""; + } + + @Override + public boolean includes(@NonNull String keyspace) { + return true; + } + }; +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java new file mode 100644 index 00000000000..38a8c116c45 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/RuleBasedKeyspaceFilter.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata.schema.queries; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.function.Predicate; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Filters keyspaces during schema metadata queries. + * + *
<p>
      Depending on the circumstances, we do it either on the server side with a WHERE IN clause that + * will be appended to every query, or on the client side with a predicate that will be applied to + * every fetched row. + */ +class RuleBasedKeyspaceFilter implements KeyspaceFilter { + + private static final Logger LOG = LoggerFactory.getLogger(RuleBasedKeyspaceFilter.class); + + private static final Pattern EXACT_INCLUDE = Pattern.compile("\\w+"); + private static final Pattern EXACT_EXCLUDE = Pattern.compile("!\\s*(\\w+)"); + private static final Pattern REGEX_INCLUDE = Pattern.compile("/(.+)/"); + private static final Pattern REGEX_EXCLUDE = Pattern.compile("!\\s*/(.+)/"); + + private final String logPrefix; + private final String whereClause; + private final Set exactIncludes = new HashSet<>(); + private final Set exactExcludes = new HashSet<>(); + private final List> regexIncludes = new ArrayList<>(); + private final List> regexExcludes = new ArrayList<>(); + + private final boolean isDebugEnabled; + private final Set loggedKeyspaces; + + RuleBasedKeyspaceFilter(@NonNull String logPrefix, @NonNull List specs) { + assert !specs.isEmpty(); // see KeyspaceFilter#newInstance + + this.logPrefix = logPrefix; + for (String spec : specs) { + spec = spec.trim(); + Matcher matcher; + if (EXACT_INCLUDE.matcher(spec).matches()) { + exactIncludes.add(spec); + if (exactExcludes.remove(spec)) { + LOG.warn( + "[{}] '{}' is both included and excluded, ignoring the exclusion", logPrefix, spec); + } + } else if ((matcher = EXACT_EXCLUDE.matcher(spec)).matches()) { + String name = matcher.group(1); + if (exactIncludes.contains(name)) { + LOG.warn( + "[{}] '{}' is both included and excluded, ignoring the exclusion", logPrefix, name); + } else { + exactExcludes.add(name); + } + } else if ((matcher = REGEX_INCLUDE.matcher(spec)).matches()) { + compile(matcher.group(1)).map(regexIncludes::add); + } else if ((matcher = REGEX_EXCLUDE.matcher(spec)).matches()) { + compile(matcher.group(1)).map(regexExcludes::add); + } else { + LOG.warn( + "[{}] Error while parsing {}: invalid element '{}', skipping", + logPrefix, + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath(), + spec); + } + } + + if (!exactIncludes.isEmpty() && regexIncludes.isEmpty() && regexExcludes.isEmpty()) { + // We can filter on the server + whereClause = buildWhereClause(exactIncludes); + if (!exactExcludes.isEmpty()) { + // Proceed, but this is probably a mistake + LOG.warn( + "[{}] {} only has exact includes and excludes, the excludes are redundant", + logPrefix, + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath()); + } + LOG.debug("[{}] Filtering server-side with '{}'", logPrefix, whereClause); + } else { + whereClause = ""; + LOG.debug("[{}] No server-side filtering", logPrefix); + } + + isDebugEnabled = LOG.isDebugEnabled(); + loggedKeyspaces = isDebugEnabled ? 
new HashSet<>() : null; + } + + @NonNull + @Override + public String getWhereClause() { + return whereClause; + } + + @Override + public boolean includes(@NonNull String keyspace) { + if (exactIncludes.contains(keyspace)) { + log(keyspace, true, "it is included by name"); + return true; + } else if (exactExcludes.contains(keyspace)) { + log(keyspace, false, "it is excluded by name"); + return false; + } else if (regexIncludes.isEmpty()) { + if (regexExcludes.isEmpty()) { + log(keyspace, false, "it is not included by name"); + return false; + } else if (matchesAny(keyspace, regexExcludes)) { + log(keyspace, false, "it matches at least one regex exclude"); + return false; + } else { + log(keyspace, true, "it does not match any regex exclude"); + return true; + } + } else { // !regexIncludes.isEmpty() + if (regexExcludes.isEmpty()) { + if (matchesAny(keyspace, regexIncludes)) { + log(keyspace, true, "it matches at least one regex include"); + return true; + } else { + log(keyspace, false, "it does not match any regex include"); + return false; + } + } else { + if (matchesAny(keyspace, regexIncludes) && !matchesAny(keyspace, regexExcludes)) { + log(keyspace, true, "it matches at least one regex include, and no regex exclude"); + return true; + } else { + log(keyspace, false, "it matches either no regex include, or at least one regex exclude"); + return false; + } + } + } + } + + private void log(@NonNull String keyspace, boolean include, @NonNull String reason) { + if (isDebugEnabled && loggedKeyspaces.add(keyspace)) { + LOG.debug( + "[{}] Filtering {} '{}' because {}", logPrefix, include ? "in" : "out", keyspace, reason); + } + } + + private boolean matchesAny(String keyspace, List> rules) { + for (Predicate rule : rules) { + if (rule.test(keyspace)) { + return true; + } + } + return false; + } + + private Optional> compile(String regex) { + try { + return Optional.of(Pattern.compile(regex).asPredicate()); + } catch (PatternSyntaxException e) { + LOG.warn( + "[{}] Error while parsing {}: syntax error in regex /{}/ ({}), skipping", + this.logPrefix, + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES.getPath(), + regex, + e.getMessage()); + return Optional.empty(); + } + } + + private static String buildWhereClause(Set keyspaces) { + StringBuilder builder = new StringBuilder(" WHERE keyspace_name IN ("); + boolean first = true; + for (String keyspace : keyspaces) { + if (first) { + first = false; + } else { + builder.append(","); + } + builder.append('\'').append(keyspace).append('\''); + } + return builder.append(')').toString(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java index 6ab89d190ca..613f43197e2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueries.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
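As an illustration of the four rule forms above (exact include, `!` exclude, `/regex/` include, `!/regex/` exclude), here is a hypothetical sketch, not part of the patch; it assumes same-package access since `RuleBasedKeyspaceFilter` is package-private, and the spec strings are made up:

```java
import java.util.Arrays;

public class KeyspaceFilterSketch {
  public static void main(String[] args) {
    // Mixing exact and regex rules forces client-side filtering:
    KeyspaceFilter mixed =
        KeyspaceFilter.newInstance("sketch", Arrays.asList("ks1", "!ks2", "/sales_.*/"));
    assert mixed.getWhereClause().isEmpty(); // no server-side WHERE IN clause
    assert mixed.includes("ks1"); // included by name
    assert !mixed.includes("ks2"); // excluded by name
    assert mixed.includes("sales_2019"); // matches the regex include

    // Exact includes only: the filtering can be pushed down to the server.
    KeyspaceFilter exact = KeyspaceFilter.newInstance("sketch", Arrays.asList("ks1", "ks2"));
    assert exact.getWhereClause().startsWith(" WHERE keyspace_name IN (");
  }
}
```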
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java index 94f1ae24d78..32d1ae684ef 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,6 @@ */ package com.datastax.oss.driver.internal.core.metadata.schema.queries; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import java.util.concurrent.CompletableFuture; - public interface SchemaQueriesFactory { - SchemaQueries newInstance(CompletableFuture refreshFuture); + SchemaQueries newInstance(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java index b8242517241..0507b8cffd1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaRows.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +18,14 @@ package com.datastax.oss.driver.internal.core.metadata.schema.queries; import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.internal.core.metadata.schema.parsing.DataTypeParser; import com.datastax.oss.driver.shaded.guava.common.collect.Multimap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; /** * The system rows returned by the queries for a schema refresh, categorized by keyspace/table where @@ -32,6 +35,10 @@ */ public interface SchemaRows { + /** The node that was used to retrieve the schema information. */ + @NonNull + Node getNode(); + List keyspaces(); List virtualKeyspaces(); @@ -56,9 +63,11 @@ public interface SchemaRows { DataTypeParser dataTypeParser(); - /** - * The future to complete when the schema refresh is complete (here just to be propagated further - * down the chain). - */ - CompletableFuture refreshFuture(); + default Map> vertices() { + return new LinkedHashMap<>(); + } + + default Map> edges() { + return new LinkedHashMap<>(); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java index 0838b26e728..86a4d1912f4 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefresh.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
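The two default methods above keep non-graph implementations unchanged: only a DSE 6.8 implementation needs to override them, and consumers can iterate the maps unconditionally. A hypothetical consumer sketch (the generics are assumed to follow the keyspace-to-table multimap shape used by the other accessors):

```java
// Assumed imports: CqlIdentifier, AdminRow, and the driver's shaded Guava Multimap.
static void parseGraphMetadata(SchemaRows rows) {
  // Empty by default, so no backend check is needed before iterating:
  for (Map.Entry<CqlIdentifier, Multimap<CqlIdentifier, AdminRow>> entry :
      rows.vertices().entrySet()) {
    CqlIdentifier keyspace = entry.getKey();
    Multimap<CqlIdentifier, AdminRow> verticesByTable = entry.getValue();
    // ... build per-table vertex metadata for this keyspace ...
  }
}
```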
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,7 +32,6 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.Sets; import java.util.Map; -import java.util.Objects; import java.util.function.BiFunction; import java.util.function.Function; import net.jcip.annotations.ThreadSafe; @@ -62,12 +63,6 @@ public Result compute( oldMetadata.withSchema(this.newKeyspaces, tokenMapEnabled, context), events.build()); } - private static boolean shallowEquals(KeyspaceMetadata keyspace1, KeyspaceMetadata keyspace2) { - return Objects.equals(keyspace1.getName(), keyspace2.getName()) - && keyspace1.isDurableWrites() == keyspace2.isDurableWrites() - && Objects.equals(keyspace1.getReplication(), keyspace2.getReplication()); - } - /** * Computes the exact set of events to emit when a keyspace has changed. * @@ -83,7 +78,7 @@ private void computeEvents( if (oldKeyspace == null) { events.add(KeyspaceChangeEvent.created(newKeyspace)); } else { - if (!shallowEquals(oldKeyspace, newKeyspace)) { + if (!oldKeyspace.shallowEquals(newKeyspace)) { events.add(KeyspaceChangeEvent.updated(oldKeyspace, newKeyspace)); } computeChildEvents(oldKeyspace, newKeyspace, events); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java index 3006151648a..ff7642d0c18 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedToken.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java index c53296f1878..5dc3aa3aa45 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
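The static helper deleted above moved onto `KeyspaceMetadata` itself. Reconstructed from the removed code, the instance method presumably looks like the sketch below (the actual default lives in `KeyspaceMetadata` and may differ in details):

```java
// Sketch based on the deleted static helper; hypothetical placement on the interface.
default boolean shallowEquals(KeyspaceMetadata other) {
  return Objects.equals(getName(), other.getName())
      && isDurableWrites() == other.isDurableWrites()
      && Objects.equals(getReplication(), other.getReplication());
}
```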
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java index 529c5a93303..7e95b7c01c9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRange.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java new file mode 100644 index 00000000000..099d8b55129 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/CanonicalNodeSetBuilder.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.token; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import net.jcip.annotations.NotThreadSafe; + +/** + * A reusable set builder that guarantees that identical sets (same elements in the same order) will + * be represented by the same instance. + */ +@NotThreadSafe +class CanonicalNodeSetBuilder { + + private final Map<List<Node>, Set<Node>> canonicalSets = new HashMap<>(); + private final List<Node> elements = new ArrayList<>(); + + void add(Node node) { + // This is O(n), but the cardinality is low (max possible size is the replication factor). + if (!elements.contains(node)) { + elements.add(node); + } + } + + int size() { + return elements.size(); + } + + Set<Node> build() { + Set<Node> canonical = canonicalSets.get(elements); + if (canonical == null) { + canonical = ImmutableSet.copyOf(elements); + // Store an immutable copy as the key: `elements` is mutated by add() and clear(), + // and a HashMap key must never change while it is in the map. + canonicalSets.put(ImmutableList.copyOf(elements), canonical); + } + return canonical; + } + + void clear() { + elements.clear(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java index 603d1af07bf..a5da85195c6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultReplicationStrategyFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,6 +43,8 @@ public ReplicationStrategy newInstance(Map<String, String> replicationConfig) { return new SimpleReplicationStrategy(replicationConfig); case "org.apache.cassandra.locator.NetworkTopologyStrategy": return new NetworkTopologyReplicationStrategy(replicationConfig, logPrefix); + case "org.apache.cassandra.locator.EverywhereStrategy": + return new EverywhereReplicationStrategy(); default: throw new IllegalArgumentException("Unsupported replication strategy: " + strategyClass); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java index 8717e4fb9d5..8226ddee2c7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenFactoryRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
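A usage sketch for the builder above (hypothetical; `node1` and `node2` stand in for real `Node` instances, and same-package access is assumed since the class is package-private):

```java
static void demo(Node node1, Node node2) {
  CanonicalNodeSetBuilder builder = new CanonicalNodeSetBuilder();
  builder.add(node1);
  builder.add(node2);
  builder.add(node1); // duplicates are ignored
  Set<Node> first = builder.build();

  builder.clear(); // resets the elements but keeps the canonical cache
  builder.add(node1);
  builder.add(node2);
  Set<Node> second = builder.build();

  // Same elements in the same order: the very same instance, not merely an equal set,
  // so a large token map can share replica sets instead of duplicating them.
  assert first == second;
}
```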
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java index 0eeb399d672..8c59fb73847 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMap.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java new file mode 100644 index 00000000000..1973c07f5f8 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/EverywhereReplicationStrategy.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.token; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public class EverywhereReplicationStrategy implements ReplicationStrategy { + + @Override + public Map> computeReplicasByToken( + Map tokenToPrimary, List ring) { + ImmutableMap.Builder> result = ImmutableMap.builder(); + Set allNodes = ImmutableSet.copyOf(tokenToPrimary.values()); + for (Token token : tokenToPrimary.keySet()) { + result = result.put(token, allNodes); + } + return result.build(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java index d5f6937dd93..80bad8a36b1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/KeyspaceTokenMap.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
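`EverywhereStrategy` is DSE's strategy for keyspaces that must be present on every node, and the computation above simply assigns the full node set to every token. A hypothetical check:

```java
static void demo(Map<Token, Node> tokenToPrimary, List<Token> ring) {
  Map<Token, Set<Node>> replicas =
      new EverywhereReplicationStrategy().computeReplicasByToken(tokenToPrimary, ring);
  Set<Node> allNodes = ImmutableSet.copyOf(tokenToPrimary.values());
  for (Set<Node> nodes : replicas.values()) {
    assert nodes.equals(allNodes); // every token is replicated everywhere
  }
}
```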
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -52,8 +54,7 @@ static KeyspaceTokenMap build( try { ReplicationStrategy strategy = replicationStrategyFactory.newInstance(replicationConfig); - SetMultimap replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); SetMultimap tokenRangesByNode; if (ring.size() == 1) { // We forced the single range to ]minToken,minToken], make sure to use that instead of @@ -79,13 +80,13 @@ static KeyspaceTokenMap build( private final List ring; private final SetMultimap tokenRangesByNode; - private final SetMultimap replicasByToken; + private final Map> replicasByToken; private final TokenFactory tokenFactory; private KeyspaceTokenMap( List ring, SetMultimap tokenRangesByNode, - SetMultimap replicasByToken, + Map> replicasByToken, TokenFactory tokenFactory) { this.ring = ring; this.tokenRangesByNode = tokenRangesByNode; @@ -104,7 +105,7 @@ Set getReplicas(ByteBuffer partitionKey) { Set getReplicas(Token token) { // If the token happens to be one of the "primary" tokens, get result directly Set nodes = replicasByToken.get(token); - if (!nodes.isEmpty()) { + if (nodes != null) { return nodes; } // Otherwise, find the closest "primary" token on the ring @@ -119,7 +120,7 @@ Set getReplicas(Token token) { } private static SetMultimap buildTokenRangesByNode( - Set tokenRanges, SetMultimap replicasByToken) { + Set tokenRanges, Map> replicasByToken) { ImmutableSetMultimap.Builder result = ImmutableSetMultimap.builder(); for (TokenRange range : tokenRanges) { for (Node node : replicasByToken.get(range.getEnd())) { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java index 6e1395fbf2d..916947e598c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/LocalReplicationStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
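Note the subtle change in `getReplicas` above: a Guava `SetMultimap` returns an empty view for an absent key, whereas a plain `Map` returns `null`, so the presence test had to change from `!nodes.isEmpty()` to `nodes != null`. In miniature (hypothetical snippet, using the driver's shaded Guava):

```java
static void demo(Token token) {
  SetMultimap<Token, Node> multimap = ImmutableSetMultimap.of();
  assert multimap.get(token).isEmpty(); // empty view, never null

  Map<Token, Set<Node>> map = ImmutableMap.of();
  assert map.get(token) == null; // plain maps return null for absent keys
}
```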
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,21 +19,23 @@ import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import java.util.List; import java.util.Map; +import java.util.Set; import net.jcip.annotations.ThreadSafe; @ThreadSafe class LocalReplicationStrategy implements ReplicationStrategy { @Override - public SetMultimap computeReplicasByToken( + public Map> computeReplicasByToken( Map tokenToPrimary, List ring) { - ImmutableSetMultimap.Builder result = ImmutableSetMultimap.builder(); + ImmutableMap.Builder> result = ImmutableMap.builder(); + // Each token maps to exactly one node for (Map.Entry entry : tokenToPrimary.entrySet()) { - result.put(entry.getKey(), entry.getValue()); + result.put(entry.getKey(), ImmutableSet.of(entry.getValue())); } return result.build(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java index fdf67ad2461..1b3072d4f22 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3Token.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java index 252c59f671b..2d4dc975a63 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
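With the local strategy each replica set is just the singleton of the token's primary owner; a hypothetical check mirroring the loop above:

```java
static void demo(Map<Token, Node> tokenToPrimary, List<Token> ring) {
  Map<Token, Set<Node>> replicas =
      new LocalReplicationStrategy().computeReplicasByToken(tokenToPrimary, ring);
  for (Map.Entry<Token, Node> entry : tokenToPrimary.entrySet()) {
    assert replicas.get(entry.getKey()).equals(ImmutableSet.of(entry.getValue()));
  }
}
```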
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java index 5586204375f..2a87cd2c3b6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRange.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java index 8315ccac2ab..0ed81083ad6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,9 +20,7 @@ import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; import com.datastax.oss.driver.shaded.guava.common.collect.Maps; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; import com.datastax.oss.driver.shaded.guava.common.collect.Sets; import java.util.HashMap; import java.util.HashSet; @@ -40,15 +40,15 @@ class NetworkTopologyReplicationStrategy implements ReplicationStrategy { LoggerFactory.getLogger(NetworkTopologyReplicationStrategy.class); private final Map replicationConfig; - private final Map replicationFactors; + private final Map replicationFactors; private final String logPrefix; NetworkTopologyReplicationStrategy(Map replicationConfig, String logPrefix) { this.replicationConfig = replicationConfig; - ImmutableMap.Builder factorsBuilder = ImmutableMap.builder(); + ImmutableMap.Builder factorsBuilder = ImmutableMap.builder(); for (Map.Entry entry : replicationConfig.entrySet()) { if (!entry.getKey().equals("class")) { - factorsBuilder.put(entry.getKey(), Integer.parseInt(entry.getValue())); + factorsBuilder.put(entry.getKey(), ReplicationFactor.fromString(entry.getValue())); } } this.replicationFactors = factorsBuilder.build(); @@ -56,21 +56,26 @@ class NetworkTopologyReplicationStrategy implements ReplicationStrategy { } @Override - public SetMultimap computeReplicasByToken( + public Map> computeReplicasByToken( Map tokenToPrimary, List ring) { - // This is essentially a copy of org.apache.cassandra.locator.NetworkTopologyStrategy - ImmutableSetMultimap.Builder result = ImmutableSetMultimap.builder(); + // The implementation of this method was adapted from + // org.apache.cassandra.locator.NetworkTopologyStrategy + + ImmutableMap.Builder> result = ImmutableMap.builder(); Map> racks = getRacksInDcs(tokenToPrimary.values()); Map dcNodeCount = Maps.newHashMapWithExpectedSize(replicationFactors.size()); Set warnedDcs = Sets.newHashSetWithExpectedSize(replicationFactors.size()); + CanonicalNodeSetBuilder replicasBuilder = new CanonicalNodeSetBuilder(); + // find maximum number of nodes in each DC for (Node node : Sets.newHashSet(tokenToPrimary.values())) { String dc = node.getDatacenter(); - dcNodeCount.putIfAbsent(dc, 0); - dcNodeCount.put(dc, dcNodeCount.get(dc) + 1); + dcNodeCount.merge(dc, 1, Integer::sum); } for (int i = 0; i < ring.size(); i++) { + replicasBuilder.clear(); + Map> allDcReplicas = new HashMap<>(); Map> seenRacks = new HashMap<>(); Map> skippedDcEndpoints = new HashMap<>(); @@ -80,30 +85,30 @@ public SetMultimap computeReplicasByToken( skippedDcEndpoints.put(dc, new LinkedHashSet<>()); // preserve order } - // Preserve order - primary replica will be first - Set replicas = new LinkedHashSet<>(); for (int j = 0; j < ring.size() && !allDone(allDcReplicas, dcNodeCount); j++) { Node h = tokenToPrimary.get(getTokenWrapping(i + j, ring)); String dc = h.getDatacenter(); if (dc == null || !allDcReplicas.containsKey(dc)) { continue; } - Integer rf = replicationFactors.get(dc); + ReplicationFactor dcConfig = replicationFactors.get(dc); + assert 
dcConfig != null; // since allDcReplicas.containsKey(dc) + int rf = dcConfig.fullReplicas(); Set dcReplicas = allDcReplicas.get(dc); - if (rf == null || dcReplicas.size() >= rf) { + if (dcReplicas.size() >= rf) { continue; } String rack = h.getRack(); // Check if we already visited all racks in dc if (rack == null || seenRacks.get(dc).size() == racks.get(dc).size()) { - replicas.add(h); + replicasBuilder.add(h); dcReplicas.add(h); } else { // Is this a new rack? if (seenRacks.get(dc).contains(rack)) { skippedDcEndpoints.get(dc).add(h); } else { - replicas.add(h); + replicasBuilder.add(h); dcReplicas.add(h); seenRacks.get(dc).add(rack); // If we've run out of distinct racks, add the nodes skipped so far @@ -111,7 +116,7 @@ public SetMultimap computeReplicasByToken( Iterator skippedIt = skippedDcEndpoints.get(dc).iterator(); while (skippedIt.hasNext() && dcReplicas.size() < rf) { Node nextSkipped = skippedIt.next(); - replicas.add(nextSkipped); + replicasBuilder.add(nextSkipped); dcReplicas.add(nextSkipped); } } @@ -123,7 +128,7 @@ public SetMultimap computeReplicasByToken( // Warn the user because that leads to quadratic performance of this method (JAVA-702). for (Map.Entry> entry : allDcReplicas.entrySet()) { String dcName = entry.getKey(); - int expectedFactor = replicationFactors.get(dcName); + int expectedFactor = replicationFactors.get(dcName).fullReplicas(); int achievedFactor = entry.getValue().size(); if (achievedFactor < expectedFactor && !warnedDcs.contains(dcName)) { LOG.warn( @@ -139,7 +144,7 @@ public SetMultimap computeReplicasByToken( } } - result.putAll(ring.get(i), replicas); + result.put(ring.get(i), replicasBuilder.build()); } return result.build(); } @@ -148,7 +153,7 @@ private boolean allDone(Map> map, Map dcNodeC for (Map.Entry> entry : map.entrySet()) { String dc = entry.getKey(); int dcCount = (dcNodeCount.get(dc) == null) ? 0 : dcNodeCount.get(dc); - if (entry.getValue().size() < Math.min(replicationFactors.get(dc), dcCount)) { + if (entry.getValue().size() < Math.min(replicationFactors.get(dc).fullReplicas(), dcCount)) { return false; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java index b9f6f2d2fb1..52e32fef522 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomToken.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
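To see the per-DC parsing end to end, here is a hypothetical replication options map as it would arrive from `system_schema.keyspaces`; the `"3/1"` value uses the transient notation handled by `ReplicationFactor.fromString`, so only two full replicas are placed in `dc2` (same-package access assumed, the class is package-private):

```java
static void demo(Map<Token, Node> tokenToPrimary, List<Token> ring) {
  Map<String, String> replication =
      ImmutableMap.of(
          "class", "org.apache.cassandra.locator.NetworkTopologyStrategy",
          "dc1", "3", // three full replicas in dc1
          "dc2", "3/1"); // three replicas in dc2, one of them transient
  ReplicationStrategy strategy = new NetworkTopologyReplicationStrategy(replication, "sketch");
  Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring);
  assert replicasByToken.size() == ring.size(); // one canonicalized replica set per token
}
```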
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java index b4e9fdaa28a..59f1bcc865b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java index 74dc36265de..d1a98a185db 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRange.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java new file mode 100644 index 00000000000..966372da621 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactor.java @@ -0,0 +1,82 @@ +package com.datastax.oss.driver.internal.core.metadata.token; +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import java.util.Objects; + +// This class is a subset of server version at org.apache.cassandra.locator.ReplicationFactor +public class ReplicationFactor { + private final int allReplicas; + private final int fullReplicas; + private final int transientReplicas; + + public ReplicationFactor(int allReplicas, int transientReplicas) { + this.allReplicas = allReplicas; + this.transientReplicas = transientReplicas; + this.fullReplicas = allReplicas - transientReplicas; + } + + public ReplicationFactor(int allReplicas) { + this(allReplicas, 0); + } + + public int fullReplicas() { + return fullReplicas; + } + + public int transientReplicas() { + return transientReplicas; + } + + public boolean hasTransientReplicas() { + return allReplicas != fullReplicas; + } + + public static ReplicationFactor fromString(String s) { + if (s.contains("/")) { + + int slash = s.indexOf('/'); + String allPart = s.substring(0, slash); + String transientPart = s.substring(slash + 1); + return new ReplicationFactor(Integer.parseInt(allPart), Integer.parseInt(transientPart)); + } else { + return new ReplicationFactor(Integer.parseInt(s), 0); + } + } + + @Override + public String toString() { + return allReplicas + (hasTransientReplicas() ? "/" + transientReplicas() : ""); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ReplicationFactor)) { + return false; + } + ReplicationFactor that = (ReplicationFactor) o; + return allReplicas == that.allReplicas && fullReplicas == that.fullReplicas; + } + + @Override + public int hashCode() { + return Objects.hash(allReplicas, fullReplicas); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java index 1049c66c81b..e16841e5107 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
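Concretely, for the `fromString` format above (the assertions follow directly from the code; the harness itself is hypothetical):

```java
ReplicationFactor rf = ReplicationFactor.fromString("3/1"); // 3 replicas total, 1 transient
assert rf.fullReplicas() == 2;
assert rf.transientReplicas() == 1;
assert rf.hasTransientReplicas();
assert "3/1".equals(rf.toString());

ReplicationFactor plain = ReplicationFactor.fromString("3"); // classic single-number RF
assert plain.fullReplicas() == 3;
assert !plain.hasTransientReplicas();
```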
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +19,10 @@ import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.token.Token; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; import java.util.List; import java.util.Map; +import java.util.Set; public interface ReplicationStrategy { - SetMultimap computeReplicasByToken( - Map tokenToPrimary, List ring); + Map> computeReplicasByToken(Map tokenToPrimary, List ring); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java index 2b7bff0316c..4f01d2ac920 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationStrategyFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java index 4e02dee46bd..db2c16112a1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,9 +21,7 @@ import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSetMultimap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; -import java.util.LinkedHashSet; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -30,31 +30,32 @@ @ThreadSafe class SimpleReplicationStrategy implements ReplicationStrategy { - private final int replicationFactor; + private final ReplicationFactor replicationFactor; SimpleReplicationStrategy(Map replicationConfig) { this(extractReplicationFactor(replicationConfig)); } @VisibleForTesting - SimpleReplicationStrategy(int replicationFactor) { + SimpleReplicationStrategy(ReplicationFactor replicationFactor) { this.replicationFactor = replicationFactor; } @Override - public SetMultimap computeReplicasByToken( + public Map> computeReplicasByToken( Map tokenToPrimary, List ring) { - int rf = Math.min(replicationFactor, ring.size()); + int rf = Math.min(replicationFactor.fullReplicas(), ring.size()); + + ImmutableMap.Builder> result = ImmutableMap.builder(); + CanonicalNodeSetBuilder replicasBuilder = new CanonicalNodeSetBuilder(); - ImmutableSetMultimap.Builder result = ImmutableSetMultimap.builder(); for (int i = 0; i < ring.size(); i++) { - // Consecutive sections of the ring can be assigned to the same node - Set replicas = new LinkedHashSet<>(); - for (int j = 0; j < ring.size() && replicas.size() < rf; j++) { - replicas.add(tokenToPrimary.get(getTokenWrapping(i + j, ring))); + replicasBuilder.clear(); + for (int j = 0; j < ring.size() && replicasBuilder.size() < rf; j++) { + replicasBuilder.add(tokenToPrimary.get(getTokenWrapping(i + j, ring))); } - result.putAll(ring.get(i), replicas); + result.put(ring.get(i), replicasBuilder.build()); } return result.build(); } @@ -63,9 +64,9 @@ private static Token getTokenWrapping(int i, List ring) { return ring.get(i % ring.size()); } - private static int extractReplicationFactor(Map replicationConfig) { + private static ReplicationFactor extractReplicationFactor(Map replicationConfig) { String factorString = replicationConfig.get("replication_factor"); Preconditions.checkNotNull(factorString, "Missing replication factor in " + replicationConfig); - return Integer.parseInt(factorString); + return ReplicationFactor.fromString(factorString); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java index e727a36cbb2..8a1731be385 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
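`SimpleReplicationStrategy.computeReplicasByToken` above walks the ring clockwise from each position, wrapping around, until it has collected `rf` distinct owners. A simplified, self-contained sketch of that walk, using a plain `LinkedHashSet` in place of the driver's `CanonicalNodeSetBuilder` and generic type parameters standing in for the driver's `Token` and `Node`:

```java
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class SimpleStrategySketch {
  // For each ring position i, walk clockwise (wrapping with modulo) and add
  // the primary owner of each visited token until rf distinct nodes are found.
  static <TokenT, NodeT> Map<TokenT, Set<NodeT>> computeReplicasByToken(
      Map<TokenT, NodeT> tokenToPrimary, List<TokenT> ring, int replicationFactor) {
    int rf = Math.min(replicationFactor, ring.size());
    Map<TokenT, Set<NodeT>> result = new LinkedHashMap<>();
    for (int i = 0; i < ring.size(); i++) {
      Set<NodeT> replicas = new LinkedHashSet<>();
      for (int j = 0; j < ring.size() && replicas.size() < rf; j++) {
        replicas.add(tokenToPrimary.get(ring.get((i + j) % ring.size())));
      }
      result.put(ring.get(i), replicas);
    }
    return result;
  }
}
```

The `CanonicalNodeSetBuilder` used in the diff plays the same role while apparently allowing identical replica sets to be reused, which matters because consecutive ring sections are often assigned to the same nodes.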
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java index 740d14ce924..f7e31da9870 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenFactoryRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java index 4c3ffe21b50..f63f9dd1ab4 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -159,8 +161,8 @@ public List intersectWith(@NonNull TokenRange that) { if (t1.intersects(t2)) { intersected.add( newTokenRange( - (contains(t1, t2.getStart(), true)) ? t2.getStart() : t1.getStart(), - (contains(t1, t2.getEnd(), false)) ? t2.getEnd() : t1.getEnd())); + contains(t1, t2.getStart(), true) ? t2.getStart() : t1.getStart(), + contains(t1, t2.getEnd(), false) ? t2.getEnd() : t1.getEnd())); } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java new file mode 100644 index 00000000000..3d7dc50a7c0 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/AbstractMetricUpdater.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; +import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.session.RequestProcessor; +import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; +import com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler; +import com.datastax.oss.driver.shaded.guava.common.cache.Cache; +import edu.umd.cs.findbugs.annotations.Nullable; +import io.netty.util.Timeout; +import java.time.Duration; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class AbstractMetricUpdater implements MetricUpdater { + + private static final Logger LOG = LoggerFactory.getLogger(AbstractMetricUpdater.class); + + // Not final for testing purposes + public static Duration MIN_EXPIRE_AFTER = Duration.ofMinutes(5); + + protected final InternalDriverContext context; + protected final Set enabledMetrics; + + private final AtomicReference metricsExpirationTimeoutRef = new AtomicReference<>(); + private final Duration expireAfter; + + protected AbstractMetricUpdater(InternalDriverContext context, Set enabledMetrics) { + this.context = context; + this.enabledMetrics = enabledMetrics; + DriverExecutionProfile config = context.getConfig().getDefaultProfile(); + Duration expireAfter = config.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER); + if (expireAfter.compareTo(MIN_EXPIRE_AFTER) < 0) { + LOG.warn( + "[{}] Value too low for {}: {}. 
Forcing to {} instead.", + context.getSessionName(), + DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), + expireAfter, + MIN_EXPIRE_AFTER); + expireAfter = MIN_EXPIRE_AFTER; + } + this.expireAfter = expireAfter; + } + + @Override + public boolean isEnabled(MetricT metric, String profileName) { + return enabledMetrics.contains(metric); + } + + public Duration getExpireAfter() { + return expireAfter; + } + + protected int connectedNodes() { + int count = 0; + for (Node node : context.getMetadataManager().getMetadata().getNodes().values()) { + if (node.getOpenConnections() > 0) { + count++; + } + } + return count; + } + + protected int throttlingQueueSize() { + RequestThrottler requestThrottler = context.getRequestThrottler(); + if (requestThrottler instanceof ConcurrencyLimitingRequestThrottler) { + return ((ConcurrencyLimitingRequestThrottler) requestThrottler).getQueueSize(); + } + if (requestThrottler instanceof RateLimitingRequestThrottler) { + return ((RateLimitingRequestThrottler) requestThrottler).getQueueSize(); + } + LOG.warn( + "[{}] Metric {} does not support {}, it will always return 0", + context.getSessionName(), + DefaultSessionMetric.THROTTLING_QUEUE_SIZE.getPath(), + requestThrottler.getClass().getName()); + return 0; + } + + protected long preparedStatementCacheSize() { + Cache cache = getPreparedStatementCache(); + if (cache == null) { + LOG.warn( + "[{}] Metric {} is enabled in the config, " + + "but it looks like no CQL prepare processor is registered. " + + "The gauge will always return 0", + context.getSessionName(), + DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath()); + return 0L; + } + return cache.size(); + } + + @Nullable + protected Cache getPreparedStatementCache() { + // By default, both the sync processor and the async ones are registered and they share the same + // cache. But with a custom processor registry, there could be only one of the two present. + for (RequestProcessor processor : context.getRequestProcessorRegistry().getProcessors()) { + if (processor instanceof CqlPrepareAsyncProcessor) { + return ((CqlPrepareAsyncProcessor) processor).getCache(); + } else if (processor instanceof CqlPrepareSyncProcessor) { + return ((CqlPrepareSyncProcessor) processor).getCache(); + } + } + return null; + } + + protected int availableStreamIds(Node node) { + ChannelPool pool = context.getPoolManager().getPools().get(node); + return (pool == null) ? 0 : pool.getAvailableIds(); + } + + protected int inFlightRequests(Node node) { + ChannelPool pool = context.getPoolManager().getPools().get(node); + return (pool == null) ? 0 : pool.getInFlight(); + } + + protected int orphanedStreamIds(Node node) { + ChannelPool pool = context.getPoolManager().getPools().get(node); + return (pool == null) ? 
0 : pool.getOrphanedIds(); + } + + protected void startMetricsExpirationTimeout() { + metricsExpirationTimeoutRef.accumulateAndGet( + newTimeout(), + (current, update) -> { + if (current == null) { + return update; + } else { + update.cancel(); + return current; + } + }); + } + + protected void cancelMetricsExpirationTimeout() { + Timeout t = metricsExpirationTimeoutRef.getAndSet(null); + if (t != null) { + t.cancel(); + } + } + + protected Timeout newTimeout() { + return context + .getNettyOptions() + .getTimer() + .newTimeout( + t -> { + clearMetrics(); + cancelMetricsExpirationTimeout(); + }, + expireAfter.toNanos(), + TimeUnit.NANOSECONDS); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java new file mode 100644 index 00000000000..c1c2e80e387 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricId.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Objects; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public final class DefaultMetricId implements MetricId { + + private final String name; + private final ImmutableMap tags; + + public DefaultMetricId(String name, Map tags) { + this.name = Objects.requireNonNull(name, "name cannot be null"); + this.tags = ImmutableMap.copyOf(Objects.requireNonNull(tags, "tags cannot be null")); + } + + @NonNull + @Override + public String getName() { + return name; + } + + @NonNull + @Override + public Map getTags() { + return tags; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DefaultMetricId that = (DefaultMetricId) o; + return name.equals(that.name) && tags.equals(that.tags); + } + + @Override + public int hashCode() { + return Objects.hash(name, tags); + } + + @Override + public String toString() { + return name + tags; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java new file mode 100644 index 00000000000..d4bacb35df9 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGenerator.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
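`startMetricsExpirationTimeout` above relies on `AtomicReference.accumulateAndGet` so that concurrent callers agree on a single scheduled timeout: the first caller's timeout wins, and any losing timeout is cancelled immediately. A standalone sketch of the pattern, with a toy `Timeout` interface standing in for `io.netty.util.Timeout`:

```java
import java.util.concurrent.atomic.AtomicReference;

final class ExpirationTimeoutSketch {
  interface Timeout {
    void cancel();
  }

  private final AtomicReference<Timeout> ref = new AtomicReference<>();

  // Schedule-once: if a timeout is already pending, discard (cancel) the new one.
  void start(Timeout fresh) {
    ref.accumulateAndGet(
        fresh,
        (current, update) -> {
          if (current == null) {
            return update; // we won the race, keep our timeout
          }
          update.cancel(); // a timeout is already pending, drop ours
          return current;
        });
  }

  // Cancel-once: getAndSet(null) guarantees only one caller cancels the timeout.
  void cancel() {
    Timeout pending = ref.getAndSet(null);
    if (pending != null) {
      pending.cancel();
    }
  }
}
```

One caveat: `accumulateAndGet` may retry the accumulator under contention, so the `cancel()` side effect relies on this path being lightly contended; that holds here because node state events are delivered on a single admin event executor (see `DropwizardMetricsFactory` further down).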
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; + +/** + * The default {@link MetricIdGenerator}. + * + *
<p>
      This generator generates unique names, containing the session name, the node endpoint (for + * node metrics), and the metric prefix. It does not generate tags. + */ +public class DefaultMetricIdGenerator implements MetricIdGenerator { + + private final String sessionPrefix; + private final String nodePrefix; + + @SuppressWarnings("unused") + public DefaultMetricIdGenerator(DriverContext context) { + String sessionName = context.getSessionName(); + String prefix = + Objects.requireNonNull( + context + .getConfig() + .getDefaultProfile() + .getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")); + sessionPrefix = prefix.isEmpty() ? sessionName + '.' : prefix + '.' + sessionName + '.'; + nodePrefix = sessionPrefix + "nodes."; + } + + @NonNull + @Override + public MetricId sessionMetricId(@NonNull SessionMetric metric) { + return new DefaultMetricId(sessionPrefix + metric.getPath(), ImmutableMap.of()); + } + + @NonNull + @Override + public MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric) { + return new DefaultMetricId( + nodePrefix + node.getEndPoint().asMetricPrefix() + '.' + metric.getPath(), + ImmutableMap.of()); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java index 6c9079bba8f..b15dc955760 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetrics.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java new file mode 100644 index 00000000000..7869f8a8af6 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactory.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
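`DefaultMetricIdGenerator` above encodes everything into the metric name and leaves the tag map empty. The snippet below mirrors the constructor's prefix logic to show the shape of the generated names; the session name and the endpoint rendering (`EndPoint.asMetricPrefix()`) are illustrative assumptions:

```java
public class MetricNameSketch {
  public static void main(String[] args) {
    String sessionName = "s0"; // illustrative session name
    String prefix = "";        // METRICS_ID_GENERATOR_PREFIX, empty by default

    // Mirrors the prefix computation in the constructor above.
    String sessionPrefix =
        prefix.isEmpty() ? sessionName + '.' : prefix + '.' + sessionName + '.';
    String nodePrefix = sessionPrefix + "nodes.";

    // Session metric: "s0.cql-requests"
    System.out.println(sessionPrefix + "cql-requests");
    // Node metric: "s0.nodes.10_1_2_3:9042.cql-messages" (endpoint rendering illustrative)
    System.out.println(nodePrefix + "10_1_2_3:9042" + '.' + "cql-messages");
  }
}
```

With a non-empty prefix such as `"cassandra"`, the same metrics would be named `cassandra.s0.cql-requests` and `cassandra.s0.nodes.10_1_2_3:9042.cql-messages`.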
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static com.datastax.oss.driver.internal.core.util.Dependency.DROPWIZARD; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; +import java.util.Optional; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class DefaultMetricsFactory implements MetricsFactory { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultMetricsFactory.class); + + private final MetricsFactory delegate; + + @SuppressWarnings("unused") + public DefaultMetricsFactory(DriverContext context) { + if (DefaultDependencyChecker.isPresent(DROPWIZARD)) { + this.delegate = new DropwizardMetricsFactory(context); + } else { + this.delegate = new NoopMetricsFactory(context); + } + LOG.debug("[{}] Using {}", context.getSessionName(), delegate.getClass().getSimpleName()); + } + + @Override + public Optional getMetrics() { + return delegate.getMetrics(); + } + + @Override + public SessionMetricUpdater getSessionUpdater() { + return delegate.getSessionUpdater(); + } + + @Override + public NodeMetricUpdater newNodeUpdater(Node node) { + return delegate.newNodeUpdater(node); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java new file mode 100644 index 00000000000..8332cdcca18 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricsFactorySubstitutions.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metrics; + +import static com.datastax.oss.driver.internal.core.util.Dependency.DROPWIZARD; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.Delete; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import com.oracle.svm.core.annotate.TargetElement; +import java.util.function.BooleanSupplier; + +@SuppressWarnings("unused") +public class DefaultMetricsFactorySubstitutions { + + @TargetClass(value = DefaultMetricsFactory.class, onlyWith = DropwizardMissing.class) + public static final class DefaultMetricsFactoryDropwizardMissing { + + @Alias + @TargetElement(name = "delegate") + @SuppressWarnings({"FieldCanBeLocal", "FieldMayBeFinal"}) + private MetricsFactory delegate; + + @Substitute + @TargetElement(name = TargetElement.CONSTRUCTOR_NAME) + public DefaultMetricsFactoryDropwizardMissing(DriverContext context) { + this.delegate = new NoopMetricsFactory(context); + } + } + + @TargetClass(value = DropwizardMetricsFactory.class, onlyWith = DropwizardMissing.class) + @Delete + public static final class DeleteDropwizardMetricsFactory {} + + public static class DropwizardMissing implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return !GraalDependencyChecker.isPresent(DROPWIZARD); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java index 0c47637d780..9377fb3a17e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,110 +17,183 @@ */ package com.datastax.oss.driver.internal.core.metrics; +import com.codahale.metrics.Counter; +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; import com.codahale.metrics.Metric; import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.Reservoir; import com.codahale.metrics.Timer; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import edu.umd.cs.findbugs.annotations.Nullable; import java.time.Duration; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @ThreadSafe -public abstract class DropwizardMetricUpdater implements MetricUpdater { +public abstract class DropwizardMetricUpdater extends AbstractMetricUpdater { private static final Logger LOG = LoggerFactory.getLogger(DropwizardMetricUpdater.class); - protected final Set enabledMetrics; protected final MetricRegistry registry; - protected DropwizardMetricUpdater(Set enabledMetrics, MetricRegistry registry) { - this.enabledMetrics = enabledMetrics; + protected final ConcurrentMap metrics = new ConcurrentHashMap<>(); + + protected final ConcurrentMap reservoirs = new ConcurrentHashMap<>(); + + protected DropwizardMetricUpdater( + InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { + super(context, enabledMetrics); this.registry = registry; } - protected abstract String buildFullName(MetricT metric, String profileName); + @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) + public T getMetric( + MetricT metric, @SuppressWarnings("unused") String profileName) { + return (T) metrics.get(metric); + } @Override - public void incrementCounter(MetricT metric, String profileName, long amount) { + public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) { if (isEnabled(metric, profileName)) { - registry.counter(buildFullName(metric, profileName)).inc(amount); + getOrCreateCounterFor(metric).inc(amount); } } @Override - public void updateHistogram(MetricT metric, String profileName, long value) { + public void updateHistogram(MetricT metric, @Nullable String profileName, long value) { if (isEnabled(metric, profileName)) { - registry.histogram(buildFullName(metric, profileName)).update(value); + getOrCreateHistogramFor(metric).update(value); } } @Override - public void markMeter(MetricT metric, String profileName, long amount) { + public void markMeter(MetricT metric, @Nullable String profileName, long amount) { if (isEnabled(metric, profileName)) { - registry.meter(buildFullName(metric, profileName)).mark(amount); + getOrCreateMeterFor(metric).mark(amount); } } @Override - public void updateTimer(MetricT metric, String profileName, long duration, TimeUnit unit) { + public void updateTimer( + MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) { if (isEnabled(metric, profileName)) { - registry.timer(buildFullName(metric, 
profileName)).update(duration, unit); + getOrCreateTimerFor(metric).update(duration, unit); } } - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) - public T getMetric(MetricT metric, String profileName) { - return (T) registry.getMetrics().get(buildFullName(metric, profileName)); + @Override + public void clearMetrics() { + for (MetricT metric : metrics.keySet()) { + MetricId id = getMetricId(metric); + registry.remove(id.getName()); + } + metrics.clear(); + reservoirs.clear(); } - @Override - public boolean isEnabled(MetricT metric, String profileName) { - return enabledMetrics.contains(metric); + protected abstract MetricId getMetricId(MetricT metric); + + protected void initializeGauge( + MetricT metric, DriverExecutionProfile profile, Supplier supplier) { + if (isEnabled(metric, profile.getName())) { + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + return registry.gauge(id.getName(), () -> supplier::get); + }); + } } - protected void initializeDefaultCounter(MetricT metric, String profileName) { - if (isEnabled(metric, profileName)) { - // Just initialize eagerly so that the metric appears even when it has no data yet - registry.counter(buildFullName(metric, profileName)); + protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) { + if (isEnabled(metric, profile.getName())) { + getOrCreateCounterFor(metric); } } protected void initializeHdrTimer( MetricT metric, - DriverExecutionProfile config, + DriverExecutionProfile profile, + DriverOption highestLatency, + DriverOption significantDigits, + DriverOption interval) { + if (isEnabled(metric, profile.getName())) { + reservoirs.computeIfAbsent( + metric, m -> createHdrReservoir(m, profile, highestLatency, significantDigits, interval)); + getOrCreateTimerFor(metric); + } + } + + protected Counter getOrCreateCounterFor(MetricT metric) { + return (Counter) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + return registry.counter(id.getName()); + }); + } + + protected Meter getOrCreateMeterFor(MetricT metric) { + return (Meter) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + return registry.meter(id.getName()); + }); + } + + protected Histogram getOrCreateHistogramFor(MetricT metric) { + return (Histogram) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + return registry.histogram(id.getName()); + }); + } + + protected Timer getOrCreateTimerFor(MetricT metric) { + return (Timer) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + Reservoir reservoir = reservoirs.get(metric); + Timer timer = reservoir == null ? 
new Timer() : new Timer(reservoir); + return registry.timer(id.getName(), () -> timer); + }); + } + + protected HdrReservoir createHdrReservoir( + MetricT metric, + DriverExecutionProfile profile, DriverOption highestLatencyOption, DriverOption significantDigitsOption, DriverOption intervalOption) { - String profileName = config.getName(); - if (isEnabled(metric, profileName)) { - String fullName = buildFullName(metric, profileName); - - Duration highestLatency = config.getDuration(highestLatencyOption); - final int significantDigits; - int d = config.getInt(significantDigitsOption); - if (d >= 0 && d <= 5) { - significantDigits = d; - } else { - LOG.warn( - "[{}] Configuration option {} is out of range (expected between 0 and 5, found {}); " - + "using 3 instead.", - fullName, - significantDigitsOption, - d); - significantDigits = 3; - } - Duration refreshInterval = config.getDuration(intervalOption); - - // Initialize eagerly to use the custom implementation - registry.timer( - fullName, - () -> - new Timer( - new HdrReservoir(highestLatency, significantDigits, refreshInterval, fullName))); + MetricId id = getMetricId(metric); + Duration highestLatency = profile.getDuration(highestLatencyOption); + int significantDigits = profile.getInt(significantDigitsOption); + if (significantDigits < 0 || significantDigits > 5) { + LOG.warn( + "[{}] Configuration option {} is out of range (expected between 0 and 5, " + + "found {}); using 3 instead.", + id.getName(), + significantDigitsOption, + significantDigits); + significantDigits = 3; } + Duration refreshInterval = profile.getDuration(intervalOption); + return new HdrReservoir(highestLatency, significantDigits, refreshInterval, id.getName()); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java index 76e9cb8965a..5f28f8f5060 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,17 +20,17 @@ import com.codahale.metrics.MetricRegistry; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.api.core.metrics.Metrics; import com.datastax.oss.driver.api.core.metrics.NodeMetric; import com.datastax.oss.driver.api.core.metrics.SessionMetric; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; +import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; +import io.netty.util.concurrent.EventExecutor; import java.util.Optional; import java.util.Set; import net.jcip.annotations.ThreadSafe; @@ -40,34 +42,59 @@ public class DropwizardMetricsFactory implements MetricsFactory { private static final Logger LOG = LoggerFactory.getLogger(DropwizardMetricsFactory.class); - private final String logPrefix; private final InternalDriverContext context; private final Set enabledNodeMetrics; private final MetricRegistry registry; @Nullable private final Metrics metrics; private final SessionMetricUpdater sessionUpdater; - public DropwizardMetricsFactory(InternalDriverContext context) { - this.logPrefix = context.getSessionName(); - this.context = context; - + public DropwizardMetricsFactory(DriverContext context) { + this.context = (InternalDriverContext) context; + String logPrefix = context.getSessionName(); DriverExecutionProfile config = context.getConfig().getDefaultProfile(); Set enabledSessionMetrics = - parseSessionMetricPaths(config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)); + MetricPaths.parseSessionMetricPaths( + config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix); this.enabledNodeMetrics = - parseNodeMetricPaths(config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED)); - + MetricPaths.parseNodeMetricPaths( + config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix); if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) { LOG.debug("[{}] All metrics are disabled, Session.getMetrics will be empty", logPrefix); this.registry = null; this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE; this.metrics = null; } else { - this.registry = new MetricRegistry(); - DropwizardSessionMetricUpdater dropwizardSessionUpdater = - new DropwizardSessionMetricUpdater(enabledSessionMetrics, registry, context); - this.sessionUpdater = dropwizardSessionUpdater; - this.metrics = new DefaultMetrics(registry, dropwizardSessionUpdater); + // try to get the metric registry from the context + Object possibleMetricRegistry = this.context.getMetricRegistry(); + if (possibleMetricRegistry == null) { + // metrics are enabled, but a metric registry was not supplied to the context 
+ // create a registry object + possibleMetricRegistry = new MetricRegistry(); + } + if (possibleMetricRegistry instanceof MetricRegistry) { + this.registry = (MetricRegistry) possibleMetricRegistry; + DropwizardSessionMetricUpdater dropwizardSessionUpdater = + new DropwizardSessionMetricUpdater(this.context, enabledSessionMetrics, registry); + this.sessionUpdater = dropwizardSessionUpdater; + this.metrics = new DefaultMetrics(registry, dropwizardSessionUpdater); + } else { + // Metrics are enabled, but the registry object is not an expected type + throw new IllegalArgumentException( + "Unexpected Metrics registry object. Expected registry object to be of type '" + + MetricRegistry.class.getName() + + "', but was '" + + possibleMetricRegistry.getClass().getName() + + "'"); + } + if (!enabledNodeMetrics.isEmpty()) { + EventExecutor adminEventExecutor = + this.context.getNettyOptions().adminEventExecutorGroup().next(); + this.context + .getEventBus() + .register( + NodeStateEvent.class, + RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent)); + } } } @@ -83,32 +110,23 @@ public SessionMetricUpdater getSessionUpdater() { @Override public NodeMetricUpdater newNodeUpdater(Node node) { - return (registry == null) - ? NoopNodeMetricUpdater.INSTANCE - : new DropwizardNodeMetricUpdater(node, enabledNodeMetrics, registry, context); - } - - protected Set parseSessionMetricPaths(List paths) { - EnumSet result = EnumSet.noneOf(DefaultSessionMetric.class); - for (String path : paths) { - try { - result.add(DefaultSessionMetric.fromPath(path)); - } catch (IllegalArgumentException e) { - LOG.warn("[{}] Unknown session metric {}, skipping", logPrefix, path); - } + if (registry == null) { + return NoopNodeMetricUpdater.INSTANCE; + } else { + return new DropwizardNodeMetricUpdater(node, context, enabledNodeMetrics, registry); } - return Collections.unmodifiableSet(result); } - protected Set parseNodeMetricPaths(List paths) { - EnumSet result = EnumSet.noneOf(DefaultNodeMetric.class); - for (String path : paths) { - try { - result.add(DefaultNodeMetric.fromPath(path)); - } catch (IllegalArgumentException e) { - LOG.warn("[{}] Unknown node metric {}, skipping", logPrefix, path); - } + protected void processNodeStateEvent(NodeStateEvent event) { + if (event.newState == NodeState.DOWN + || event.newState == NodeState.FORCED_DOWN + || event.newState == null) { + // node is DOWN or REMOVED + ((DropwizardNodeMetricUpdater) event.node.getMetricUpdater()).startMetricsExpirationTimeout(); + } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) { + // node is UP or ADDED + ((DropwizardNodeMetricUpdater) event.node.getMetricUpdater()) + .cancelMetricsExpirationTimeout(); } - return Collections.unmodifiableSet(result); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java index a4322393e29..2e5e6c8db3d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
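`DropwizardMetricsFactory` now checks the driver context for an externally supplied registry before creating its own, so applications can aggregate driver metrics into an existing Dropwizard registry; if the supplied object is not a `MetricRegistry`, the factory throws `IllegalArgumentException`, as shown above. A hedged sketch of how this is expected to be wired from application code — it assumes the session builder exposes a `withMetricRegistry(Object)` hook feeding `InternalDriverContext.getMetricRegistry()`, which is not shown in this diff:

```java
import com.codahale.metrics.MetricRegistry;
import com.datastax.oss.driver.api.core.CqlSession;

public class SharedRegistryExample {
  public static void main(String[] args) {
    MetricRegistry appRegistry = new MetricRegistry();

    // Hand the application's registry to the driver; driver metrics are then
    // registered alongside the application's own metrics.
    try (CqlSession session =
        CqlSession.builder().withMetricRegistry(appRegistry).build()) {
      session.execute("SELECT release_version FROM system.local");
      // appRegistry now contains entries such as "<session>.cql-requests".
    }
  }
}
```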
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,97 +17,81 @@ */ package com.datastax.oss.driver.internal.core.metrics; -import com.codahale.metrics.Gauge; import com.codahale.metrics.MetricRegistry; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; import com.datastax.oss.driver.api.core.metrics.NodeMetric; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.pool.ChannelPool; import java.util.Set; -import java.util.function.Function; import net.jcip.annotations.ThreadSafe; @ThreadSafe public class DropwizardNodeMetricUpdater extends DropwizardMetricUpdater implements NodeMetricUpdater { - private final String metricNamePrefix; + private final Node node; public DropwizardNodeMetricUpdater( Node node, + InternalDriverContext context, Set enabledMetrics, - MetricRegistry registry, - InternalDriverContext context) { - super(enabledMetrics, registry); - this.metricNamePrefix = buildPrefix(context.getSessionName(), node.getEndPoint()); + MetricRegistry registry) { + super(context, enabledMetrics, registry); + this.node = node; - DriverExecutionProfile config = context.getConfig().getDefaultProfile(); + DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); + + initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections); + initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> availableStreamIds(node)); + initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node)); + initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node)); + + initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile); + initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile); + initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile); + initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile); + initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile); + initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile); + initializeCounter(DefaultNodeMetric.RETRIES, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile); + initializeCounter(DefaultNodeMetric.IGNORES, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile); + 
initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile); + initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile); + initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile); + initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile); - if (enabledMetrics.contains(DefaultNodeMetric.OPEN_CONNECTIONS)) { - this.registry.register( - buildFullName(DefaultNodeMetric.OPEN_CONNECTIONS, null), - (Gauge) node::getOpenConnections); - } - initializePoolGauge( - DefaultNodeMetric.AVAILABLE_STREAMS, node, ChannelPool::getAvailableIds, context); - initializePoolGauge(DefaultNodeMetric.IN_FLIGHT, node, ChannelPool::getInFlight, context); - initializePoolGauge( - DefaultNodeMetric.ORPHANED_STREAMS, node, ChannelPool::getOrphanedIds, context); initializeHdrTimer( DefaultNodeMetric.CQL_MESSAGES, - config, + profile, DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_INTERVAL); - initializeDefaultCounter(DefaultNodeMetric.UNSENT_REQUESTS, null); - initializeDefaultCounter(DefaultNodeMetric.ABORTED_REQUESTS, null); - initializeDefaultCounter(DefaultNodeMetric.WRITE_TIMEOUTS, null); - initializeDefaultCounter(DefaultNodeMetric.READ_TIMEOUTS, null); - initializeDefaultCounter(DefaultNodeMetric.UNAVAILABLES, null); - initializeDefaultCounter(DefaultNodeMetric.OTHER_ERRORS, null); - initializeDefaultCounter(DefaultNodeMetric.RETRIES, null); - initializeDefaultCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, null); - initializeDefaultCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, null); - initializeDefaultCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, null); - initializeDefaultCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, null); - initializeDefaultCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, null); - initializeDefaultCounter(DefaultNodeMetric.IGNORES, null); - initializeDefaultCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, null); - initializeDefaultCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, null); - initializeDefaultCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, null); - initializeDefaultCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, null); - initializeDefaultCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, null); - initializeDefaultCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, null); - initializeDefaultCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); - initializeDefaultCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, null); + initializeHdrTimer( + DseNodeMetric.GRAPH_MESSAGES, + profile, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_INTERVAL); } @Override - public String buildFullName(NodeMetric metric, String profileName) { - return metricNamePrefix + metric.getPath(); - } - - private String buildPrefix(String sessionName, EndPoint endPoint) { - return sessionName + ".nodes." 
+ endPoint.asMetricPrefix() + "."; - } - - private void initializePoolGauge( - NodeMetric metric, - Node node, - Function reading, - InternalDriverContext context) { - if (enabledMetrics.contains(metric)) { - registry.register( - buildFullName(metric, null), - (Gauge) - () -> { - ChannelPool pool = context.getPoolManager().getPools().get(node); - return (pool == null) ? 0 : reading.apply(pool); - }); + protected MetricId getMetricId(NodeMetric metric) { + MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric); + if (!id.getTags().isEmpty()) { + throw new IllegalStateException("Cannot use metric tags with Dropwizard"); } + return id; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java index 3a81bcad221..94e10ad6936 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/DropwizardSessionMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,125 +17,68 @@ */ package com.datastax.oss.driver.internal.core.metrics; -import com.codahale.metrics.Gauge; import com.codahale.metrics.MetricRegistry; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; import com.datastax.oss.driver.api.core.metrics.SessionMetric; -import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.session.RequestProcessor; -import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; -import com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler; -import com.datastax.oss.driver.shaded.guava.common.cache.Cache; -import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Set; import net.jcip.annotations.ThreadSafe; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @ThreadSafe public class DropwizardSessionMetricUpdater extends DropwizardMetricUpdater 
implements SessionMetricUpdater { - private static final Logger LOG = LoggerFactory.getLogger(DropwizardSessionMetricUpdater.class); + public DropwizardSessionMetricUpdater( + InternalDriverContext context, Set enabledMetrics, MetricRegistry registry) { + super(context, enabledMetrics, registry); - private final String metricNamePrefix; + DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); - public DropwizardSessionMetricUpdater( - Set enabledMetrics, MetricRegistry registry, InternalDriverContext context) { - super(enabledMetrics, registry); - this.metricNamePrefix = context.getSessionName() + "."; + initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes); + initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize); + initializeGauge( + DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize); + + initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile); + initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile); + initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile); - if (enabledMetrics.contains(DefaultSessionMetric.CONNECTED_NODES)) { - this.registry.gauge( - buildFullName(DefaultSessionMetric.CONNECTED_NODES, null), - () -> - () -> { - int count = 0; - for (Node node : context.getMetadataManager().getMetadata().getNodes().values()) { - if (node.getOpenConnections() > 0) { - count += 1; - } - } - return count; - }); - } - if (enabledMetrics.contains(DefaultSessionMetric.THROTTLING_QUEUE_SIZE)) { - this.registry.gauge( - buildFullName(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, null), - () -> buildQueueGauge(context.getRequestThrottler(), context.getSessionName())); - } - if (enabledMetrics.contains(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE)) { - this.registry.gauge( - buildFullName(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, null), - () -> { - Cache cache = getPreparedStatementCache(context); - Gauge gauge; - if (cache == null) { - LOG.warn( - "[{}] Metric {} is enabled in the config, " - + "but it looks like no CQL prepare processor is registered. 
" - + "The gauge will always return 0", - context.getSessionName(), - DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath()); - gauge = () -> 0L; - } else { - gauge = cache::size; - } - return gauge; - }); - } initializeHdrTimer( DefaultSessionMetric.CQL_REQUESTS, - context.getConfig().getDefaultProfile(), + profile, DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_INTERVAL); - initializeDefaultCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, null); initializeHdrTimer( DefaultSessionMetric.THROTTLING_DELAY, - context.getConfig().getDefaultProfile(), + profile, DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, DefaultDriverOption.METRICS_SESSION_THROTTLING_INTERVAL); - initializeDefaultCounter(DefaultSessionMetric.THROTTLING_ERRORS, null); + initializeHdrTimer( + DseSessionMetric.CONTINUOUS_CQL_REQUESTS, + profile, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_INTERVAL); + initializeHdrTimer( + DseSessionMetric.GRAPH_REQUESTS, + profile, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_INTERVAL); } @Override - public String buildFullName(SessionMetric metric, String profileName) { - return metricNamePrefix + metric.getPath(); - } - - private Gauge buildQueueGauge(RequestThrottler requestThrottler, String logPrefix) { - if (requestThrottler instanceof ConcurrencyLimitingRequestThrottler) { - return ((ConcurrencyLimitingRequestThrottler) requestThrottler)::getQueueSize; - } else if (requestThrottler instanceof RateLimitingRequestThrottler) { - return ((RateLimitingRequestThrottler) requestThrottler)::getQueueSize; - } else { - LOG.warn( - "[{}] Metric {} does not support {}, it will always return 0", - logPrefix, - DefaultSessionMetric.THROTTLING_QUEUE_SIZE.getPath(), - requestThrottler.getClass().getName()); - return () -> 0; - } - } - - @Nullable - private static Cache getPreparedStatementCache(InternalDriverContext context) { - // By default, both the sync processor and the async one are registered and they share the same - // cache. But with a custom processor registry, there could be only one of the two present. 
- for (RequestProcessor processor : context.getRequestProcessorRegistry().getProcessors()) { - if (processor instanceof CqlPrepareAsyncProcessor) { - return ((CqlPrepareAsyncProcessor) processor).getCache(); - } else if (processor instanceof CqlPrepareSyncProcessor) { - return ((CqlPrepareSyncProcessor) processor).getCache(); - } + protected MetricId getMetricId(SessionMetric metric) { + MetricId id = context.getMetricIdGenerator().sessionMetricId(metric); + if (!id.getTags().isEmpty()) { + throw new IllegalStateException("Cannot use metric tags with Dropwizard"); } - return null; + return id; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java index 44a004f8a60..c66fe1dbf8a 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/HdrReservoir.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java new file mode 100644 index 00000000000..039fb96d34b --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricId.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; + +/** + * The identifier of a metric. + * + *
<p>
      The driver will use the reported name and tags to register the described metric against the + * current metric registry. + * + *
<p>
      A metric identifier is unique, that is, the combination of its name and its tags is expected + * to be unique for a given metric registry. + */ +public interface MetricId { + + /** + * Returns this metric name. + * + *
<p>
      Metric names can be any non-empty string, but it is recommended to create metric names that + * have path-like structures separated by a dot, e.g. {@code path.to.my.custom.metric}. Driver + * built-in implementations of this interface abide by this rule. + * + * @return The metric name; cannot be empty nor null. + */ + @NonNull + String getName(); + + /** @return The metric tags, or empty if no tag is defined; cannot be null. */ + @NonNull + Map getTags(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java new file mode 100644 index 00000000000..7a33a81b966 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricIdGenerator.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import edu.umd.cs.findbugs.annotations.NonNull; + +/** + * A {@link MetricIdGenerator} is used to generate the unique identifiers by which a metric should + * be registered against the current metrics registry. + * + *
<p>
      The driver ships with two implementations of this interface: {@code DefaultMetricIdGenerator} + * and {@code TaggingMetricIdGenerator}. + * + *
<p>
      {@code DefaultMetricIdGenerator} is the default implementation; it generates metric + * identifiers with unique names and no tags. + * + *
<p>
      {@code TaggingMetricIdGenerator} generates metric identifiers whose uniqueness stems from the + * combination of their names and tags. + * + *
<p>
      See the driver's {@code reference.conf} file. + */ +public interface MetricIdGenerator { + + /** Generates a {@link MetricId} for the given {@link SessionMetric}. */ + @NonNull + MetricId sessionMetricId(@NonNull SessionMetric metric); + + /** Generates a {@link MetricId} for the given {@link Node} and {@link NodeMetric}. */ + @NonNull + MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java new file mode 100644 index 00000000000..92b3fc569f7 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricPaths.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MetricPaths { + + private static final Logger LOG = LoggerFactory.getLogger(MetricPaths.class); + + public static Set parseSessionMetricPaths(List paths, String logPrefix) { + Set result = new HashSet<>(); + for (String path : paths) { + try { + result.add(DefaultSessionMetric.fromPath(path)); + } catch (IllegalArgumentException e) { + try { + result.add(DseSessionMetric.fromPath(path)); + } catch (IllegalArgumentException e1) { + LOG.warn("[{}] Unknown session metric {}, skipping", logPrefix, path); + } + } + } + return Collections.unmodifiableSet(result); + } + + public static Set parseNodeMetricPaths(List paths, String logPrefix) { + Set result = new HashSet<>(); + for (String path : paths) { + try { + result.add(DefaultNodeMetric.fromPath(path)); + } catch (IllegalArgumentException e) { + try { + result.add(DseNodeMetric.fromPath(path)); + } catch (IllegalArgumentException e1) { + LOG.warn("[{}] Unknown node metric {}, skipping", logPrefix, path); + } + } + } + return Collections.unmodifiableSet(result); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java index f8dc93460b5..c07d1b136af 100644 --- 
a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,7 @@ */ package com.datastax.oss.driver.internal.core.metrics; +import edu.umd.cs.findbugs.annotations.Nullable; import java.util.concurrent.TimeUnit; /** @@ -25,21 +28,24 @@ */ public interface MetricUpdater { - void incrementCounter(MetricT metric, String profileName, long amount); + void incrementCounter(MetricT metric, @Nullable String profileName, long amount); - default void incrementCounter(MetricT metric, String profileName) { + default void incrementCounter(MetricT metric, @Nullable String profileName) { incrementCounter(metric, profileName, 1); } - void updateHistogram(MetricT metric, String profileName, long value); + // note: currently unused + void updateHistogram(MetricT metric, @Nullable String profileName, long value); - void markMeter(MetricT metric, String profileName, long amount); + void markMeter(MetricT metric, @Nullable String profileName, long amount); - default void markMeter(MetricT metric, String profileName) { + default void markMeter(MetricT metric, @Nullable String profileName) { markMeter(metric, profileName, 1); } - void updateTimer(MetricT metric, String profileName, long duration, TimeUnit unit); + void updateTimer(MetricT metric, @Nullable String profileName, long duration, TimeUnit unit); + + boolean isEnabled(MetricT metric, @Nullable String profileName); - boolean isEnabled(MetricT metric, String profileName); + void clearMetrics(); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java index 26440c42b6c..6440b79fb75 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/MetricsFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
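The contract above is easiest to see with a toy implementation. Everything below is illustrative (the real implementations are the Dropwizard and noop updaters in this package), and the semantics of clearMetrics are my reading of the diff, which adds the method without showing its call sites:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

public class ToyMetricUpdater<MetricT> {
  private final ConcurrentHashMap<MetricT, LongAdder> counters = new ConcurrentHashMap<>();

  public void incrementCounter(MetricT metric, String profileName, long amount) {
    // profileName may be null, meaning "the default execution profile"; this toy ignores it.
    counters.computeIfAbsent(metric, m -> new LongAdder()).add(amount);
  }

  public void clearMetrics() {
    // Drops every series, presumably for teardown of the owning session or node.
    counters.clear();
  }

  public static void main(String[] args) {
    ToyMetricUpdater<String> updater = new ToyMetricUpdater<>();
    updater.incrementCounter("cql-client-timeouts", null, 1);
    System.out.println(updater.counters.get("cql-client-timeouts").sum()); // 1
  }
}
```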
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java index 4a145124d6a..93d003f0a03 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NodeMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java new file mode 100644 index 00000000000..59ebd3d314b --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactory.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import java.util.List; +import java.util.Optional; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class NoopMetricsFactory implements MetricsFactory { + + private static final Logger LOG = LoggerFactory.getLogger(NoopMetricsFactory.class); + + @SuppressWarnings("unused") + public NoopMetricsFactory(DriverContext context) { + String logPrefix = context.getSessionName(); + DriverExecutionProfile config = context.getConfig().getDefaultProfile(); + List enabledSessionMetrics = + config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED); + List enabledNodeMetrics = + config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED); + if (!enabledSessionMetrics.isEmpty() || !enabledNodeMetrics.isEmpty()) { + LOG.warn( + "[{}] Some session-level or node-level metrics were enabled, " + + "but NoopMetricsFactory is being used: all metrics will be empty", + logPrefix); + } + } + + @Override + public Optional getMetrics() { + return Optional.empty(); + } + + @Override + public SessionMetricUpdater getSessionUpdater() { + return NoopSessionMetricUpdater.INSTANCE; + } + + @Override + public NodeMetricUpdater newNodeUpdater(Node node) { + return NoopNodeMetricUpdater.INSTANCE; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java index df0f8c9bfff..8d216990331 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopNodeMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
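A hedged sketch of the mismatch this warning guards against: metrics enabled in the configuration while the factory in use exposes none. The programmatic-config calls are the driver's public 4.x API; the enabled metric and the session bootstrap details are illustrative:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.util.Collections;

public class NoopMetricsWarningDemo {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withStringList(
                DefaultDriverOption.METRICS_SESSION_ENABLED,
                Collections.singletonList("cql-requests"))
            .build();
    // If the session's MetricsFactory is the noop one, the warning above is logged
    // and getMetrics() stays empty despite the enabled metric.
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      System.out.println("metrics present? " + session.getMetrics().isPresent());
    }
  }
}
```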
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,4 +53,9 @@ public boolean isEnabled(NodeMetric metric, String profileName) { // since methods don't do anything, return false return false; } + + @Override + public void clearMetrics() { + // nothing to do + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java index a4cd33b12b7..7099a8ddcac 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/NoopSessionMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,4 +53,7 @@ public boolean isEnabled(SessionMetric metric, String profileName) { // since methods don't do anything, return false return false; } + + @Override + public void clearMetrics() {} } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java index 8aaf82dcb71..b7fc51dd134 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/SessionMetricUpdater.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java new file mode 100644 index 00000000000..393651929c9 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGenerator.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; + +/** + * A {@link MetricIdGenerator} that generates metric identifiers using a combination of names and + * tags. + * + *
<p>
      Session metric identifiers contain a name starting with "session." and ending with the metric + * path, and a tag with the key "session" and the value of the current session name. + * + *
<p>
      Node metric identifiers contain a name starting with "nodes." and ending with the metric path, + * and two tags: one with the key "session" and the value of the current session name, the other + * with the key "node" and the value of the current node endpoint. + */ +public class TaggingMetricIdGenerator implements MetricIdGenerator { + + private final String sessionName; + private final String sessionPrefix; + private final String nodePrefix; + + @SuppressWarnings("unused") + public TaggingMetricIdGenerator(DriverContext context) { + sessionName = context.getSessionName(); + String prefix = + Objects.requireNonNull( + context + .getConfig() + .getDefaultProfile() + .getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")); + sessionPrefix = prefix.isEmpty() ? "session." : prefix + ".session."; + nodePrefix = prefix.isEmpty() ? "nodes." : prefix + ".nodes."; + } + + @NonNull + @Override + public MetricId sessionMetricId(@NonNull SessionMetric metric) { + return new DefaultMetricId( + sessionPrefix + metric.getPath(), ImmutableMap.of("session", sessionName)); + } + + @NonNull + @Override + public MetricId nodeMetricId(@NonNull Node node, @NonNull NodeMetric metric) { + return new DefaultMetricId( + nodePrefix + metric.getPath(), + ImmutableMap.of("session", sessionName, "node", node.getEndPoint().toString())); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java new file mode 100644 index 00000000000..dffc23c4c8f --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/CpuInfo.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import java.util.Locale; + +public class CpuInfo { + + /* Copied from equivalent op in jnr.ffi.Platform. We have to have this here as it has to be defined + * before its (multiple) uses in determineCpu() */ + private static final Locale LOCALE = Locale.ENGLISH; + + /* The remainder of this class is largely based on jnr.ffi.Platform in jnr-ffi version 2.1.10. + * We copy it manually here in order to avoid introducing an extra dependency merely for the sake of + * evaluating some system properties. + * + * jnr-ffi copyright notice follows: + * + * Copyright (C) 2008-2010 Wayne Meissner + * + * This file is part of the JNR project. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
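To make the naming scheme above concrete, this little demo mirrors (rather than calls) the generator's logic for a hypothetical session "s0", a node at 10.0.0.1:9042, and a configured prefix of "cassandra"; the exact node value depends on EndPoint.toString():

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class TaggingIdShapeDemo {
  public static void main(String[] args) {
    String prefix = "cassandra"; // the configurable prefix; empty by default
    String sessionName = "s0";
    String nodeEndPoint = "/10.0.0.1:9042";

    // Session metric: prefixed name plus a single "session" tag.
    String sessionId =
        prefix.isEmpty() ? "session.cql-requests" : prefix + ".session.cql-requests";
    Map<String, String> sessionTags = new LinkedHashMap<>();
    sessionTags.put("session", sessionName);
    System.out.println(sessionId + " " + sessionTags);
    // -> cassandra.session.cql-requests {session=s0}

    // Node metric: prefixed name plus "session" and "node" tags.
    String nodeId =
        prefix.isEmpty() ? "nodes.pool.open-connections" : prefix + ".nodes.pool.open-connections";
    Map<String, String> nodeTags = new LinkedHashMap<>();
    nodeTags.put("session", sessionName);
    nodeTags.put("node", nodeEndPoint);
    System.out.println(nodeId + " " + nodeTags);
    // -> cassandra.nodes.pool.open-connections {session=s0, node=/10.0.0.1:9042}
  }
}
```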
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + /** The supported CPU architectures. */ + public enum Cpu { + /* + * Note The names of the enum values are used in other parts of the + * code to determine where to find the native stub library. Do NOT rename. + */ + + /** 32 bit legacy Intel */ + I386, + + /** 64 bit AMD (aka EM64T/X64) */ + X86_64, + + /** 32 bit Power PC */ + PPC, + + /** 64 bit Power PC */ + PPC64, + + /** 64 bit Power PC little endian */ + PPC64LE, + + /** 32 bit Sun sparc */ + SPARC, + + /** 64 bit Sun sparc */ + SPARCV9, + + /** IBM zSeries S/390 */ + S390X, + + /** 32 bit MIPS (used by nestedvm) */ + MIPS32, + + /** 32 bit ARM */ + ARM, + + /** 64 bit ARM */ + AARCH64, + + /** + * Unknown CPU architecture. A best effort will be made to infer architecture specific values + * such as address and long size. + */ + UNKNOWN; + + @Override + public String toString() { + return name().toLowerCase(LOCALE); + } + } + + public static Cpu determineCpu() { + String archString = System.getProperty("os.arch"); + if (equalsIgnoreCase("x86", archString) + || equalsIgnoreCase("i386", archString) + || equalsIgnoreCase("i86pc", archString) + || equalsIgnoreCase("i686", archString)) { + return Cpu.I386; + } else if (equalsIgnoreCase("x86_64", archString) || equalsIgnoreCase("amd64", archString)) { + return Cpu.X86_64; + } else if (equalsIgnoreCase("ppc", archString) || equalsIgnoreCase("powerpc", archString)) { + return Cpu.PPC; + } else if (equalsIgnoreCase("ppc64", archString) || equalsIgnoreCase("powerpc64", archString)) { + if ("little".equals(System.getProperty("sun.cpu.endian"))) { + return Cpu.PPC64LE; + } + return Cpu.PPC64; + } else if (equalsIgnoreCase("ppc64le", archString) + || equalsIgnoreCase("powerpc64le", archString)) { + return Cpu.PPC64LE; + } else if (equalsIgnoreCase("s390", archString) || equalsIgnoreCase("s390x", archString)) { + return Cpu.S390X; + } else if (equalsIgnoreCase("aarch64", archString)) { + return Cpu.AARCH64; + } else if (equalsIgnoreCase("arm", archString) || equalsIgnoreCase("armv7l", archString)) { + return Cpu.ARM; + } + + // Try to find by lookup up in the CPU list + for (Cpu cpu : Cpu.values()) { + if (equalsIgnoreCase(cpu.name(), archString)) { + return cpu; + } + } + + return Cpu.UNKNOWN; + } + + private static boolean equalsIgnoreCase(String s1, String s2) { + return s1.equalsIgnoreCase(s2) + || s1.toUpperCase(LOCALE).equals(s2.toUpperCase(LOCALE)) + || s1.toLowerCase(LOCALE).equals(s2.toLowerCase(LOCALE)); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java new file mode 100644 index 00000000000..5b57a01564c --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/EmptyLibc.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
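determineCpu() is a pure function of the os.arch (and, for PPC64, sun.cpu.endian) system properties, so probing it is cheap. A small usage sketch, assuming the class above is on the classpath:

```java
import com.datastax.oss.driver.internal.core.os.CpuInfo;

public class CpuProbe {
  public static void main(String[] args) {
    System.out.println("os.arch  = " + System.getProperty("os.arch"));
    // Unrecognized values fall through to Cpu.UNKNOWN rather than failing.
    System.out.println("detected = " + CpuInfo.determineCpu()); // e.g. x86_64 or aarch64
  }
}
```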
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import java.util.Optional; + +/** A no-op NativeImpl implementation; useful if we can't load one of the others */ +public class EmptyLibc implements Libc { + + @Override + public boolean available() { + return false; + } + + @Override + public Optional gettimeofday() { + return Optional.empty(); + } + + @Override + public Optional getpid() { + return Optional.empty(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java new file mode 100644 index 00000000000..fc9dd8d50c7 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalGetpid.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import java.util.Collections; +import java.util.List; +import org.graalvm.nativeimage.c.CContext; +import org.graalvm.nativeimage.c.function.CFunction; + +@CContext(GraalGetpid.Directives.class) +public class GraalGetpid { + + static class Directives implements CContext.Directives { + + @Override + public List getHeaderFiles() { + + return Collections.singletonList(""); + } + } + + @CFunction + public static native int getpid(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java new file mode 100644 index 00000000000..a6535c2c653 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/GraalLibc.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import java.util.Locale; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GraalLibc implements Libc { + + private static final Logger LOG = LoggerFactory.getLogger(GraalLibc.class); + + private static final Locale LOCALE = Locale.ENGLISH; + + private static final String MAC_PLATFORM_STR = "mac".toLowerCase(LOCALE); + private static final String DARWIN_PLATFORM_STR = "darwin".toLowerCase(LOCALE); + private static final String LINUX_PLATFORM_STR = "linux".toLowerCase(LOCALE); + + private final boolean available = checkAvailability(); + + /* This method is adapted from of jnr.ffi.Platform.determineOS() in jnr-ffi version 2.1.10. **/ + private boolean checkPlatform() { + + String osName = System.getProperty("os.name").split(" ", -1)[0]; + String compareStr = osName.toLowerCase(Locale.ENGLISH); + return compareStr.startsWith(MAC_PLATFORM_STR) + || compareStr.startsWith(DARWIN_PLATFORM_STR) + || compareStr.startsWith(LINUX_PLATFORM_STR); + } + + private boolean checkAvailability() { + + if (!checkPlatform()) { + return false; + } + + try { + getpidRaw(); + } catch (Throwable t) { + + LOG.debug("Error calling getpid()", t); + return false; + } + + try { + gettimeofdayRaw(); + } catch (Throwable t) { + + LOG.debug("Error calling gettimeofday()", t); + return false; + } + + return true; + } + + @Override + public boolean available() { + return this.available; + } + + /* Substrate includes a substitution for Linux + Darwin which redefines System.nanoTime() to use + * gettimeofday() (unless platform-specific higher-res clocks are available, which is even better). */ + @Override + public Optional gettimeofday() { + return this.available ? Optional.of(gettimeofdayRaw()) : Optional.empty(); + } + + private long gettimeofdayRaw() { + return Math.round(System.nanoTime() / 1_000d); + } + + @Override + public Optional getpid() { + return this.available ? Optional.of(getpidRaw()) : Optional.empty(); + } + + private int getpidRaw() { + return GraalGetpid.getpid(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java new file mode 100644 index 00000000000..25236dee837 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibc.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
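Note that the microsecond clock above is derived rather than obtained from a syscall: per the comment in the class, Substrate redefines System.nanoTime() in terms of gettimeofday() (or better) on Linux and Darwin, so dividing by 1000 recovers microseconds. The conversion in isolation:

```java
public class NanosToMicros {
  public static void main(String[] args) {
    // Matches GraalLibc.gettimeofdayRaw(): round nanoseconds to whole microseconds.
    long micros = Math.round(System.nanoTime() / 1_000d);
    System.out.println(micros);
  }
}
```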
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import java.util.Optional; +import java.util.function.Consumer; +import jnr.posix.POSIX; +import jnr.posix.POSIXFactory; +import jnr.posix.Timeval; +import jnr.posix.util.DefaultPOSIXHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class JnrLibc implements Libc { + + private static final Logger LOG = LoggerFactory.getLogger(JnrLibc.class); + + private final Optional posix; + + public JnrLibc() { + + this.posix = loadPosix(); + } + + @Override + public Optional gettimeofday() { + + return this.posix.flatMap(this::gettimeofdayImpl); + } + + @Override + public Optional getpid() { + + return this.posix.map(POSIX::getpid); + } + + @Override + public boolean available() { + return this.posix.isPresent(); + } + + private Optional loadPosix() { + + try { + return Optional.of(POSIXFactory.getPOSIX(new DefaultPOSIXHandler(), true)) + .flatMap(p -> catchAll(p, posix -> posix.getpid(), "Error calling getpid()")) + .flatMap(p -> catchAll(p, this::gettimeofdayImpl, "Error calling gettimeofday()")); + } catch (Throwable t) { + LOG.debug("Error loading POSIX", t); + return Optional.empty(); + } + } + + private Optional catchAll(POSIX posix, Consumer fn, String debugStr) { + try { + fn.accept(posix); + return Optional.of(posix); + } catch (Throwable t) { + + LOG.debug(debugStr, t); + return Optional.empty(); + } + } + + private Optional gettimeofdayImpl(POSIX posix) { + + Timeval tv = posix.allocateTimeval(); + int rv = posix.gettimeofday(tv); + if (rv != 0) { + LOG.debug("Expected 0 return value from gettimeofday(), observed " + rv); + return Optional.empty(); + } + return Optional.of(tv.sec() * 1_000_000 + tv.usec()); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java new file mode 100644 index 00000000000..532001498f4 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/JnrLibcSubstitution.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import java.util.Optional; + +/** + * Add an explicit Graal substitution for {@link JnrLibc}. If we don't implement something like this + * the analysis done at Graal native image build time will discover the jnr-posix references in + * JnrLibc even though they won't be used at runtime. 
By default jnr-ffi (used by jnr-posix to do + * its work) will use {@link ClassLoader#defineClass(String, byte[], int, int)} which isn't + * supported by Graal. This behaviour can be changed with a system property, but the cleanest + * solution is simply to remove the references to jnr-posix code via a Graal substitution. + */ +@TargetClass(JnrLibc.class) +@Substitute +final class JnrLibcSubstitution implements Libc { + + @Substitute + public JnrLibcSubstitution() {} + + @Substitute + @Override + public boolean available() { + return false; + } + + @Substitute + @Override + public Optional<Long> gettimeofday() { + return Optional.empty(); + } + + @Substitute + @Override + public Optional<Integer> getpid() { + return Optional.empty(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java new file mode 100644 index 00000000000..f3bda6a8c88 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Libc.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import java.util.Optional; + +public interface Libc { + + /* Maintained to allow Native.isXAvailable() functionality without trying to make a native call if + * the underlying support _is_ available. */ + boolean available(); + + Optional<Long> gettimeofday(); + + Optional<Integer> getpid(); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java index 25df2d5d23a..e292914bb4b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/os/Native.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
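On the JNR path, by contrast, gettimeofday() really is invoked, and gettimeofdayImpl above flattens the resulting (seconds, microseconds) pair into a single number. The same arithmetic with made-up values:

```java
public class TimevalMath {
  public static void main(String[] args) {
    long tvSec = 1_700_000_000L; // seconds since the epoch (illustrative)
    long tvUsec = 123_456L;      // microseconds within the current second
    long micros = tvSec * 1_000_000 + tvUsec; // same formula as gettimeofdayImpl
    System.out.println(micros);  // 1700000000123456
  }
}
```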
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +17,6 @@ */ package com.datastax.oss.driver.internal.core.os; -import java.lang.reflect.Method; -import jnr.ffi.LibraryLoader; -import jnr.ffi.Platform; -import jnr.ffi.Pointer; -import jnr.ffi.Runtime; -import jnr.ffi.Struct; -import jnr.ffi.annotations.Out; -import jnr.ffi.annotations.Transient; -import jnr.posix.POSIXFactory; -import jnr.posix.util.DefaultPOSIXHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,13 +25,47 @@ public class Native { private static final Logger LOG = LoggerFactory.getLogger(Native.class); + private static class LibcLoader { + + /* These values come from Graal's imageinfo API which aims to offer the ability to detect + * when we're in the Graal build/run time via system props. The maintainers of Graal have + * agreed that this API will not change over time. We reference these props as literals + * to avoid introducing a dependency on Graal code for non-Graal users here. */ + private static final String GRAAL_STATUS_PROP = "org.graalvm.nativeimage.imagecode"; + private static final String GRAAL_BUILDTIME_STATUS = "buildtime"; + private static final String GRAAL_RUNTIME_STATUS = "runtime"; + + public Libc load() { + try { + if (isGraal()) { + LOG.info("Using Graal-specific native functions"); + return new GraalLibc(); + } + return new JnrLibc(); + } catch (Throwable t) { + LOG.info( + "Unable to load JNR native implementation. This could be normal if JNR is excluded from the classpath", + t); + return new EmptyLibc(); + } + } + + private boolean isGraal() { + + String val = System.getProperty(GRAAL_STATUS_PROP); + return val != null + && (val.equals(GRAAL_RUNTIME_STATUS) || val.equalsIgnoreCase(GRAAL_BUILDTIME_STATUS)); + } + } + + private static final Libc LIBC = new LibcLoader().load(); + private static final CpuInfo.Cpu CPU = CpuInfo.determineCpu(); + + private static final String NATIVE_CALL_ERR_MSG = "Native call failed or was not available"; + /** Whether {@link Native#currentTimeMicros()} is available on this system. */ public static boolean isCurrentTimeMicrosAvailable() { - try { - return LibCLoader.GET_TIME_OF_DAY_AVAILABLE; - } catch (NoClassDefFoundError e) { - return false; - } + return LIBC.available(); } /** @@ -47,155 +73,24 @@ public static boolean isCurrentTimeMicrosAvailable() { * {@link #isCurrentTimeMicrosAvailable()} is true. */ public static long currentTimeMicros() { - if (!isCurrentTimeMicrosAvailable()) { - throw new IllegalStateException( - "Native call not available. 
" - + "Check isCurrentTimeMicrosAvailable() before calling this method."); - } - LibCLoader.Timeval tv = new LibCLoader.Timeval(LibCLoader.LIB_C_RUNTIME); - int res = LibCLoader.LIB_C.gettimeofday(tv, null); - if (res != 0) { - throw new IllegalStateException("Call to libc.gettimeofday() failed with result " + res); - } - return tv.tv_sec.get() * 1000000 + tv.tv_usec.get(); + return LIBC.gettimeofday().orElseThrow(() -> new IllegalStateException(NATIVE_CALL_ERR_MSG)); } public static boolean isGetProcessIdAvailable() { - try { - return PosixLoader.GET_PID_AVAILABLE; - } catch (NoClassDefFoundError e) { - return false; - } + return LIBC.available(); } public static int getProcessId() { - if (!isGetProcessIdAvailable()) { - throw new IllegalStateException( - "Native call not available. " - + "Check isGetProcessIdAvailable() before calling this method."); - } - return PosixLoader.POSIX.getpid(); + return LIBC.getpid().orElseThrow(() -> new IllegalStateException(NATIVE_CALL_ERR_MSG)); } /** - * Returns {@code true} if JNR {@link Platform} class is loaded, and {@code false} otherwise. - * - * @return {@code true} if JNR {@link Platform} class is loaded. - */ - public static boolean isPlatformAvailable() { - try { - return PlatformLoader.PLATFORM != null; - } catch (NoClassDefFoundError e) { - return false; - } - } - - /** - * Returns the current processor architecture the JVM is running on, as reported by {@link - * Platform#getCPU()}. + * Returns the current processor architecture the JVM is running on. This value should match up to + * what's returned by jnr-ffi's Platform.getCPU() method. * * @return the current processor architecture. - * @throws IllegalStateException if JNR Platform library is not loaded. */ - public static String getCPU() { - if (!isPlatformAvailable()) - throw new IllegalStateException( - "JNR Platform class not loaded. " - + "Check isPlatformAvailable() before calling this method."); - return PlatformLoader.PLATFORM.getCPU().toString(); - } - - /** - * If jnr-ffi is not in the classpath at runtime, we'll fail to initialize the static fields - * below, but we still want {@link Native} to initialize successfully, so use an inner class. - */ - private static class LibCLoader { - - /** Handles libc calls through JNR (must be public). 
*/ - public interface LibC { - int gettimeofday(@Out @Transient Timeval tv, Pointer unused); - } - - // See http://man7.org/linux/man-pages/man2/settimeofday.2.html - private static class Timeval extends Struct { - private final time_t tv_sec = new time_t(); - private final Unsigned32 tv_usec = new Unsigned32(); - - private Timeval(Runtime runtime) { - super(runtime); - } - } - - private static final LibC LIB_C; - private static final Runtime LIB_C_RUNTIME; - private static final boolean GET_TIME_OF_DAY_AVAILABLE; - - static { - LibC libc; - Runtime runtime = null; - try { - libc = LibraryLoader.create(LibC.class).load("c"); - runtime = Runtime.getRuntime(libc); - } catch (Throwable t) { - libc = null; - LOG.debug("Error loading libc", t); - } - LIB_C = libc; - LIB_C_RUNTIME = runtime; - boolean getTimeOfDayAvailable = false; - if (LIB_C_RUNTIME != null) { - try { - getTimeOfDayAvailable = LIB_C.gettimeofday(new Timeval(LIB_C_RUNTIME), null) == 0; - } catch (Throwable t) { - LOG.debug("Error accessing libc.gettimeofday()", t); - } - } - GET_TIME_OF_DAY_AVAILABLE = getTimeOfDayAvailable; - } - } - - /** @see LibCLoader */ - private static class PosixLoader { - private static final jnr.posix.POSIX POSIX; - private static final boolean GET_PID_AVAILABLE; - - static { - jnr.posix.POSIX posix; - try { - posix = POSIXFactory.getPOSIX(new DefaultPOSIXHandler(), true); - } catch (Throwable t) { - posix = null; - LOG.debug("Error loading POSIX", t); - } - POSIX = posix; - boolean getPidAvailable = false; - if (POSIX != null) { - try { - POSIX.getpid(); - getPidAvailable = true; - } catch (Throwable t) { - LOG.debug("Error accessing posix.getpid()", t); - } - } - GET_PID_AVAILABLE = getPidAvailable; - } - } - - private static class PlatformLoader { - - private static final Platform PLATFORM; - - static { - Platform platform; - try { - Class platformClass = Class.forName("jnr.ffi.Platform"); - Method getNativePlatform = platformClass.getMethod("getNativePlatform"); - platform = (Platform) getNativePlatform.invoke(null); - } catch (Throwable t) { - platform = null; - LOG.debug("Error loading jnr.ffi.Platform class, this class will not be available.", t); - } - PLATFORM = platform; - } + public static String getCpu() { + return CPU.toString(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java index 24891972763..6b7d06045bd 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPool.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
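With this rewrite, callers of Native no longer need to care which Libc implementation loaded; if both the Graal and JNR paths fail, EmptyLibc makes the availability checks return false instead of throwing. A usage sketch (this is internal API, so treat it as subject to change):

```java
import com.datastax.oss.driver.internal.core.os.Native;

public class NativeProbe {
  public static void main(String[] args) {
    if (Native.isCurrentTimeMicrosAvailable()) {
      System.out.println("micros = " + Native.currentTimeMicros());
    }
    if (Native.isGetProcessIdAvailable()) {
      System.out.println("pid    = " + Native.getProcessId());
    }
    // Unlike the calls above, getCpu() always succeeds (falling back to "unknown").
    System.out.println("cpu    = " + Native.getCpu());
  }
}
```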
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -143,7 +145,6 @@ public boolean isInvalidKeyspace() { * request path, and we want to avoid complex check-then-act semantics; therefore this might * race and return a channel that is already closed, or {@code null}. In those cases, it is up * to the caller to fail fast and move to the next node. - *
<p>
      There is no need to return the channel. */ public DriverChannel next() { return channels.next(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java index 24ba12ac3bc..b854f4c326c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java index 0f6144c77a4..b02e15819d9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/pool/ChannelSet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,8 @@ import java.util.Iterator; import java.util.concurrent.locks.ReentrantLock; import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Concurrent structure used to store the channels of a pool. @@ -32,6 +36,15 @@ */ @ThreadSafe class ChannelSet implements Iterable<DriverChannel> { + + private static final Logger LOG = LoggerFactory.getLogger(ChannelSet.class); + /** + * The maximum number of iterations in the busy wait loop in {@link #next()} when there are + * multiple channels. This is a backstop to protect against thread starvation; in practice we've + * never observed more than 3 iterations in tests. 
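The rewritten next() in the hunk that follows pairs the existing most-available-IDs heuristic with the new preAcquireId() reservation; the bounded retry exists because a channel can lose its last IDs between the scan and the reservation. A toy model of the same loop (Channel below is a stand-in, not the driver's DriverChannel):

```java
import java.util.concurrent.atomic.AtomicInteger;

public class PreAcquireLoopSketch {
  static final int MAX_ITERATIONS = 50;

  static class Channel {
    final AtomicInteger availableIds;
    Channel(int ids) { availableIds = new AtomicInteger(ids); }
    int getAvailableIds() { return availableIds.get(); }
    boolean preAcquireId() {
      // Atomically reserve a stream ID, refusing when none are left.
      return availableIds.getAndUpdate(n -> n > 0 ? n - 1 : n) > 0;
    }
  }

  static Channel next(Channel[] snapshot) {
    for (int i = 0; i < MAX_ITERATIONS; i++) {
      Channel best = null;
      int bestScore = 0;
      for (Channel channel : snapshot) {
        int score = channel.getAvailableIds();
        if (score > bestScore) {
          bestScore = score;
          best = channel;
        }
      }
      if (best == null) {
        return null; // every channel is saturated
      } else if (best.preAcquireId()) {
        return best; // reservation succeeded
      }
      // else: lost the race with another thread, rescan
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(next(new Channel[] {new Channel(0), new Channel(2)}) != null); // true
  }
}
```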
+ */ + private static final int MAX_ITERATIONS = 50; + private volatile DriverChannel[] channels; private final ReentrantLock lock = new ReentrantLock(); // must be held when mutating the array @@ -83,18 +96,27 @@ DriverChannel next() { case 0: return null; case 1: - return snapshot[0]; + DriverChannel onlyChannel = snapshot[0]; + return onlyChannel.preAcquireId() ? onlyChannel : null; default: - DriverChannel best = null; - int bestScore = 0; - for (DriverChannel channel : snapshot) { - int score = channel.getAvailableIds(); - if (score > bestScore) { - bestScore = score; - best = channel; + for (int i = 0; i < MAX_ITERATIONS; i++) { + DriverChannel best = null; + int bestScore = 0; + for (DriverChannel channel : snapshot) { + int score = channel.getAvailableIds(); + if (score > bestScore) { + bestScore = score; + best = channel; + } + } + if (best == null) { + return null; + } else if (best.preAcquireId()) { + return best; } } - return best; + LOG.trace("Could not select a channel after {} iterations", MAX_ITERATIONS); + return null; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java new file mode 100644 index 00000000000..74270caef91 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressors.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.protocol.internal.Compressor; +import io.netty.buffer.ByteBuf; +import java.util.Locale; + +/** + * Provides a single entry point to create compressor instances in the driver. + * + *
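+ * <p>Typical usage (illustrative sketch; in practice the driver calls this itself with the
+ * value of the {@code advanced.protocol.compression} option):
+ * <pre>{@code
+ * Compressor<ByteBuf> compressor = BuiltInCompressors.newInstance("lz4", context);
+ * }</pre>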

      Note that this class also serves as a convenient target for GraalVM substitutions, see {@link + * CompressorSubstitutions}. + */ +public class BuiltInCompressors { + + public static Compressor newInstance(String name, DriverContext context) { + switch (name.toLowerCase(Locale.ROOT)) { + case "lz4": + return new Lz4Compressor(context); + case "snappy": + return new SnappyCompressor(context); + case "none": + return Compressor.none(); + default: + throw new IllegalArgumentException( + String.format( + "Unsupported compression algorithm '%s' (from configuration option %s)", + name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java index a8e4960ff49..95e6be07434 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufCompressor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,21 +27,39 @@ public abstract class ByteBufCompressor implements Compressor { @Override public ByteBuf compress(ByteBuf uncompressed) { - return uncompressed.isDirect() ? compressDirect(uncompressed) : compressHeap(uncompressed); + return uncompressed.isDirect() + ? compressDirect(uncompressed, true) + : compressHeap(uncompressed, true); + } + + @Override + public ByteBuf compressWithoutLength(ByteBuf uncompressed) { + return uncompressed.isDirect() + ? compressDirect(uncompressed, false) + : compressHeap(uncompressed, false); } - protected abstract ByteBuf compressDirect(ByteBuf input); + protected abstract ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength); - protected abstract ByteBuf compressHeap(ByteBuf input); + protected abstract ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength); @Override public ByteBuf decompress(ByteBuf compressed) { - return compressed.isDirect() ? decompressDirect(compressed) : decompressHeap(compressed); + return decompressWithoutLength(compressed, readUncompressedLength(compressed)); + } + + protected abstract int readUncompressedLength(ByteBuf compressed); + + @Override + public ByteBuf decompressWithoutLength(ByteBuf compressed, int uncompressedLength) { + return compressed.isDirect() + ? 
decompressDirect(compressed, uncompressedLength) + : decompressHeap(compressed, uncompressedLength); } - protected abstract ByteBuf decompressDirect(ByteBuf input); + protected abstract ByteBuf decompressDirect(ByteBuf input, int uncompressedLength); - protected abstract ByteBuf decompressHeap(ByteBuf input); + protected abstract ByteBuf decompressHeap(ByteBuf input, int uncompressedLength); protected static ByteBuffer inputNioBuffer(ByteBuf buf) { // Using internalNioBuffer(...) as we only hold the reference in this method and so can diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java index 73b92f479de..1371009f989 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +26,7 @@ import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.zip.CRC32; import net.jcip.annotations.ThreadSafe; @ThreadSafe @@ -66,6 +69,16 @@ public ByteBuf concat(ByteBuf left, ByteBuf right) { } } + @Override + public void markReaderIndex(ByteBuf source) { + source.markReaderIndex(); + } + + @Override + public void resetReaderIndex(ByteBuf source) { + source.resetReaderIndex(); + } + @Override public byte readByte(ByteBuf source) { return source.readByte(); @@ -76,6 +89,11 @@ public int readInt(ByteBuf source) { return source.readInt(); } + @Override + public int readInt(ByteBuf source, int offset) { + return source.getInt(source.readerIndex() + offset); + } + @Override public InetAddress readInetAddr(ByteBuf source) { int length = readByte(source) & 0xFF; @@ -98,8 +116,9 @@ public int readUnsignedShort(ByteBuf source) { public ByteBuffer readBytes(ByteBuf source) { int length = readInt(source); if (length < 0) return null; - ByteBuf slice = source.readSlice(length); - return ByteBuffer.wrap(readRawBytes(slice)); + byte[] bytes = new byte[length]; + source.readBytes(bytes); + return ByteBuffer.wrap(bytes); } @Override @@ -127,6 +146,16 @@ public String readLongString(ByteBuf source) { return readString(source, length); } + @Override + public ByteBuf readRetainedSlice(ByteBuf source, int sliceLength) { + return source.readRetainedSlice(sliceLength); + } + + @Override + public void updateCrc(ByteBuf source, CRC32 crc) { + crc.update(source.internalNioBuffer(source.readerIndex(), source.readableBytes())); + } + @Override public void writeByte(byte b, ByteBuf dest) { dest.writeByte(b); 
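// Note on the readBytes() change in the hunk above: the new code always copies into a fresh
// byte[]. A plausible motivation (an assumption, not stated in the patch): the old path via
// readRawBytes() could hand out the buffer's backing array without a copy, so the resulting
// ByteBuffer could alias pooled memory that Netty later reuses. For example:
//   ByteBuffer unsafe = ByteBuffer.wrap(buffer.array()); // may alias pooled memory
//   ByteBuffer safe = ByteBuffer.wrap(copy);             // survives buffer release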
@@ -194,22 +223,6 @@ public void writeShortBytes(byte[] bytes, ByteBuf dest) { dest.writeBytes(bytes); } - // Reads *all* readable bytes from a buffer and return them. - // If the buffer is backed by an array, this will return the underlying array directly, without - // copy. - private static byte[] readRawBytes(ByteBuf buffer) { - if (buffer.hasArray() && buffer.readableBytes() == buffer.array().length) { - // Move the readerIndex just so we consistently consume the input - buffer.readerIndex(buffer.writerIndex()); - return buffer.array(); - } - - // Otherwise, just read the bytes in a new array - byte[] bytes = new byte[buffer.readableBytes()]; - buffer.readBytes(bytes); - return bytes; - } - private static String readString(ByteBuf source, int length) { try { String str = source.toString(source.readerIndex(), length, CharsetUtil.UTF_8); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java new file mode 100644 index 00000000000..9b112559aab --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/ByteBufSegmentBuilder.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.protocol; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.FrameCodec; +import com.datastax.oss.protocol.internal.PrimitiveCodec; +import com.datastax.oss.protocol.internal.Segment; +import com.datastax.oss.protocol.internal.SegmentBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import java.util.ArrayList; +import java.util.List; +import net.jcip.annotations.NotThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@NotThreadSafe +public class ByteBufSegmentBuilder extends SegmentBuilder { + + private static final Logger LOG = LoggerFactory.getLogger(ByteBufSegmentBuilder.class); + + private final ChannelHandlerContext context; + private final String logPrefix; + + public ByteBufSegmentBuilder( + @NonNull ChannelHandlerContext context, + @NonNull PrimitiveCodec primitiveCodec, + @NonNull FrameCodec frameCodec, + @NonNull String logPrefix) { + super(primitiveCodec, frameCodec); + this.context = context; + this.logPrefix = logPrefix; + } + + @Override + @NonNull + protected ChannelPromise mergeStates(@NonNull List framePromises) { + if (framePromises.size() == 1) { + return framePromises.get(0); + } + // We concatenate multiple frames into one segment. When the segment is written, all the frames + // are written. + ChannelPromise segmentPromise = context.newPromise(); + ImmutableList dependents = ImmutableList.copyOf(framePromises); + segmentPromise.addListener( + future -> { + if (future.isSuccess()) { + for (ChannelPromise framePromise : dependents) { + framePromise.setSuccess(); + } + } else { + Throwable cause = future.cause(); + for (ChannelPromise framePromise : dependents) { + framePromise.setFailure(cause); + } + } + }); + return segmentPromise; + } + + @Override + @NonNull + protected List splitState(@NonNull ChannelPromise framePromise, int sliceCount) { + // We split one frame into multiple slices. When all slices are written, the frame is written. 
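+    // Concretely: a frame of, say, ~300 KiB becomes three slices; all three slice promises
+    // below feed a single SliceWriteListener, which succeeds framePromise only once every
+    // slice has been written, and fails it (cancelling the remaining slices) on the first error.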
+ List slicePromises = new ArrayList<>(sliceCount); + for (int i = 0; i < sliceCount; i++) { + slicePromises.add(context.newPromise()); + } + GenericFutureListener> sliceListener = + new SliceWriteListener(framePromise, slicePromises); + for (int i = 0; i < sliceCount; i++) { + slicePromises.get(i).addListener(sliceListener); + } + return slicePromises; + } + + @Override + protected void processSegment( + @NonNull Segment segment, @NonNull ChannelPromise segmentPromise) { + context.write(segment, segmentPromise); + } + + @Override + protected void onLargeFrameSplit(@NonNull Frame frame, int frameLength, int sliceCount) { + LOG.trace( + "[{}] Frame {} is too large ({} > {}), splitting into {} segments", + logPrefix, + frame.streamId, + frameLength, + Segment.MAX_PAYLOAD_LENGTH, + sliceCount); + } + + @Override + protected void onSegmentFull( + @NonNull Frame frame, int frameLength, int currentPayloadLength, int currentFrameCount) { + LOG.trace( + "[{}] Current self-contained segment is full ({}/{} bytes, {} frames), processing now", + logPrefix, + currentPayloadLength, + Segment.MAX_PAYLOAD_LENGTH, + currentFrameCount); + } + + @Override + protected void onSmallFrameAdded( + @NonNull Frame frame, int frameLength, int currentPayloadLength, int currentFrameCount) { + LOG.trace( + "[{}] Added frame {} to current self-contained segment " + + "(bringing it to {}/{} bytes, {} frames)", + logPrefix, + frame.streamId, + currentPayloadLength, + Segment.MAX_PAYLOAD_LENGTH, + currentFrameCount); + } + + @Override + protected void onLastSegmentFlushed(int currentPayloadLength, int currentFrameCount) { + LOG.trace( + "[{}] Flushing last self-contained segment ({}/{} bytes, {} frames)", + logPrefix, + currentPayloadLength, + Segment.MAX_PAYLOAD_LENGTH, + currentFrameCount); + } + + @NotThreadSafe + static class SliceWriteListener implements GenericFutureListener> { + + private final ChannelPromise parentPromise; + private final List slicePromises; + + // All slices are written to the same channel, and the segment is built from the Flusher which + // also runs on the same event loop, so we don't need synchronization. + private int remainingSlices; + + SliceWriteListener(@NonNull ChannelPromise parentPromise, List slicePromises) { + this.parentPromise = parentPromise; + this.slicePromises = slicePromises; + this.remainingSlices = slicePromises.size(); + } + + @Override + public void operationComplete(@NonNull Future future) { + if (!parentPromise.isDone()) { + if (future.isSuccess()) { + remainingSlices -= 1; + if (remainingSlices == 0) { + parentPromise.setSuccess(); + } + } else { + // If any slice fails, we can immediately mark the whole frame as failed: + parentPromise.setFailure(future.cause()); + // Cancel any remaining slice, Netty will not send the bytes. + for (ChannelPromise slicePromise : slicePromises) { + slicePromise.cancel(/*Netty ignores this*/ false); + } + } + } + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java new file mode 100644 index 00000000000..03125bd33a5 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoder.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import com.datastax.oss.driver.api.core.connection.CrcMismatchException; +import com.datastax.oss.protocol.internal.Segment; +import com.datastax.oss.protocol.internal.SegmentCodec; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import java.nio.ByteOrder; +import net.jcip.annotations.NotThreadSafe; + +/** + * Decodes {@link Segment}s from a stream of bytes. + * + *
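+ * <p>Concretely, the on-wire layout handled here (as reflected by the constructor arguments
+ * below) is {@code [header][CRC24 of header][payload][CRC32 of payload]}, where the header
+ * carries a 17-bit payload length plus flags, and the payload is at most
+ * {@code Segment.MAX_PAYLOAD_LENGTH} bytes.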

      This works like a regular length-field-based decoder, but we override {@link + * #getUnadjustedFrameLength} to handle two peculiarities: the length is encoded on 17 bits, and we + * also want to check the header CRC before we use it. So we parse the whole segment header ahead of + * time, and store it until we're ready to build the segment. + */ +@NotThreadSafe +public class BytesToSegmentDecoder extends LengthFieldBasedFrameDecoder { + + private final SegmentCodec segmentCodec; + private SegmentCodec.Header header; + + public BytesToSegmentDecoder(@NonNull SegmentCodec segmentCodec) { + super( + // max length (Netty wants this to be the overall length including everything): + segmentCodec.headerLength() + + SegmentCodec.CRC24_LENGTH + + Segment.MAX_PAYLOAD_LENGTH + + SegmentCodec.CRC32_LENGTH, + // offset and size of the "length" field: that's the whole header + 0, + segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH, + // length adjustment: add the trailing CRC to the declared length + SegmentCodec.CRC32_LENGTH, + // bytes to skip: the header (we've already parsed it while reading the length) + segmentCodec.headerLength() + SegmentCodec.CRC24_LENGTH); + this.segmentCodec = segmentCodec; + } + + @Override + protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { + try { + ByteBuf payloadAndCrc = (ByteBuf) super.decode(ctx, in); + if (payloadAndCrc == null) { + return null; + } else { + assert header != null; + try { + Segment segment = segmentCodec.decode(header, payloadAndCrc); + header = null; + return segment; + } catch (com.datastax.oss.protocol.internal.CrcMismatchException e) { + throw new CrcMismatchException(e.getMessage()); + } + } + } catch (Exception e) { + // Don't hold on to a stale header if we failed to decode the rest of the segment + header = null; + throw e; + } + } + + @Override + protected long getUnadjustedFrameLength(ByteBuf buffer, int offset, int length, ByteOrder order) { + // The parent class calls this repeatedly for the same "frame" if there weren't enough + // accumulated bytes the first time. Only decode the header the first time: + if (header == null) { + try { + header = segmentCodec.decodeHeader(buffer.slice(offset, length)); + } catch (com.datastax.oss.protocol.internal.CrcMismatchException e) { + throw new CrcMismatchException(e.getMessage()); + } + } + return header.payloadLength; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java new file mode 100644 index 00000000000..8a551a039db --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/CompressorSubstitutions.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import static com.datastax.oss.driver.internal.core.util.Dependency.LZ4; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; +import com.datastax.oss.protocol.internal.Compressor; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import io.netty.buffer.ByteBuf; +import java.util.Locale; +import java.util.function.BooleanSupplier; + +/** + * Handles GraalVM substitutions for compressors: LZ4 is only supported if we can find the native + * library in the classpath, and Snappy is never supported. + * + *
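+ * <p>For example, in a native image built without the LZ4 native library, {@code
+ * BuiltInCompressors.newInstance("lz4", context)} throws {@code UnsupportedOperationException}
+ * instead of returning a compressor, as the substitutions below show.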

      When a compressor is not supported, we delete its class, and modify {@link + * BuiltInCompressors#newInstance(String, DriverContext)} to throw an error if the user attempts to + * configure it. + */ +@SuppressWarnings("unused") +public class CompressorSubstitutions { + + @TargetClass(value = BuiltInCompressors.class, onlyWith = Lz4Present.class) + public static final class BuiltInCompressorsLz4Only { + @Substitute + public static Compressor newInstance(String name, DriverContext context) { + switch (name.toLowerCase(Locale.ROOT)) { + case "lz4": + return new Lz4Compressor(context); + case "snappy": + throw new UnsupportedOperationException( + "Snappy compression is not supported for native images"); + case "none": + return Compressor.none(); + default: + throw new IllegalArgumentException( + String.format( + "Unsupported compression algorithm '%s' (from configuration option %s)", + name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); + } + } + } + + @TargetClass(value = BuiltInCompressors.class, onlyWith = Lz4Missing.class) + public static final class NoBuiltInCompressors { + @Substitute + public static Compressor newInstance(String name, DriverContext context) { + switch (name.toLowerCase(Locale.ROOT)) { + case "lz4": + throw new UnsupportedOperationException( + "This native image was not built with support for LZ4 compression"); + case "snappy": + throw new UnsupportedOperationException( + "Snappy compression is not supported for native images"); + case "none": + return Compressor.none(); + default: + throw new IllegalArgumentException( + String.format( + "Unsupported compression algorithm '%s' (from configuration option %s)", + name, DefaultDriverOption.PROTOCOL_COMPRESSION.getPath())); + } + } + } + + public static class Lz4Present implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return GraalDependencyChecker.isPresent(LZ4); + } + } + + public static class Lz4Missing extends Lz4Present { + @Override + public boolean getAsBoolean() { + return !super.getAsBoolean(); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java index f9519e32035..20816ba581b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java index ffa59651eec..c209f3f263b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameDecodingException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java index d1c37f28654..6504ab29728 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameEncoder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java new file mode 100644 index 00000000000..46c872f4adc --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/FrameToSegmentEncoder.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.FrameCodec; +import com.datastax.oss.protocol.internal.PrimitiveCodec; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import net.jcip.annotations.NotThreadSafe; + +@NotThreadSafe +public class FrameToSegmentEncoder extends ChannelOutboundHandlerAdapter { + + private final PrimitiveCodec primitiveCodec; + private final FrameCodec frameCodec; + private final String logPrefix; + + private ByteBufSegmentBuilder segmentBuilder; + + public FrameToSegmentEncoder( + @NonNull PrimitiveCodec primitiveCodec, + @NonNull FrameCodec frameCodec, + @NonNull String logPrefix) { + this.primitiveCodec = primitiveCodec; + this.frameCodec = frameCodec; + this.logPrefix = logPrefix; + } + + @Override + public void handlerAdded(@NonNull ChannelHandlerContext ctx) { + segmentBuilder = new ByteBufSegmentBuilder(ctx, primitiveCodec, frameCodec, logPrefix); + } + + @Override + public void write( + @NonNull ChannelHandlerContext ctx, @NonNull Object msg, @NonNull ChannelPromise promise) + throws Exception { + if (msg instanceof Frame) { + segmentBuilder.addFrame(((Frame) msg), promise); + } else { + super.write(ctx, msg, promise); + } + } + + @Override + public void flush(@NonNull ChannelHandlerContext ctx) throws Exception { + segmentBuilder.flush(); + super.flush(ctx); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java index e1bce12fc11..d376cefc216 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/Lz4Compressor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,11 @@ */ package com.datastax.oss.driver.internal.core.protocol; +import static com.datastax.oss.driver.internal.core.util.Dependency.LZ4; + import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import io.netty.buffer.ByteBuf; import java.nio.ByteBuffer; import net.jcip.annotations.ThreadSafe; @@ -34,17 +40,21 @@ public class Lz4Compressor extends ByteBufCompressor { private final LZ4FastDecompressor decompressor; public Lz4Compressor(DriverContext context) { - try { + this(context.getSessionName()); + } + + @VisibleForTesting + Lz4Compressor(String sessionName) { + if (DefaultDependencyChecker.isPresent(LZ4)) { LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); - LOG.info("[{}] Using {}", context.getSessionName(), lz4Factory.toString()); + LOG.info("[{}] Using {}", sessionName, lz4Factory.toString()); this.compressor = lz4Factory.fastCompressor(); this.decompressor = lz4Factory.fastDecompressor(); - } catch (NoClassDefFoundError e) { + } else { throw new IllegalStateException( - "Error initializing compressor, make sure that the LZ4 library is in the classpath " + "Could not find the LZ4 library on the classpath " + "(the driver declares it as an optional dependency, " - + "so you need to declare it explicitly)", - e); + + "so you need to declare it explicitly)"); } } @@ -54,17 +64,20 @@ public String algorithm() { } @Override - protected ByteBuf compressDirect(ByteBuf input) { + protected ByteBuf compressDirect(ByteBuf input, boolean prependWithUncompressedLength) { int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); // If the input is direct we will allocate a direct output buffer as well as this will allow us // to use LZ4Compressor.compress and so eliminate memory copies. - ByteBuf output = input.alloc().directBuffer(4 + maxCompressedLength); + ByteBuf output = + input.alloc().directBuffer((prependWithUncompressedLength ? 4 : 0) + maxCompressedLength); try { ByteBuffer in = inputNioBuffer(input); // Increase reader index. input.readerIndex(input.writerIndex()); - output.writeInt(in.remaining()); + if (prependWithUncompressedLength) { + output.writeInt(in.remaining()); + } ByteBuffer out = outputNioBuffer(output); int written = @@ -81,7 +94,7 @@ protected ByteBuf compressDirect(ByteBuf input) { } @Override - protected ByteBuf compressHeap(ByteBuf input) { + protected ByteBuf compressHeap(ByteBuf input, boolean prependWithUncompressedLength) { int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes()); // Not a direct buffer so use byte arrays... @@ -93,9 +106,12 @@ protected ByteBuf compressHeap(ByteBuf input) { // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and // so can eliminate the overhead of allocate a new byte[]. - ByteBuf output = input.alloc().heapBuffer(4 + maxCompressedLength); + ByteBuf output = + input.alloc().heapBuffer((prependWithUncompressedLength ? 4 : 0) + maxCompressedLength); try { - output.writeInt(len); + if (prependWithUncompressedLength) { + output.writeInt(len); + } // calculate the correct offset. 
int offset = output.arrayOffset() + output.writerIndex(); byte[] out = output.array(); @@ -112,11 +128,15 @@ protected ByteBuf compressHeap(ByteBuf input) { } @Override - protected ByteBuf decompressDirect(ByteBuf input) { + protected int readUncompressedLength(ByteBuf compressed) { + return compressed.readInt(); + } + + @Override + protected ByteBuf decompressDirect(ByteBuf input, int uncompressedLength) { // If the input is direct we will allocate a direct output buffer as well as this will allow us // to use LZ4Compressor.decompress and so eliminate memory copies. int readable = input.readableBytes(); - int uncompressedLength = input.readInt(); ByteBuffer in = inputNioBuffer(input); // Increase reader index. input.readerIndex(input.writerIndex()); @@ -124,7 +144,7 @@ protected ByteBuf decompressDirect(ByteBuf input) { try { ByteBuffer out = outputNioBuffer(output); int read = decompressor.decompress(in, in.position(), out, out.position(), out.remaining()); - if (read != readable - 4) { + if (read != readable) { throw new IllegalArgumentException("Compressed lengths mismatch"); } @@ -139,11 +159,10 @@ protected ByteBuf decompressDirect(ByteBuf input) { } @Override - protected ByteBuf decompressHeap(ByteBuf input) { + protected ByteBuf decompressHeap(ByteBuf input, int uncompressedLength) { // Not a direct buffer so use byte arrays... byte[] in = input.array(); int len = input.readableBytes(); - int uncompressedLength = input.readInt(); int inOffset = input.arrayOffset() + input.readerIndex(); // Increase reader index. input.readerIndex(input.writerIndex()); @@ -153,9 +172,9 @@ protected ByteBuf decompressHeap(ByteBuf input) { ByteBuf output = input.alloc().heapBuffer(uncompressedLength); try { int offset = output.arrayOffset() + output.writerIndex(); - byte out[] = output.array(); + byte[] out = output.array(); int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength); - if (read != len - 4) { + if (read != len) { throw new IllegalArgumentException("Compressed lengths mismatch"); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java new file mode 100644 index 00000000000..c7845545df4 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToBytesEncoder.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.protocol; + +import com.datastax.oss.protocol.internal.Segment; +import com.datastax.oss.protocol.internal.SegmentCodec; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToMessageEncoder; +import java.util.List; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +@ChannelHandler.Sharable +public class SegmentToBytesEncoder extends MessageToMessageEncoder> { + + private final SegmentCodec segmentCodec; + + public SegmentToBytesEncoder(@NonNull SegmentCodec segmentCodec) { + this.segmentCodec = segmentCodec; + } + + @Override + protected void encode( + @NonNull ChannelHandlerContext ctx, + @NonNull Segment segment, + @NonNull List out) { + segmentCodec.encode(segment, out); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java new file mode 100644 index 00000000000..b15a17bb87f --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoder.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.FrameCodec; +import com.datastax.oss.protocol.internal.Segment; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToMessageDecoder; +import java.util.ArrayList; +import java.util.List; +import net.jcip.annotations.NotThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Converts the segments decoded by {@link BytesToSegmentDecoder} into legacy frames understood by + * the rest of the driver. 
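+ * <p>Two cases, mirroring the encoding side: a self-contained segment carries one or more
+ * complete frames and is decoded in a loop; a non-self-contained segment carries one slice of a
+ * large frame, and slices are accumulated until the target length read from the first slice is
+ * reached, at which point they are merged into a single {@code CompositeByteBuf} and decoded.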
+ */ +@NotThreadSafe +public class SegmentToFrameDecoder extends MessageToMessageDecoder> { + + private static final Logger LOG = LoggerFactory.getLogger(SegmentToFrameDecoder.class); + + private static final int UNKNOWN_LENGTH = Integer.MIN_VALUE; + + private final FrameCodec frameCodec; + private final String logPrefix; + + // Accumulated state when we are reading a sequence of slices + private int targetLength = UNKNOWN_LENGTH; + private final List accumulatedSlices = new ArrayList<>(); + private int accumulatedLength; + + public SegmentToFrameDecoder(@NonNull FrameCodec frameCodec, @NonNull String logPrefix) { + this.logPrefix = logPrefix; + this.frameCodec = frameCodec; + } + + @Override + protected void decode( + @NonNull ChannelHandlerContext ctx, + @NonNull Segment segment, + @NonNull List out) { + if (segment.isSelfContained) { + decodeSelfContained(segment, out); + } else { + decodeSlice(segment, ctx.alloc(), out); + } + } + + private void decodeSelfContained(Segment segment, List out) { + ByteBuf payload = segment.payload; + int frameCount = 0; + try { + do { + Frame frame = frameCodec.decode(payload); + LOG.trace( + "[{}] Decoded response frame {} from self-contained segment", + logPrefix, + frame.streamId); + out.add(frame); + frameCount += 1; + } while (payload.isReadable()); + } finally { + payload.release(); + } + LOG.trace("[{}] Done processing self-contained segment ({} frames)", logPrefix, frameCount); + } + + private void decodeSlice(Segment segment, ByteBufAllocator allocator, List out) { + assert targetLength != UNKNOWN_LENGTH ^ (accumulatedSlices.isEmpty() && accumulatedLength == 0); + ByteBuf slice = segment.payload; + if (targetLength == UNKNOWN_LENGTH) { + // First slice, read ahead to find the target length + targetLength = FrameCodec.V3_ENCODED_HEADER_SIZE + frameCodec.decodeBodySize(slice); + } + accumulatedSlices.add(slice); + accumulatedLength += slice.readableBytes(); + int accumulatedSlicesSize = accumulatedSlices.size(); + LOG.trace( + "[{}] Decoded slice {}, {}/{} bytes", + logPrefix, + accumulatedSlicesSize, + accumulatedLength, + targetLength); + assert accumulatedLength <= targetLength; + if (accumulatedLength == targetLength) { + // We've received enough data to reassemble the whole message + CompositeByteBuf encodedFrame = allocator.compositeBuffer(accumulatedSlicesSize); + encodedFrame.addComponents(true, accumulatedSlices); + Frame frame; + try { + frame = frameCodec.decode(encodedFrame); + } finally { + encodedFrame.release(); + // Reset our state + targetLength = UNKNOWN_LENGTH; + accumulatedSlices.clear(); + accumulatedLength = 0; + } + LOG.trace( + "[{}] Decoded response frame {} from {} slices", + logPrefix, + frame.streamId, + accumulatedSlicesSize); + out.add(frame); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java index 3b50220ef95..21165d808b9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/SnappyCompressor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,24 +18,30 @@ package com.datastax.oss.driver.internal.core.protocol; import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; +import com.datastax.oss.driver.internal.core.util.Dependency; import io.netty.buffer.ByteBuf; import java.io.IOException; import java.nio.ByteBuffer; import net.jcip.annotations.ThreadSafe; import org.xerial.snappy.Snappy; +/** + * @implNote The Snappy protocol already encodes the uncompressed length in the compressed payload, + * so {@link #compress(ByteBuf)} and {@link #compressWithoutLength(ByteBuf)} produce the same + * output for this compressor. The corresponding parameters {@code + * prependWithUncompressedLength} and {@code uncompressedLength} are ignored by their respective + * methods. + */ @ThreadSafe public class SnappyCompressor extends ByteBufCompressor { public SnappyCompressor(@SuppressWarnings("unused") DriverContext context) { - try { - Snappy.getNativeLibraryVersion(); - } catch (NoClassDefFoundError e) { + if (!DefaultDependencyChecker.isPresent(Dependency.SNAPPY)) { throw new IllegalStateException( - "Error initializing compressor, make sure that the Snappy library is in the classpath " + "Could not find the Snappy library on the classpath " + "(the driver declares it as an optional dependency, " - + "so you need to declare it explicitly)", - e); + + "so you need to declare it explicitly)"); } } @@ -43,7 +51,8 @@ public String algorithm() { } @Override - protected ByteBuf compressDirect(ByteBuf input) { + protected ByteBuf compressDirect( + ByteBuf input, /*ignored*/ boolean prependWithUncompressedLength) { int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); // If the input is direct we will allocate a direct output buffer as well as this will allow us // to use Snappy.compress(ByteBuffer, ByteBuffer) and so eliminate memory copies. @@ -66,7 +75,7 @@ protected ByteBuf compressDirect(ByteBuf input) { } @Override - protected ByteBuf compressHeap(ByteBuf input) { + protected ByteBuf compressHeap(ByteBuf input, /*ignored*/ boolean prependWithUncompressedLength) { int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes()); int inOffset = input.arrayOffset() + input.readerIndex(); byte[] in = input.array(); @@ -94,7 +103,15 @@ protected ByteBuf compressHeap(ByteBuf input) { } @Override - protected ByteBuf decompressDirect(ByteBuf input) { + protected int readUncompressedLength(ByteBuf compressed) { + // Since compress methods don't actually prepend with a length, we have nothing to read here. + // Return a bogus length (it will be ignored by the decompress methods, so the actual value + // doesn't matter). + return -1; + } + + @Override + protected ByteBuf decompressDirect(ByteBuf input, /*ignored*/ int uncompressedLength) { ByteBuffer in = inputNioBuffer(input); // Increase reader index. 
input.readerIndex(input.writerIndex()); @@ -124,7 +141,7 @@ protected ByteBuf decompressDirect(ByteBuf input) { } @Override - protected ByteBuf decompressHeap(ByteBuf input) throws RuntimeException { + protected ByteBuf decompressHeap(ByteBuf input, /*ignored*/ int uncompressedLength) { // Not a direct buffer so use byte arrays... int inOffset = input.arrayOffset() + input.readerIndex(); byte[] in = input.array(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java index a77aede1ecf..05da030eec3 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/protocol/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java new file mode 100644 index 00000000000..dbf534459a3 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryPolicy.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.retry; + +import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH_LOG; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.SIMPLE; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.UNLOGGED_BATCH; + +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; +import com.datastax.oss.driver.api.core.connection.HeartbeatException; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; +import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; +import com.datastax.oss.driver.api.core.servererrors.WriteType; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A retry policy that sometimes retries with a lower consistency level than the one initially + * requested. + * + *

BEWARE: this policy may retry queries using a lower consistency level than the one + * initially requested. By doing so, it may break consistency guarantees. In other words, if you use + * this retry policy, there are cases (documented below) where a read at {@code QUORUM} may + * not see a preceding write at {@code QUORUM}. Furthermore, this policy doesn't always respect + * datacenter locality; for example, it may downgrade {@code LOCAL_QUORUM} to {@code ONE}, and thus + * could accidentally send a write that was intended for the local datacenter to another + * datacenter. Do not use this policy unless you have understood the cases where this can happen and + * are ok with that. + * + *
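+ * <p>To opt in, reference this class in the configuration (illustrative snippet; the option
+ * path follows the driver's {@code reference.conf}):
+ * <pre>
+ * datastax-java-driver.advanced.retry-policy {
+ *   class = ConsistencyDowngradingRetryPolicy
+ * }
+ * </pre>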

This policy implements the same retries as the {@link DefaultRetryPolicy}. But on top + * of that, it also retries in the following cases: + * + *

        + *
• On a read timeout: if the number of replicas that responded is at least one, but lower + * than is required by the requested consistency level, the operation is retried at a lower + * consistency level. + *
      • On a write timeout: if the operation is a {@code WriteType.UNLOGGED_BATCH} and at least one + * replica acknowledged the write, the operation is retried at a lower consistency level. + * Furthermore, for other operations, if at least one replica acknowledged the write, the + * timeout is ignored. + *
      • On an unavailable exception: if at least one replica is alive, the operation is retried at + * a lower consistency level. + *
+ * + * The lower consistency level to use for retries is determined by the following rules (illustrated by the sketch after this list): + * + *
        + *
      • if more than 3 replicas responded, use {@code THREE}. + *
      • if 1, 2 or 3 replicas responded, use the corresponding level {@code ONE}, {@code TWO} or + * {@code THREE}. + *
      + * + * Note that if the initial consistency level was {@code EACH_QUORUM}, Cassandra returns the number + * of live replicas in the datacenter that failed to reach consistency, not the overall + * number in the cluster. Therefore if this number is 0, we still retry at {@code ONE}, on the + * assumption that a host may still be up in another datacenter. + * + *
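+ * <p>A sketch of these rules in code (illustrative only: the private helper {@code
+ * maybeDowngrade} referenced by the methods below is not part of this hunk, and the verdict
+ * implementation carrying the new consistency level is assumed here):
+ * <pre>{@code
+ * private RetryVerdict maybeDowngrade(int received, ConsistencyLevel cl) {
+ *   if (received >= 3) return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.THREE);
+ *   if (received == 2) return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.TWO);
+ *   if (received >= 1) return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.ONE);
+ *   // EACH_QUORUM reports per-datacenter numbers, so 0 may still mean live replicas elsewhere:
+ *   if (cl == ConsistencyLevel.EACH_QUORUM) {
+ *     return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.ONE);
+ *   }
+ *   return RetryVerdict.RETHROW;
+ * }
+ * }</pre>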

The reasoning behind this retry policy is as follows. If, based on the information the + * Cassandra coordinator node returns, retrying the operation with the initially requested + * consistency has a chance to succeed, do it. Otherwise, if, based on this information, we know that + * the initially requested consistency level cannot currently be achieved, then: + * + *

        + *
      • For writes, ignore the exception (thus silently failing the consistency requirement) if we + * know the write has been persisted on at least one replica. + *
      • For reads, try reading at a lower consistency level (thus silently failing the consistency + * requirement). + *
      + * + * In other words, this policy implements the idea that if the requested consistency level cannot be + * achieved, the next best thing for writes is to make sure the data is persisted, and that reading + * something is better than reading nothing, even if there is a risk of reading stale data. + */ +public class ConsistencyDowngradingRetryPolicy implements RetryPolicy { + + private static final Logger LOG = + LoggerFactory.getLogger(ConsistencyDowngradingRetryPolicy.class); + + @VisibleForTesting + public static final String VERDICT_ON_READ_TIMEOUT = + "[{}] Verdict on read timeout (consistency: {}, required responses: {}, " + + "received responses: {}, data retrieved: {}, retries: {}): {}"; + + @VisibleForTesting + public static final String VERDICT_ON_WRITE_TIMEOUT = + "[{}] Verdict on write timeout (consistency: {}, write type: {}, " + + "required acknowledgments: {}, received acknowledgments: {}, retries: {}): {}"; + + @VisibleForTesting + public static final String VERDICT_ON_UNAVAILABLE = + "[{}] Verdict on unavailable exception (consistency: {}, " + + "required replica: {}, alive replica: {}, retries: {}): {}"; + + @VisibleForTesting + public static final String VERDICT_ON_ABORTED = + "[{}] Verdict on aborted request (type: {}, message: '{}', retries: {}): {}"; + + @VisibleForTesting + public static final String VERDICT_ON_ERROR = + "[{}] Verdict on node error (type: {}, message: '{}', retries: {}): {}"; + + private final String logPrefix; + + @SuppressWarnings("unused") + public ConsistencyDowngradingRetryPolicy( + @NonNull DriverContext context, @NonNull String profileName) { + this(context.getSessionName() + "|" + profileName); + } + + public ConsistencyDowngradingRetryPolicy(@NonNull String logPrefix) { + this.logPrefix = logPrefix; + } + + /** + * {@inheritDoc} + * + *

<p>This implementation triggers a maximum of one retry. If fewer replicas responded than + * required by the consistency level (but at least one replica did respond), the operation is + * retried at a lower consistency level. If enough replicas responded but data was not retrieved, + * the operation is retried with the initial consistency level. Otherwise, an exception is thrown. + */ + @Override + public RetryVerdict onReadTimeoutVerdict( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int blockFor, + int received, + boolean dataPresent, + int retryCount) { + RetryVerdict verdict; + if (retryCount != 0) { + verdict = RetryVerdict.RETHROW; + } else if (cl.isSerial()) { + // CAS reads are not all that useful in terms of visibility of the writes since CAS write + // supports the normal consistency levels on the committing phase. So the main use case for + // CAS reads is probably for when you've timed out on a CAS write and want to make sure what + // happened. Downgrading in that case would always be wrong, so we just special-case to + // rethrow. + verdict = RetryVerdict.RETHROW; + } else if (received < blockFor) { + verdict = maybeDowngrade(received, cl); + } else if (!dataPresent) { + // Retry with same CL since this usually means that enough replicas are alive to satisfy the + // consistency but the coordinator picked a dead one for data retrieval, not having detected + // that replica as dead yet. + verdict = RetryVerdict.RETRY_SAME; + } else { + // This usually means a digest mismatch, in which case it's pointless to retry since + // the inconsistency has to be repaired first. + verdict = RetryVerdict.RETHROW; + } + if (LOG.isTraceEnabled()) { + LOG.trace( + VERDICT_ON_READ_TIMEOUT, + logPrefix, + cl, + blockFor, + received, + dataPresent, + retryCount, + verdict); + } + return verdict; + } + + /** + * {@inheritDoc} + * + *

<p>This implementation triggers a maximum of one retry. If {@code writeType == + * WriteType.BATCH_LOG}, the write is retried with the initial consistency level. If {@code + * writeType == WriteType.UNLOGGED_BATCH} and at least one replica acknowledged, the write is + * retried with a lower consistency level (with an unlogged batch, a write timeout can always + * mean that part of the batch hasn't been persisted at all, even if {@code receivedAcks > 0}). + * For other write types ({@code WriteType.SIMPLE} and {@code WriteType.BATCH}), if we know the + * write has been persisted on at least one replica, we ignore the exception. Otherwise, an + * exception is thrown. + */ + @Override + public RetryVerdict onWriteTimeoutVerdict( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + @NonNull WriteType writeType, + int blockFor, + int received, + int retryCount) { + RetryVerdict verdict; + if (retryCount != 0) { + verdict = RetryVerdict.RETHROW; + } else if (SIMPLE.equals(writeType) || BATCH.equals(writeType)) { + // Since we provide atomicity, if at least one replica acknowledged the write, + // there is no point in retrying + verdict = received > 0 ? RetryVerdict.IGNORE : RetryVerdict.RETHROW; + } else if (UNLOGGED_BATCH.equals(writeType)) { + // Since only part of the batch could have been persisted, + // retry with whatever consistency level should allow all of it to be persisted + verdict = maybeDowngrade(received, cl); + } else if (BATCH_LOG.equals(writeType)) { + verdict = RetryVerdict.RETRY_SAME; + } else { + verdict = RetryVerdict.RETHROW; + } + if (LOG.isTraceEnabled()) { + LOG.trace( + VERDICT_ON_WRITE_TIMEOUT, + logPrefix, + cl, + writeType, + blockFor, + received, + retryCount, + verdict); + } + return verdict; + } + + /** + * {@inheritDoc} + * + *

<p>This implementation triggers a maximum of one retry. If at least one replica is known to be + * alive, the operation is retried at a lower consistency level. + */ + @Override + public RetryVerdict onUnavailableVerdict( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int required, + int alive, + int retryCount) { + RetryVerdict verdict; + if (retryCount != 0) { + verdict = RetryVerdict.RETHROW; + } else if (cl.isSerial()) { + // JAVA-764: if the requested consistency level is serial, it means that the + // operation failed at the paxos phase of a LWT. + // Retry on the next host, on the assumption that the initial coordinator could be + // network-isolated. + verdict = RetryVerdict.RETRY_NEXT; + } else { + verdict = maybeDowngrade(alive, cl); + } + if (LOG.isTraceEnabled()) { + LOG.trace(VERDICT_ON_UNAVAILABLE, logPrefix, cl, required, alive, retryCount, verdict); + } + return verdict; + } + + @Override + public RetryVerdict onRequestAbortedVerdict( + @NonNull Request request, @NonNull Throwable error, int retryCount) { + RetryVerdict verdict = + error instanceof ClosedConnectionException || error instanceof HeartbeatException + ? RetryVerdict.RETRY_NEXT + : RetryVerdict.RETHROW; + if (LOG.isTraceEnabled()) { + LOG.trace( + VERDICT_ON_ABORTED, + logPrefix, + error.getClass().getSimpleName(), + error.getMessage(), + retryCount, + verdict); + } + return verdict; + } + + @Override + public RetryVerdict onErrorResponseVerdict( + @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { + RetryVerdict verdict = + error instanceof WriteFailureException || error instanceof ReadFailureException + ? RetryVerdict.RETHROW + : RetryVerdict.RETRY_NEXT; + if (LOG.isTraceEnabled()) { + LOG.trace( + VERDICT_ON_ERROR, + logPrefix, + error.getClass().getSimpleName(), + error.getMessage(), + retryCount, + verdict); + } + return verdict; + } + + @Override + @Deprecated + public RetryDecision onReadTimeout( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int blockFor, + int received, + boolean dataPresent, + int retryCount) { + throw new UnsupportedOperationException("onReadTimeout"); + } + + @Override + @Deprecated + public RetryDecision onWriteTimeout( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + @NonNull WriteType writeType, + int blockFor, + int received, + int retryCount) { + throw new UnsupportedOperationException("onWriteTimeout"); + } + + @Override + @Deprecated + public RetryDecision onUnavailable( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int required, + int alive, + int retryCount) { + throw new UnsupportedOperationException("onUnavailable"); + } + + @Override + @Deprecated + public RetryDecision onRequestAborted( + @NonNull Request request, @NonNull Throwable error, int retryCount) { + throw new UnsupportedOperationException("onRequestAborted"); + } + + @Override + @Deprecated + public RetryDecision onErrorResponse( + @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { + throw new UnsupportedOperationException("onErrorResponse"); + } + + @Override + public void close() {} + + private RetryVerdict maybeDowngrade(int alive, ConsistencyLevel current) { + if (alive >= 3) { + return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.THREE); + } + if (alive == 2) { + return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.TWO); + } + // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas + // so even if we get 0 alive replicas, there might be a node up
in some other datacenter + if (alive == 1 || current.getProtocolCode() == ConsistencyLevel.EACH_QUORUM.getProtocolCode()) { + return new ConsistencyDowngradingRetryVerdict(ConsistencyLevel.ONE); + } + return RetryVerdict.RETHROW; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java new file mode 100644 index 00000000000..d78f80c7354 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/ConsistencyDowngradingRetryVerdict.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.retry; + +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import com.datastax.oss.driver.api.core.session.Request; +import edu.umd.cs.findbugs.annotations.NonNull; + +public class ConsistencyDowngradingRetryVerdict implements RetryVerdict { + + private final ConsistencyLevel consistencyLevel; + + public ConsistencyDowngradingRetryVerdict(@NonNull ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + } + + @NonNull + @Override + public RetryDecision getRetryDecision() { + return RetryDecision.RETRY_SAME; + } + + @NonNull + @Override + public <RequestT extends Request> RequestT getRetryRequest(@NonNull RequestT previous) { + if (previous instanceof Statement) { + Statement<?> statement = (Statement<?>) previous; + @SuppressWarnings("unchecked") + RequestT toRetry = (RequestT) statement.setConsistencyLevel(consistencyLevel); + return toRetry; + } + return previous; + } + + @Override + public String toString() { + return getRetryDecision() + " at consistency " + consistencyLevel; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java index c15cfb41baa..8cea1a564b5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,32 +61,31 @@ public class DefaultRetryPolicy implements RetryPolicy { private static final Logger LOG = LoggerFactory.getLogger(DefaultRetryPolicy.class); @VisibleForTesting - static final String RETRYING_ON_READ_TIMEOUT = + public static final String RETRYING_ON_READ_TIMEOUT = "[{}] Retrying on read timeout on same host (consistency: {}, required responses: {}, " + "received responses: {}, data retrieved: {}, retries: {})"; @VisibleForTesting - static final String RETRYING_ON_WRITE_TIMEOUT = + public static final String RETRYING_ON_WRITE_TIMEOUT = "[{}] Retrying on write timeout on same host (consistency: {}, write type: {}, " + "required acknowledgments: {}, received acknowledgments: {}, retries: {})"; @VisibleForTesting - static final String RETRYING_ON_UNAVAILABLE = + public static final String RETRYING_ON_UNAVAILABLE = "[{}] Retrying on unavailable exception on next host (consistency: {}, " + "required replica: {}, alive replica: {}, retries: {})"; @VisibleForTesting - static final String RETRYING_ON_ABORTED = + public static final String RETRYING_ON_ABORTED = "[{}] Retrying on aborted request on next host (retries: {})"; @VisibleForTesting - static final String RETRYING_ON_ERROR = "[{}] Retrying on node error on next host (retries: {})"; + public static final String RETRYING_ON_ERROR = + "[{}] Retrying on node error on next host (retries: {})"; private final String logPrefix; - public DefaultRetryPolicy( - @SuppressWarnings("unused") DriverContext context, - @SuppressWarnings("unused") String profileName) { + public DefaultRetryPolicy(DriverContext context, String profileName) { this.logPrefix = (context != null ? context.getSessionName() : null) + "|" + profileName; } @@ -101,6 +102,7 @@ public DefaultRetryPolicy( *

<p>Otherwise, the exception is rethrown. */ @Override + @Deprecated public RetryDecision onReadTimeout( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -134,6 +136,7 @@ public RetryDecision onReadTimeout( *

<p>Otherwise, the exception is rethrown. */ @Override + @Deprecated public RetryDecision onWriteTimeout( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -166,6 +169,7 @@ public RetryDecision onWriteTimeout( *

<p>Otherwise, the exception is rethrown. */ @Override + @Deprecated public RetryDecision onUnavailable( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -189,6 +193,7 @@ public RetryDecision onUnavailable( * (assuming a driver bug) in all other cases. */ @Override + @Deprecated public RetryDecision onRequestAborted( @NonNull Request request, @NonNull Throwable error, int retryCount) { @@ -211,6 +216,7 @@ public RetryDecision onRequestAborted( * node. */ @Override + @Deprecated public RetryDecision onErrorResponse( @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java new file mode 100644 index 00000000000..e74651e30de --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryVerdict.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.retry; + +import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import edu.umd.cs.findbugs.annotations.NonNull; + +public class DefaultRetryVerdict implements RetryVerdict { + + private final RetryDecision decision; + + public DefaultRetryVerdict(@NonNull RetryDecision decision) { + this.decision = decision; + } + + @NonNull + @Override + public RetryDecision getRetryDecision() { + return decision; + } + + @Override + public String toString() { + return getRetryDecision().name(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java index 598db8fbbbe..7abe49a98c0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/DefaultWriteTypeRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java index 1634b3066bf..537c3922f0f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/servererrors/WriteTypeRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java new file mode 100644 index 00000000000..dc6e6a295a1 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessors.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.session; + +import static com.datastax.oss.driver.internal.core.util.Dependency.REACTIVE_STREAMS; +import static com.datastax.oss.driver.internal.core.util.Dependency.TINKERPOP; + +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestSyncProcessor; +import com.datastax.dse.driver.internal.core.cql.continuous.reactive.ContinuousCqlRequestReactiveProcessor; +import com.datastax.dse.driver.internal.core.cql.reactive.CqlRequestReactiveProcessor; +import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor; +import com.datastax.dse.driver.internal.core.graph.GraphRequestSyncProcessor; +import com.datastax.dse.driver.internal.core.graph.GraphSupportChecker; +import com.datastax.dse.driver.internal.core.graph.reactive.ReactiveGraphRequestProcessor; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; +import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; +import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; +import com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor; +import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class BuiltInRequestProcessors { + + private static final Logger LOG = LoggerFactory.getLogger(BuiltInRequestProcessors.class); + + public static List<RequestProcessor<?, ?>> createDefaultProcessors(DefaultDriverContext context) { + List<RequestProcessor<?, ?>> processors = new ArrayList<>(); + addBasicProcessors(processors, context); + if (DefaultDependencyChecker.isPresent(TINKERPOP)) { + addGraphProcessors(context, processors); + } else { + LOG.debug("Tinkerpop was not found on the classpath: graph extensions will not be available"); + } + if (DefaultDependencyChecker.isPresent(REACTIVE_STREAMS)) { + addReactiveProcessors(processors); + } else { + LOG.debug( + "Reactive Streams was not found on the classpath: reactive extensions will not be available"); + } + if (DefaultDependencyChecker.isPresent(REACTIVE_STREAMS) + && DefaultDependencyChecker.isPresent(TINKERPOP)) { + addGraphReactiveProcessors(context, processors); + } + return processors; + } + + public static void addBasicProcessors( + List<RequestProcessor<?, ?>> processors, DefaultDriverContext context) { + // regular requests (sync and async) + CqlRequestAsyncProcessor cqlRequestAsyncProcessor = new CqlRequestAsyncProcessor(); + CqlRequestSyncProcessor cqlRequestSyncProcessor = + new CqlRequestSyncProcessor(cqlRequestAsyncProcessor); + processors.add(cqlRequestAsyncProcessor); + processors.add(cqlRequestSyncProcessor); + + // prepare requests (sync and async) + CqlPrepareAsyncProcessor cqlPrepareAsyncProcessor = + new CqlPrepareAsyncProcessor(Optional.of(context)); + CqlPrepareSyncProcessor cqlPrepareSyncProcessor = + new CqlPrepareSyncProcessor(cqlPrepareAsyncProcessor); + processors.add(cqlPrepareAsyncProcessor); + processors.add(cqlPrepareSyncProcessor); + + // continuous requests (sync and async) + ContinuousCqlRequestAsyncProcessor continuousCqlRequestAsyncProcessor = + new ContinuousCqlRequestAsyncProcessor(); + ContinuousCqlRequestSyncProcessor continuousCqlRequestSyncProcessor = + new ContinuousCqlRequestSyncProcessor(continuousCqlRequestAsyncProcessor); 
processors.add(continuousCqlRequestAsyncProcessor); + processors.add(continuousCqlRequestSyncProcessor); + } + + public static void addGraphProcessors( + DefaultDriverContext context, List<RequestProcessor<?, ?>> processors) { + GraphRequestAsyncProcessor graphRequestAsyncProcessor = + new GraphRequestAsyncProcessor(context, new GraphSupportChecker()); + GraphRequestSyncProcessor graphRequestSyncProcessor = + new GraphRequestSyncProcessor(graphRequestAsyncProcessor); + processors.add(graphRequestAsyncProcessor); + processors.add(graphRequestSyncProcessor); + } + + public static void addReactiveProcessors(List<RequestProcessor<?, ?>> processors) { + CqlRequestReactiveProcessor cqlRequestReactiveProcessor = + new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); + ContinuousCqlRequestReactiveProcessor continuousCqlRequestReactiveProcessor = + new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); + processors.add(cqlRequestReactiveProcessor); + processors.add(continuousCqlRequestReactiveProcessor); + } + + public static void addGraphReactiveProcessors( + DefaultDriverContext context, List<RequestProcessor<?, ?>> processors) { + ReactiveGraphRequestProcessor reactiveGraphRequestProcessor = + new ReactiveGraphRequestProcessor( + new GraphRequestAsyncProcessor(context, new GraphSupportChecker())); + processors.add(reactiveGraphRequestProcessor); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java new file mode 100644 index 00000000000..b8bca431228 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/BuiltInRequestProcessorsSubstitutions.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.internal.core.session; + +import static com.datastax.oss.driver.internal.core.util.Dependency.REACTIVE_STREAMS; +import static com.datastax.oss.driver.internal.core.util.Dependency.TINKERPOP; + +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.util.GraalDependencyChecker; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import java.util.ArrayList; +import java.util.List; +import java.util.function.BooleanSupplier; + +@SuppressWarnings("unused") +public class BuiltInRequestProcessorsSubstitutions { + + @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphMissingReactiveMissing.class) + public static final class BuiltInRequestProcessorsGraphMissingReactiveMissing { + + @Substitute + public static List<RequestProcessor<?, ?>> createDefaultProcessors( + DefaultDriverContext context) { + List<RequestProcessor<?, ?>> processors = new ArrayList<>(); + BuiltInRequestProcessors.addBasicProcessors(processors, context); + return processors; + } + } + + @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphMissingReactivePresent.class) + public static final class BuiltInRequestProcessorsGraphMissingReactivePresent { + + @Substitute + public static List<RequestProcessor<?, ?>> createDefaultProcessors( + DefaultDriverContext context) { + List<RequestProcessor<?, ?>> processors = new ArrayList<>(); + BuiltInRequestProcessors.addBasicProcessors(processors, context); + BuiltInRequestProcessors.addReactiveProcessors(processors); + return processors; + } + } + + @TargetClass(value = BuiltInRequestProcessors.class, onlyWith = GraphPresentReactiveMissing.class) + public static final class BuiltInRequestProcessorsGraphPresentReactiveMissing { + + @Substitute + public static List<RequestProcessor<?, ?>> createDefaultProcessors( + DefaultDriverContext context) { + List<RequestProcessor<?, ?>> processors = new ArrayList<>(); + BuiltInRequestProcessors.addBasicProcessors(processors, context); + BuiltInRequestProcessors.addGraphProcessors(context, processors); + return processors; + } + } + + public static class GraphMissingReactiveMissing implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return !GraalDependencyChecker.isPresent(TINKERPOP) + && !GraalDependencyChecker.isPresent(REACTIVE_STREAMS); + } + } + + public static class GraphMissingReactivePresent implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return !GraalDependencyChecker.isPresent(TINKERPOP) + && GraalDependencyChecker.isPresent(REACTIVE_STREAMS); + } + } + + public static class GraphPresentReactiveMissing implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return GraalDependencyChecker.isPresent(TINKERPOP) + && !GraalDependencyChecker.isPresent(REACTIVE_STREAMS); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java index 8f2dbcc59e7..b795c30fce7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/DefaultSession.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,10 +34,12 @@ import com.datastax.oss.driver.internal.core.channel.DriverChannel; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.context.LifecycleListener; -import com.datastax.oss.driver.internal.core.control.ControlConnection; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; import com.datastax.oss.driver.internal.core.metadata.MetadataManager; +import com.datastax.oss.driver.internal.core.metadata.MetadataManager.RefreshSchemaResult; import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; import com.datastax.oss.driver.internal.core.metadata.NodeStateManager; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; import com.datastax.oss.driver.internal.core.pool.ChannelPool; import com.datastax.oss.driver.internal.core.util.Loggers; @@ -55,6 +59,7 @@ import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; @@ -80,6 +85,8 @@ public class DefaultSession implements CqlSession { private static final Logger LOG = LoggerFactory.getLogger(DefaultSession.class); + private static final AtomicInteger INSTANCE_COUNT = new AtomicInteger(); + public static CompletionStage<CqlSession> init( InternalDriverContext context, Set<EndPoint> contactPoints, CqlIdentifier keyspace) { return new DefaultSession(context, contactPoints).init(keyspace); @@ -95,7 +102,20 @@ public static CompletionStage init( private final SessionMetricUpdater metricUpdater; private DefaultSession(InternalDriverContext context, Set<EndPoint> contactPoints) { - LOG.debug("Creating new session {}", context.getSessionName()); + int instanceCount = INSTANCE_COUNT.incrementAndGet(); + int threshold = + context.getConfig().getDefaultProfile().getInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD); + LOG.debug( + "Creating new session {} ({} live instances)", context.getSessionName(), instanceCount); + if (threshold > 0 && instanceCount > threshold) { + LOG.warn( + "You have too many session instances: {} active, expected less than {} " + + "(see '{}' in the configuration)", + instanceCount, + threshold, + DefaultDriverOption.SESSION_LEAK_THRESHOLD.getPath()); + } + this.logPrefix = context.getSessionName(); this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); try { @@ -106,6 +126,10 @@ private DefaultSession(InternalDriverContext context, Set contactPoint this.poolManager = context.getPoolManager(); this.metricUpdater = context.getMetricsFactory().getSessionUpdater(); } catch (Throwable t) { + LOG.debug( + "Error creating session {} ({} live instances)", + context.getSessionName(), +
INSTANCE_COUNT.decrementAndGet()); // Rethrow but make sure we release any resources allocated by Netty. At this stage there are // no scheduled tasks on the event loops so getNow() won't block. try { @@ -153,7 +177,9 @@ public CompletionStage setSchemaMetadataEnabled(@Nullable Boolean newV @NonNull @Override public CompletionStage refreshSchemaAsync() { - return metadataManager.refreshSchema(null, true, true); + return metadataManager + .refreshSchema(null, true, true) + .thenApply(RefreshSchemaResult::getMetadata); } @NonNull @@ -280,6 +306,7 @@ private class SingleThreaded { private final InternalDriverContext context; private final Set initialContactPoints; private final NodeStateManager nodeStateManager; + private final SchemaListenerNotifier schemaListenerNotifier; private final CompletableFuture initFuture = new CompletableFuture<>(); private boolean initWasCalled; private final CompletableFuture closeFuture = new CompletableFuture<>(); @@ -290,8 +317,9 @@ private SingleThreaded(InternalDriverContext context, Set contactPoint this.context = context; this.nodeStateManager = new NodeStateManager(context); this.initialContactPoints = contactPoints; - new SchemaListenerNotifier( - context.getSchemaChangeListener(), context.getEventBus(), adminExecutor); + this.schemaListenerNotifier = + new SchemaListenerNotifier( + context.getSchemaChangeListener(), context.getEventBus(), adminExecutor); context .getEventBus() .register( @@ -340,36 +368,60 @@ private void init(CqlIdentifier keyspace) { } initFuture.completeExceptionally(error); }); + LOG.debug( + "Error initializing new session {} ({} live instances)", + context.getSessionName(), + INSTANCE_COUNT.decrementAndGet()); return; } + closeFuture.whenComplete( + (v, error) -> + LOG.debug( + "Closing session {} ({} live instances)", + context.getSessionName(), + INSTANCE_COUNT.decrementAndGet())); + MetadataManager metadataManager = context.getMetadataManager(); metadataManager.addContactPoints(initialContactPoints); context .getTopologyMonitor() .init() .thenCompose(v -> metadataManager.refreshNodes()) - .thenAccept(v -> afterInitialNodeListRefresh(keyspace)) - .exceptionally( - error -> { - initFuture.completeExceptionally(error); - RunOrSchedule.on(adminExecutor, this::close); - return null; + .thenCompose(v -> checkProtocolVersion()) + .thenCompose(v -> initialSchemaRefresh()) + .thenCompose(v -> initializePools(keyspace)) + .whenComplete( + (v, error) -> { + if (error == null) { + LOG.debug("[{}] Initialization complete, ready", logPrefix); + notifyListeners(); + initFuture.complete(DefaultSession.this); + } else { + LOG.debug("[{}] Initialization failed, force closing", logPrefix, error); + forceCloseAsync() + .whenComplete( + (v1, error1) -> { + if (error1 != null) { + error.addSuppressed(error1); + } + initFuture.completeExceptionally(error); + }); + } }); } - private void afterInitialNodeListRefresh(CqlIdentifier keyspace) { + private CompletionStage checkProtocolVersion() { try { boolean protocolWasForced = context.getConfig().getDefaultProfile().isDefined(DefaultDriverOption.PROTOCOL_VERSION); - boolean needSchemaRefresh = true; if (!protocolWasForced) { ProtocolVersion currentVersion = context.getProtocolVersion(); ProtocolVersion bestVersion = context .getProtocolVersionRegistry() .highestCommon(metadataManager.getMetadata().getNodes().values()); - if (!currentVersion.equals(bestVersion)) { + if (bestVersion.getCode() < currentVersion.getCode()) { LOG.info( "[{}] Negotiated protocol version {} for the initial contact point, " + 
"but other nodes only support {}, downgrading", @@ -377,54 +429,58 @@ private void afterInitialNodeListRefresh(CqlIdentifier keyspace) { currentVersion, bestVersion); context.getChannelFactory().setProtocolVersion(bestVersion); - ControlConnection controlConnection = context.getControlConnection(); - // Might not have initialized yet if there is a custom TopologyMonitor - if (controlConnection.isInit()) { - controlConnection.reconnectNow(); - // Reconnection already triggers a full schema refresh - needSchemaRefresh = false; - } + + // Note that, with the default topology monitor, the control connection is already + // connected with currentVersion at this point. This doesn't really matter because none + // of the control queries use any protocol-dependent feature. + // Keep going as-is, the control connection might switch to the "correct" version later + // if it reconnects to another node. + } else if (bestVersion.getCode() > currentVersion.getCode()) { + LOG.info( + "[{}] Negotiated protocol version {} for the initial contact point, " + + "but cluster seems to support {}, keeping the negotiated version", + logPrefix, + currentVersion, + bestVersion); } } - if (needSchemaRefresh) { - metadataManager.refreshSchema(null, false, true); - } - metadataManager - .firstSchemaRefreshFuture() - .thenAccept(v -> afterInitialSchemaRefresh(keyspace)); + return CompletableFuture.completedFuture(null); + } catch (Throwable throwable) { + return CompletableFutures.failedFuture(throwable); + } + } + private CompletionStage initialSchemaRefresh() { + try { + return metadataManager + .refreshSchema(null, false, true) + .exceptionally( + error -> { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error while refreshing schema during initialization, " + + "proceeding without schema metadata", + logPrefix, + error); + return null; + }); } catch (Throwable throwable) { - initFuture.completeExceptionally(throwable); + return CompletableFutures.failedFuture(throwable); } } - private void afterInitialSchemaRefresh(CqlIdentifier keyspace) { + private CompletionStage initializePools(CqlIdentifier keyspace) { try { nodeStateManager.markInitialized(); context.getLoadBalancingPolicyWrapper().init(); context.getConfigLoader().onDriverInit(context); - LOG.debug("[{}] Initialization complete, ready", logPrefix); - poolManager - .init(keyspace) - .whenComplete( - (v, error) -> { - if (error != null) { - initFuture.completeExceptionally(error); - } else { - initFuture.complete(DefaultSession.this); - notifyLifecycleListeners(); - } - }); + return poolManager.init(keyspace); } catch (Throwable throwable) { - forceCloseAsync() - .whenComplete( - (v, error) -> { - initFuture.completeExceptionally(throwable); - }); + return CompletableFutures.failedFuture(throwable); } } - private void notifyLifecycleListeners() { + private void notifyListeners() { for (LifecycleListener lifecycleListener : context.getLifecycleListeners()) { try { lifecycleListener.onSessionReady(); @@ -437,18 +493,52 @@ private void notifyLifecycleListeners() { t); } } + try { + context.getNodeStateListener().onSessionReady(DefaultSession.this); + } catch (Throwable t) { + Loggers.warnWithException( + LOG, + "[{}] Error while notifying {} of session ready", + logPrefix, + context.getNodeStateListener(), + t); + } + try { + schemaListenerNotifier.onSessionReady(DefaultSession.this); + } catch (Throwable t) { + Loggers.warnWithException( + LOG, + "[{}] Error while notifying {} of session ready", + logPrefix, + schemaListenerNotifier, + t); + } + try { + 
context.getRequestTracker().onSessionReady(DefaultSession.this); + } catch (Throwable t) { + Loggers.warnWithException( + LOG, + "[{}] Error while notifying {} of session ready", + logPrefix, + context.getRequestTracker(), + t); + } } private void onNodeStateChanged(NodeStateEvent event) { assert adminExecutor.inEventLoop(); - if (event.newState == null) { - context.getNodeStateListener().onRemove(event.node); + DefaultNode node = event.node; + if (node == null) { + LOG.debug( + "[{}] Node for this event was removed, ignoring state change: {}", logPrefix, event); + } else if (event.newState == null) { + context.getNodeStateListener().onRemove(node); } else if (event.oldState == null && event.newState == NodeState.UNKNOWN) { - context.getNodeStateListener().onAdd(event.node); + context.getNodeStateListener().onAdd(node); } else if (event.newState == NodeState.UP) { - context.getNodeStateListener().onUp(event.node); + context.getNodeStateListener().onUp(node); } else if (event.newState == NodeState.DOWN || event.newState == NodeState.FORCED_DOWN) { - context.getNodeStateListener().onDown(event.node); + context.getNodeStateListener().onDown(node); } } @@ -462,6 +552,14 @@ private void close() { closePolicies(); + // clear metrics to prevent memory leak + for (Node n : metadataManager.getMetadata().getNodes().values()) { + NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); + if (updater != null) updater.clearMetrics(); + } + + if (metricUpdater != null) metricUpdater.clearMetrics(); + List> childrenCloseStages = new ArrayList<>(); for (AsyncAutoCloseable closeable : internalComponentsToClose()) { childrenCloseStages.add(closeable.closeAsync()); @@ -481,6 +579,14 @@ private void forceClose() { logPrefix, (closeWasCalled ? "" : "not ")); + // clear metrics to prevent memory leak + for (Node n : metadataManager.getMetadata().getNodes().values()) { + NodeMetricUpdater updater = ((DefaultNode) n).getMetricUpdater(); + if (updater != null) updater.clearMetrics(); + } + + if (metricUpdater != null) metricUpdater.clearMetrics(); + if (closeWasCalled) { // onChildrenClosed has already been scheduled for (AsyncAutoCloseable closeable : internalComponentsToClose()) { @@ -510,7 +616,6 @@ private void onChildrenClosed(List> childrenCloseStages) { if (!f.isSuccess()) { closeFuture.completeExceptionally(f.cause()); } else { - LOG.debug("[{}] Shutdown complete", logPrefix); closeFuture.complete(null); } }); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java index 610669e965b..661be017461 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/PoolManager.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -82,8 +84,7 @@ public class PoolManager implements AsyncAutoCloseable { // (e.g. DefaultPreparedStatement) which are handled at the protocol level (e.g. // CqlPrepareAsyncProcessor). We keep the two separate to avoid introducing a dependency from the // session to a particular processor implementation. - private ConcurrentMap<ByteBuffer, RepreparePayload> repreparePayloads = - new MapMaker().weakValues().makeMap(); + private final ConcurrentMap<ByteBuffer, RepreparePayload> repreparePayloads; private final String logPrefix; private final EventExecutor adminExecutor; @@ -95,6 +96,14 @@ public PoolManager(InternalDriverContext context) { this.adminExecutor = context.getNettyOptions().adminEventExecutorGroup().next(); this.config = context.getConfig().getDefaultProfile(); this.singleThreaded = new SingleThreaded(context); + + if (config.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) { + LOG.debug("[{}] Prepared statements cache configured to use weak values", logPrefix); + this.repreparePayloads = new MapMaker().weakValues().makeMap(); + } else { + LOG.debug("[{}] Prepared statements cache configured to use strong values", logPrefix); + this.repreparePayloads = new MapMaker().makeMap(); + } } public CompletionStage<Void> init(CqlIdentifier keyspace) { @@ -314,10 +323,14 @@ private void processStateEvent(NodeStateEvent event) { NodeState newState = event.newState; if (pending.containsKey(node)) { pendingStateEvents.put(node, event); - } else if (newState == NodeState.FORCED_DOWN) { + } else if (newState == null || newState == NodeState.FORCED_DOWN) { ChannelPool pool = pools.remove(node); if (pool != null) { - LOG.debug("[{}] {} was FORCED_DOWN, destroying pool", logPrefix, node); + LOG.debug( + "[{}] {} was {}, destroying pool", + logPrefix, + node, + newState == null ? "removed" : newState.name()); pool.closeAsync() .exceptionally( error -> { @@ -404,6 +417,7 @@ private void reprepareStatements(ChannelPool pool) { new ReprepareOnUp( logPrefix + "|" + pool.getNode().getEndPoint(), pool, + adminExecutor, repreparePayloads, context, () -> RunOrSchedule.on(adminExecutor, () -> onPoolReady(pool))) diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java index bd65c045673..ee979473fd1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUp.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.connection.BusyConnectionException; import com.datastax.oss.driver.api.core.session.throttling.RequestThrottler; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; @@ -26,12 +29,14 @@ import com.datastax.oss.driver.internal.core.cql.CqlRequestHandler; import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.protocol.internal.Message; import com.datastax.oss.protocol.internal.request.Prepare; import com.datastax.oss.protocol.internal.request.Query; import com.datastax.oss.protocol.internal.util.Bytes; +import io.netty.util.concurrent.EventExecutor; import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayDeque; @@ -64,7 +69,8 @@ class ReprepareOnUp { new Query("SELECT prepared_id FROM system.prepared_statements"); private final String logPrefix; - private final DriverChannel channel; + private final ChannelPool pool; + private final EventExecutor adminExecutor; private final Map repreparePayloads; private final Runnable whenPrepared; private final boolean checkSystemTable; @@ -74,8 +80,8 @@ class ReprepareOnUp { private final RequestThrottler throttler; private final SessionMetricUpdater metricUpdater; - // After the constructor, everything happens on the channel's event loop, so these fields do not - // need any synchronization. + // After the constructor, everything happens on adminExecutor, so these fields do not need any + // synchronization. 
private Set serverKnownIds; private Queue toReprepare; private int runningWorkers; @@ -83,12 +89,14 @@ class ReprepareOnUp { ReprepareOnUp( String logPrefix, ChannelPool pool, + EventExecutor adminExecutor, Map repreparePayloads, InternalDriverContext context, Runnable whenPrepared) { this.logPrefix = logPrefix; - this.channel = pool.next(); + this.pool = pool; + this.adminExecutor = adminExecutor; this.repreparePayloads = repreparePayloads; this.whenPrepared = whenPrepared; this.throttler = context.getRequestThrottler(); @@ -109,10 +117,6 @@ void start() { if (repreparePayloads.isEmpty()) { LOG.debug("[{}] No statements to reprepare, done", logPrefix); whenPrepared.run(); - } else if (this.channel == null) { - // Should not happen, but handle cleanly - LOG.debug("[{}] No channel available to reprepare, done", logPrefix); - whenPrepared.run(); } else { // Check log level because ConcurrentMap.size is not a constant operation if (LOG.isDebugEnabled()) { @@ -124,14 +128,14 @@ void start() { if (checkSystemTable) { LOG.debug("[{}] Checking which statements the server knows about", logPrefix); queryAsync(QUERY_SERVER_IDS, Collections.emptyMap(), "QUERY system.prepared_statements") - .whenComplete(this::gatherServerIds); + .whenCompleteAsync(this::gatherServerIds, adminExecutor); } else { LOG.debug( "[{}] {} is disabled, repreparing directly", logPrefix, DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE.getPath()); RunOrSchedule.on( - channel.eventLoop(), + adminExecutor, () -> { serverKnownIds = Collections.emptySet(); gatherPayloadsToReprepare(); @@ -141,7 +145,7 @@ void start() { } private void gatherServerIds(AdminResult rows, Throwable error) { - assert channel.eventLoop().inEventLoop(); + assert adminExecutor.inEventLoop(); if (serverKnownIds == null) { serverKnownIds = new HashSet<>(); } @@ -157,7 +161,7 @@ private void gatherServerIds(AdminResult rows, Throwable error) { } if (rows.hasNextPage()) { LOG.debug("[{}] system.prepared_statements has more pages", logPrefix); - rows.nextPage().whenComplete(this::gatherServerIds); + rows.nextPage().whenCompleteAsync(this::gatherServerIds, adminExecutor); } else { LOG.debug("[{}] Gathered {} server ids, proceeding", logPrefix, serverKnownIds.size()); gatherPayloadsToReprepare(); @@ -166,7 +170,7 @@ private void gatherServerIds(AdminResult rows, Throwable error) { } private void gatherPayloadsToReprepare() { - assert channel.eventLoop().inEventLoop(); + assert adminExecutor.inEventLoop(); toReprepare = new ArrayDeque<>(); for (RepreparePayload payload : repreparePayloads.values()) { if (serverKnownIds.contains(payload.id)) { @@ -198,7 +202,7 @@ private void gatherPayloadsToReprepare() { } private void startWorkers() { - assert channel.eventLoop().inEventLoop(); + assert adminExecutor.inEventLoop(); runningWorkers = Math.min(maxParallelism, toReprepare.size()); LOG.debug( "[{}] Repreparing {} statements with {} parallel workers", @@ -211,7 +215,7 @@ private void startWorkers() { } private void startWorker() { - assert channel.eventLoop().inEventLoop(); + assert adminExecutor.inEventLoop(); if (toReprepare.isEmpty()) { runningWorkers -= 1; if (runningWorkers == 0) { @@ -220,33 +224,55 @@ private void startWorker() { } } else { RepreparePayload payload = toReprepare.poll(); - queryAsync( + prepareAsync( new Prepare( payload.query, (payload.keyspace == null ? 
null : payload.keyspace.asInternal())), - payload.customPayload, - String.format("Reprepare '%s'", payload.query)) - .handle( + payload.customPayload) + .handleAsync( (result, error) -> { // Don't log, AdminRequestHandler does already startWorker(); return null; - }); + }, + adminExecutor); } } @VisibleForTesting protected CompletionStage queryAsync( Message message, Map customPayload, String debugString) { - ThrottledAdminRequestHandler reprepareHandler = - new ThrottledAdminRequestHandler( - channel, - message, - customPayload, - timeout, - throttler, - metricUpdater, - logPrefix, - debugString); - return reprepareHandler.start(); + DriverChannel channel = pool.next(); + if (channel == null) { + return CompletableFutures.failedFuture( + new BusyConnectionException("Found no channel to execute reprepare query")); + } else { + ThrottledAdminRequestHandler reprepareHandler = + ThrottledAdminRequestHandler.query( + channel, + false, + message, + customPayload, + timeout, + throttler, + metricUpdater, + logPrefix, + debugString); + return reprepareHandler.start(); + } + } + + @VisibleForTesting + protected CompletionStage prepareAsync( + Message message, Map customPayload) { + DriverChannel channel = pool.next(); + if (channel == null) { + return CompletableFutures.failedFuture( + new BusyConnectionException("Found no channel to execute reprepare query")); + } else { + ThrottledAdminRequestHandler reprepareHandler = + ThrottledAdminRequestHandler.prepare( + channel, false, message, customPayload, timeout, throttler, metricUpdater, logPrefix); + return reprepareHandler.start(); + } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java index eaa9541a59f..7c4b10442a7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RepreparePayload.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.internal.core.cql.DefaultPreparedStatement; +import com.datastax.oss.protocol.internal.request.Prepare; import java.nio.ByteBuffer; import java.util.Map; import net.jcip.annotations.Immutable; @@ -45,4 +48,8 @@ public RepreparePayload( this.keyspace = keyspace; this.customPayload = customPayload; } + + public Prepare toMessage() { + return new Prepare(query, keyspace == null ? 
null : keyspace.asInternal()); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java index b61ccebb090..49599667d70 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java index aca57fda97f..b993365d201 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/RequestProcessorRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +19,6 @@ import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; -import com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; @@ -31,22 +29,6 @@ public class RequestProcessorRegistry { private static final Logger LOG = LoggerFactory.getLogger(RequestProcessorRegistry.class); - public static RequestProcessorRegistry defaultCqlProcessors(String logPrefix) { - CqlRequestAsyncProcessor requestAsyncProcessor = new CqlRequestAsyncProcessor(); - CqlRequestSyncProcessor requestSyncProcessor = - new CqlRequestSyncProcessor(requestAsyncProcessor); - CqlPrepareAsyncProcessor prepareAsyncProcessor = new CqlPrepareAsyncProcessor(); - CqlPrepareSyncProcessor prepareSyncProcessor = - new CqlPrepareSyncProcessor(prepareAsyncProcessor); - - return new RequestProcessorRegistry( - logPrefix, - requestAsyncProcessor, - requestSyncProcessor, - prepareAsyncProcessor, - prepareSyncProcessor); - } - private final String logPrefix; // Effectively immutable: the contents are never modified after construction private final RequestProcessor[] processors; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java index 4e4c9765157..51ba4d30624 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SchemaListenerNotifier.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.session; import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.internal.core.context.EventBus; import com.datastax.oss.driver.internal.core.metadata.schema.events.AggregateChangeEvent; import com.datastax.oss.driver.internal.core.metadata.schema.events.FunctionChangeEvent; @@ -33,6 +36,11 @@ class SchemaListenerNotifier { private final SchemaChangeListener listener; private final EventExecutor adminExecutor; + // It is technically possible that a schema change could happen in the middle of session + // initialization. Don't forward events in this case, it would likely do more harm than good if a + // listener implementation doesn't expect it. + private boolean sessionReady; + SchemaListenerNotifier( SchemaChangeListener listener, EventBus eventBus, EventExecutor adminExecutor) { this.listener = listener; @@ -53,93 +61,114 @@ class SchemaListenerNotifier { ViewChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onViewChangeEvent)); } + void onSessionReady(Session session) { + RunOrSchedule.on( + adminExecutor, + () -> { + sessionReady = true; + listener.onSessionReady(session); + }); + } + private void onAggregateChangeEvent(AggregateChangeEvent event) { assert adminExecutor.inEventLoop(); - switch (event.changeType) { - case CREATED: - listener.onAggregateCreated(event.newAggregate); - break; - case UPDATED: - listener.onAggregateUpdated(event.newAggregate, event.oldAggregate); - break; - case DROPPED: - listener.onAggregateDropped(event.oldAggregate); - break; + if (sessionReady) { + switch (event.changeType) { + case CREATED: + listener.onAggregateCreated(event.newAggregate); + break; + case UPDATED: + listener.onAggregateUpdated(event.newAggregate, event.oldAggregate); + break; + case DROPPED: + listener.onAggregateDropped(event.oldAggregate); + break; + } } } private void onFunctionChangeEvent(FunctionChangeEvent event) { assert adminExecutor.inEventLoop(); - switch (event.changeType) { - case CREATED: - listener.onFunctionCreated(event.newFunction); - break; - case UPDATED: - listener.onFunctionUpdated(event.newFunction, event.oldFunction); - break; - case DROPPED: - listener.onFunctionDropped(event.oldFunction); - break; + if (sessionReady) { + switch (event.changeType) { + case CREATED: + listener.onFunctionCreated(event.newFunction); + break; + case UPDATED: + listener.onFunctionUpdated(event.newFunction, event.oldFunction); + break; + case DROPPED: + listener.onFunctionDropped(event.oldFunction); + break; + } } } private void onKeyspaceChangeEvent(KeyspaceChangeEvent event) { assert adminExecutor.inEventLoop(); - switch (event.changeType) { - case CREATED: - listener.onKeyspaceCreated(event.newKeyspace); - break; - case UPDATED: - listener.onKeyspaceUpdated(event.newKeyspace, event.oldKeyspace); - break; - case DROPPED: - listener.onKeyspaceDropped(event.oldKeyspace); - break; + if (sessionReady) { + switch (event.changeType) { + case CREATED: + listener.onKeyspaceCreated(event.newKeyspace); + break; + case UPDATED: + listener.onKeyspaceUpdated(event.newKeyspace, event.oldKeyspace); + break; + case DROPPED: + 
listener.onKeyspaceDropped(event.oldKeyspace); + break; + } } } private void onTableChangeEvent(TableChangeEvent event) { assert adminExecutor.inEventLoop(); - switch (event.changeType) { - case CREATED: - listener.onTableCreated(event.newTable); - break; - case UPDATED: - listener.onTableUpdated(event.newTable, event.oldTable); - break; - case DROPPED: - listener.onTableDropped(event.oldTable); - break; + if (sessionReady) { + switch (event.changeType) { + case CREATED: + listener.onTableCreated(event.newTable); + break; + case UPDATED: + listener.onTableUpdated(event.newTable, event.oldTable); + break; + case DROPPED: + listener.onTableDropped(event.oldTable); + break; + } } } private void onTypeChangeEvent(TypeChangeEvent event) { assert adminExecutor.inEventLoop(); - switch (event.changeType) { - case CREATED: - listener.onUserDefinedTypeCreated(event.newType); - break; - case UPDATED: - listener.onUserDefinedTypeUpdated(event.newType, event.oldType); - break; - case DROPPED: - listener.onUserDefinedTypeDropped(event.oldType); - break; + if (sessionReady) { + switch (event.changeType) { + case CREATED: + listener.onUserDefinedTypeCreated(event.newType); + break; + case UPDATED: + listener.onUserDefinedTypeUpdated(event.newType, event.oldType); + break; + case DROPPED: + listener.onUserDefinedTypeDropped(event.oldType); + break; + } } } private void onViewChangeEvent(ViewChangeEvent event) { assert adminExecutor.inEventLoop(); - switch (event.changeType) { - case CREATED: - listener.onViewCreated(event.newView); - break; - case UPDATED: - listener.onViewUpdated(event.newView, event.oldView); - break; - case DROPPED: - listener.onViewDropped(event.oldView); - break; + if (sessionReady) { + switch (event.changeType) { + case CREATED: + listener.onViewCreated(event.newView); + break; + case UPDATED: + listener.onViewUpdated(event.newView, event.oldView); + break; + case DROPPED: + listener.onViewDropped(event.oldView); + break; + } } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java index c697718a2d0..1a1270b41c8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/SessionWrapper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
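Worth noting for `SchemaListenerNotifier`: both the constructor's event subscriptions and the new `onSessionReady` go through `RunOrSchedule.on(adminExecutor, ...)`, which is why the `sessionReady` flag needs no synchronization: every access happens on the admin thread. A sketch of what such a helper presumably does (assumed semantics following the usual Netty idiom; this is not the driver's actual class):

```java
import io.netty.util.concurrent.EventExecutor;

final class RunOrScheduleSketch {
  // Run the task inline if we are already on the target executor's thread,
  // otherwise submit it. Either way, the task always runs on that single
  // thread, so fields it touches need no further synchronization.
  static void on(EventExecutor adminExecutor, Runnable task) {
    if (adminExecutor.inEventLoop()) {
      task.run();
    } else {
      adminExecutor.execute(task);
    }
  }
}
```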
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java index ebfc838f4ac..8146c5b113a 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,10 +25,10 @@ import com.datastax.oss.driver.api.core.session.throttling.Throttled; import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.ArrayDeque; +import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Deque; -import java.util.concurrent.locks.ReentrantLock; -import net.jcip.annotations.GuardedBy; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.atomic.AtomicInteger; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,17 +60,12 @@ public class ConcurrencyLimitingRequestThrottler implements RequestThrottler { private final String logPrefix; private final int maxConcurrentRequests; private final int maxQueueSize; - - private final ReentrantLock lock = new ReentrantLock(); - - @GuardedBy("lock") - private int concurrentRequests; - - @GuardedBy("lock") - private Deque<Throttled> queue = new ArrayDeque<>(); - - @GuardedBy("lock") - private boolean closed; + private final AtomicInteger concurrentRequests = new AtomicInteger(0); + // ConcurrentLinkedDeque.size() is not O(1): it forces a full iteration of the queue. So we + // track the size of the queue explicitly.
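The rewrite above replaces the `ReentrantLock` with atomic counters. The core idea is optimistic claim-and-revert: increment first, check the limit, and undo the increment on overshoot, rather than retrying in a CAS loop. A minimal, self-contained sketch of the idiom (illustrative names, not the driver's API):

```java
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical bounded counter using the same claim-and-revert idiom as the
// throttler above: no lock and no CAS retry loop. The raw counter may
// transiently read above the limit, but a successful claim never exceeds it.
final class Slots {
  private final AtomicInteger used = new AtomicInteger(0);
  private final int max;

  Slots(int max) {
    this.max = max;
  }

  boolean tryClaim() {
    if (used.incrementAndGet() <= max) {
      return true; // claimed a slot
    }
    // Overshot: revert. Another thread may briefly observe used > max, which
    // is harmless because an inflated reading can only reject, never admit.
    used.decrementAndGet();
    return false;
  }

  void release() {
    used.decrementAndGet();
  }
}
```

The trade-off versus a CAS loop is a transiently inflated reading in exchange for a bounded number of atomic operations per attempt.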
+ private final Deque<Throttled> queue = new ConcurrentLinkedDeque<>(); + private final AtomicInteger queueSize = new AtomicInteger(0); + private volatile boolean closed = false; public ConcurrencyLimitingRequestThrottler(DriverContext context) { this.logPrefix = context.getSessionName(); @@ -85,40 +82,64 @@ public ConcurrencyLimitingRequestThrottler(DriverContext context) { @Override public void register(@NonNull Throttled request) { - lock.lock(); - try { - if (closed) { - LOG.trace("[{}] Rejecting request after shutdown", logPrefix); - fail(request, "The session is shutting down"); - } else if (queue.isEmpty() && concurrentRequests < maxConcurrentRequests) { - // We have capacity for one more concurrent request + if (closed) { + LOG.trace("[{}] Rejecting request after shutdown", logPrefix); + fail(request, "The session is shutting down"); + return; + } + + // Implementation note: technically, the "concurrent requests" or "queue size" counters can + // transiently read over the limit, but the queue itself never grows beyond the limit, + // because we always check for that condition and revert the claim when over it. We do this + // instead of a CAS loop to avoid potentially spinning under contention. + + // If no backlog exists AND we get capacity, we can execute immediately + if (queueSize.get() == 0) { + // Take a claim first, and then check if we are OK to proceed + int newConcurrent = concurrentRequests.incrementAndGet(); + if (newConcurrent <= maxConcurrentRequests) { LOG.trace("[{}] Starting newly registered request", logPrefix); - concurrentRequests += 1; request.onThrottleReady(false); - } else if (queue.size() < maxQueueSize) { - LOG.trace("[{}] Enqueuing request", logPrefix); - queue.add(request); + return; } else { - LOG.trace("[{}] Rejecting request because of full queue", logPrefix); - fail( - request, - String.format( - "The session has reached its maximum capacity " - + "(concurrent requests: %d, queue size: %d)", - maxConcurrentRequests, maxQueueSize)); + // We exceeded the limit, decrement the count and fall through to the queuing logic + concurrentRequests.decrementAndGet(); } - } finally { - lock.unlock(); + } + + // If we have a backlog, or we failed to claim capacity, try to enqueue + int newQueueSize = queueSize.incrementAndGet(); + if (newQueueSize <= maxQueueSize) { + LOG.trace("[{}] Enqueuing request", logPrefix); + queue.offer(request); + + // Double-check that we were still supposed to be enqueued: the session may have been closed + // while we were enqueuing, and close() may be draining the queue at this very moment, so + // both sides need to attempt the removal + if (closed) { + if (queue.remove(request)) { + queueSize.decrementAndGet(); + LOG.trace("[{}] Rejecting late request after shutdown", logPrefix); + fail(request, "The session is shutting down"); + } + } + } else { + LOG.trace("[{}] Rejecting request because of full queue", logPrefix); + queueSize.decrementAndGet(); + fail( + request, + String.format( + "The session has reached its maximum capacity " + + "(concurrent requests: %d, queue size: %d)", + maxConcurrentRequests, maxQueueSize)); } } @Override public void signalSuccess(@NonNull Throttled request) { - lock.lock(); - try { - onRequestDone(); - } finally { - lock.unlock(); + Throttled nextRequest = onRequestDoneAndDequeNext(); + if (nextRequest != null) { + nextRequest.onThrottleReady(true); } } @@ -129,75 +150,79 @@ public void signalError(@NonNull Throttled request, @NonNull Throwable error) { @Override public void signalTimeout(@NonNull Throttled request) { - lock.lock(); - try { - if
(!closed) { - if (queue.remove(request)) { // The request timed out before it was active - LOG.trace("[{}] Removing timed out request from the queue", logPrefix); - } else { - onRequestDone(); - } + Throttled nextRequest = null; + if (!closed) { + if (queue.remove(request)) { // The request timed out before it was active + queueSize.decrementAndGet(); + LOG.trace("[{}] Removing timed out request from the queue", logPrefix); + } else { + nextRequest = onRequestDoneAndDequeNext(); } - } finally { - lock.unlock(); + } + + if (nextRequest != null) { + nextRequest.onThrottleReady(true); } } - @SuppressWarnings("GuardedBy") // this method is only called with the lock held - private void onRequestDone() { - assert lock.isHeldByCurrentThread(); + @Override + public void signalCancel(@NonNull Throttled request) { + Throttled nextRequest = null; if (!closed) { - if (queue.isEmpty()) { - concurrentRequests -= 1; + if (queue.remove(request)) { // The request has been cancelled before it was active + queueSize.decrementAndGet(); + LOG.trace("[{}] Removing cancelled request from the queue", logPrefix); } else { + nextRequest = onRequestDoneAndDequeNext(); + } + } + + if (nextRequest != null) { + nextRequest.onThrottleReady(true); + } + } + + @Nullable + private Throttled onRequestDoneAndDequeNext() { + if (!closed) { + Throttled nextRequest = queue.poll(); + if (nextRequest == null) { + concurrentRequests.decrementAndGet(); + } else { + queueSize.decrementAndGet(); LOG.trace("[{}] Starting dequeued request", logPrefix); - queue.poll().onThrottleReady(true); - // don't touch concurrentRequests since we finished one but started another + return nextRequest; } } + + // no next task was dequeued + return null; } @Override public void close() { - lock.lock(); - try { - closed = true; - LOG.debug("[{}] Rejecting {} queued requests after shutdown", logPrefix, queue.size()); - for (Throttled request : queue) { - fail(request, "The session is shutting down"); - } - } finally { - lock.unlock(); + closed = true; + + LOG.debug("[{}] Rejecting {} queued requests after shutdown", logPrefix, queueSize.get()); + Throttled request; + while ((request = queue.poll()) != null) { + queueSize.decrementAndGet(); + fail(request, "The session is shutting down"); } } public int getQueueSize() { - lock.lock(); - try { - return queue.size(); - } finally { - lock.unlock(); - } + return queueSize.get(); } @VisibleForTesting int getConcurrentRequests() { - lock.lock(); - try { - return concurrentRequests; - } finally { - lock.unlock(); - } + return concurrentRequests.get(); } @VisibleForTesting Deque getQueue() { - lock.lock(); - try { - return queue; - } finally { - lock.unlock(); - } + return queue; } private static void fail(Throttled request, String message) { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java index 65587e7c3a0..9a25059caef 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/NanoClock.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java index 11fa2632a34..2210e4b26f1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/PassThroughRequestThrottler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -67,6 +69,11 @@ public void signalTimeout(@NonNull Throttled request) { // nothing to do } + @Override + public void signalCancel(@NonNull Throttled request) { + // nothing to do + } + @Override public void close() throws IOException { // nothing to do diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java index ab4035a4d46..03a693dc0fe 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -196,6 +198,18 @@ public void signalTimeout(@NonNull Throttled request) { } } + @Override + public void signalCancel(@NonNull Throttled request) { + lock.lock(); + try { + if (!closed && queue.remove(request)) { // The request has been cancelled before it was active + LOG.trace("[{}] Removing cancelled request from the queue", logPrefix); + } + } finally { + lock.unlock(); + } + } + @Override public void close() { lock.lock(); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java index a33b0d33cc9..5e84f6b1002 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/ConstantSpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java index b758e206c3a..2f6b17286e5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/specex/NoSpeculativeExecutionPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java index e60088b7b25..343d3f9e4e7 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/DefaultSslEngineFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,16 +22,19 @@ import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import edu.umd.cs.findbugs.annotations.NonNull; import java.io.InputStream; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.security.KeyStore; import java.security.SecureRandom; +import java.time.Duration; import java.util.List; -import javax.net.ssl.KeyManagerFactory; +import java.util.Optional; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLParameters; @@ -52,6 +57,7 @@ * truststore-password = password123 * keystore-path = /path/to/client.keystore * keystore-password = password123 + * keystore-reload-interval = 30 minutes * } * } * @@ -64,6 +70,8 @@ public class DefaultSslEngineFactory implements SslEngineFactory { private final SSLContext sslContext; private final String[] cipherSuites; private final boolean requireHostnameValidation; + private final boolean allowDnsReverseLookupSan; + private ReloadingKeyManagerFactory kmf; /** Builds a new instance from the driver configuration. */ public DefaultSslEngineFactory(DriverContext driverContext) { @@ -82,6 +90,28 @@ public DefaultSslEngineFactory(DriverContext driverContext) { } this.requireHostnameValidation = config.getBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, true); + this.allowDnsReverseLookupSan = + config.getBoolean(DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, true); + } + + @VisibleForTesting + protected String hostname(InetSocketAddress addr) { + return allowDnsReverseLookupSan ? 
hostMaybeFromDnsReverseLookup(addr) : hostNoLookup(addr); + } + + @VisibleForTesting + protected String hostMaybeFromDnsReverseLookup(InetSocketAddress addr) { + // See java.net.InetSocketAddress.getHostName: + // "This method may trigger a name service reverse lookup if the address was created with a + // literal IP address." + return addr.getHostName(); + } + + @VisibleForTesting + protected String hostNoLookup(InetSocketAddress addr) { + // See java.net.InetSocketAddress.getHostString: + // "This has the benefit of not attempting a reverse lookup" + return addr.getHostString(); + } @NonNull @@ -91,7 +121,7 @@ public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { SocketAddress remoteAddress = remoteEndpoint.resolve(); if (remoteAddress instanceof InetSocketAddress) { InetSocketAddress socketAddress = (InetSocketAddress) remoteAddress; - engine = sslContext.createSSLEngine(socketAddress.getHostName(), socketAddress.getPort()); + engine = sslContext.createSSLEngine(hostname(socketAddress), socketAddress.getPort()); } else { engine = sslContext.createSSLEngine(); } @@ -130,20 +160,8 @@ protected SSLContext buildContext(DriverExecutionProfile config) throws Exceptio } // initialize keystore if configured. - KeyManagerFactory kmf = null; if (config.isDefined(DefaultDriverOption.SSL_KEYSTORE_PATH)) { - try (InputStream ksf = - Files.newInputStream( - Paths.get(config.getString(DefaultDriverOption.SSL_KEYSTORE_PATH)))) { - KeyStore ks = KeyStore.getInstance("JKS"); - char[] password = - config.isDefined(DefaultDriverOption.SSL_KEYSTORE_PASSWORD) - ? config.getString(DefaultDriverOption.SSL_KEYSTORE_PASSWORD).toCharArray() - : null; - ks.load(ksf, password); - kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, password); - } + kmf = buildReloadingKeyManagerFactory(config); } context.init( @@ -157,8 +175,19 @@ protected SSLContext buildContext(DriverExecutionProfile config) throws Exceptio } } + private ReloadingKeyManagerFactory buildReloadingKeyManagerFactory(DriverExecutionProfile config) + throws Exception { + Path keystorePath = Paths.get(config.getString(DefaultDriverOption.SSL_KEYSTORE_PATH)); + String password = config.getString(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, null); + Optional<Duration> reloadInterval = + Optional.ofNullable( + config.getDuration(DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, null)); + + return ReloadingKeyManagerFactory.create(keystorePath, password, reloadInterval); + } + @Override public void close() throws Exception { - // nothing to do + if (kmf != null) { + kmf.close(); + } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java index 73cb73660fb..7661325005e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/JdkSslHandlerFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
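With `buildReloadingKeyManagerFactory` wired in above, keystore hot-reloading becomes purely a configuration concern. A hedged usage sketch with the driver's programmatic config loader (paths and password are placeholders mirroring the javadoc sample; the reload option is the one read above):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.time.Duration;

public class ReloadingKeystoreExample {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withString(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, "DefaultSslEngineFactory")
            .withString(DefaultDriverOption.SSL_KEYSTORE_PATH, "/path/to/client.keystore")
            .withString(DefaultDriverOption.SSL_KEYSTORE_PASSWORD, "password123")
            // Re-read the keystore file every 30 minutes; omit this option
            // (or set it to 0) to keep reloading disabled.
            .withDuration(DefaultDriverOption.SSL_KEYSTORE_RELOAD_INTERVAL, Duration.ofMinutes(30))
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // Use the session; the keystore is re-read in the background.
    }
  }
}
```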
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java new file mode 100644 index 00000000000..8a9e11bb2e9 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactory.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.ssl; + +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.Provider; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.time.Duration; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.KeyManagerFactorySpi; +import javax.net.ssl.ManagerFactoryParameters; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.X509ExtendedKeyManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ReloadingKeyManagerFactory extends KeyManagerFactory implements AutoCloseable { + private static final Logger logger = LoggerFactory.getLogger(ReloadingKeyManagerFactory.class); + private static final String KEYSTORE_TYPE = "JKS"; + private Path keystorePath; + private String keystorePassword; + private ScheduledExecutorService executor; + private final Spi spi; + + // We're using a single thread executor so this shouldn't need to be volatile, since all updates + // to lastDigest should come from the same thread + private volatile byte[] lastDigest; + + /** + * Create a new {@link 
ReloadingKeyManagerFactory} with the given keystore file and password, + reloading from the file's content at the given interval. This method does an initial + reload before returning, to confirm that the file exists and is readable. + * + * @param keystorePath the keystore file to reload + * @param keystorePassword the keystore password + * @param reloadInterval the duration between reload attempts. Set to {@link Optional#empty()} to + * disable scheduled reloading. + * @return a factory whose key managers pick up the new keystore content on each reload + */ + static ReloadingKeyManagerFactory create( + Path keystorePath, String keystorePassword, Optional<Duration> reloadInterval) + throws UnrecoverableKeyException, KeyStoreException, NoSuchAlgorithmException, + CertificateException, IOException { + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + + KeyStore ks; + try (InputStream ksf = Files.newInputStream(keystorePath)) { + ks = KeyStore.getInstance(KEYSTORE_TYPE); + ks.load(ksf, keystorePassword.toCharArray()); + } + kmf.init(ks, keystorePassword.toCharArray()); + + ReloadingKeyManagerFactory reloadingKeyManagerFactory = new ReloadingKeyManagerFactory(kmf); + reloadingKeyManagerFactory.start(keystorePath, keystorePassword, reloadInterval); + return reloadingKeyManagerFactory; + } + + @VisibleForTesting + protected ReloadingKeyManagerFactory(KeyManagerFactory initial) { + this( + new Spi((X509ExtendedKeyManager) initial.getKeyManagers()[0]), + initial.getProvider(), + initial.getAlgorithm()); + } + + private ReloadingKeyManagerFactory(Spi spi, Provider provider, String algorithm) { + super(spi, provider, algorithm); + this.spi = spi; + } + + private void start( + Path keystorePath, String keystorePassword, Optional<Duration> reloadInterval) { + this.keystorePath = keystorePath; + this.keystorePassword = keystorePassword; + + // Ensure that reload is called once synchronously, to make sure the file exists etc. + reload(); + + if (!reloadInterval.isPresent() || reloadInterval.get().isZero()) { + final String msg = + "KeyStore reloading is disabled. If your Cassandra cluster requires client certificates, " + + "client application restarts are infrequent, and client certificates have short lifetimes, then your client " + + "may fail to re-establish connections to Cassandra hosts. To enable KeyStore reloading, see " + + "`advanced.ssl-engine-factory.keystore-reload-interval` in reference.conf."; + logger.info(msg); + } else { + logger.info("KeyStore reloading is enabled with interval {}", reloadInterval.get()); + + this.executor = + Executors.newScheduledThreadPool( + 1, + runnable -> { + Thread t = Executors.defaultThreadFactory().newThread(runnable); + t.setName(this.getClass().getSimpleName() + "-reload"); + t.setDaemon(true); + return t; + }); + this.executor.scheduleWithFixedDelay( + this::reload, + reloadInterval.get().toMillis(), + reloadInterval.get().toMillis(), + TimeUnit.MILLISECONDS); + } + } + + @VisibleForTesting + void reload() { + try { + reload0(); + } catch (Exception e) { + String msg = + "Failed to reload KeyStore.
If this continues to happen, your client may use stale identity" + " certificates and fail to re-establish connections to Cassandra hosts."; + logger.warn(msg, e); + } + } + + private synchronized void reload0() + throws NoSuchAlgorithmException, IOException, KeyStoreException, CertificateException, + UnrecoverableKeyException { + logger.debug("Checking KeyStore file {} for updates", keystorePath); + + final byte[] keyStoreBytes = Files.readAllBytes(keystorePath); + final byte[] newDigest = digest(keyStoreBytes); + if (lastDigest != null && Arrays.equals(lastDigest, newDigest)) { + logger.debug("KeyStore file content has not changed; skipping update"); + return; + } + + final KeyStore keyStore = KeyStore.getInstance(KEYSTORE_TYPE); + try (InputStream inputStream = new ByteArrayInputStream(keyStoreBytes)) { + keyStore.load(inputStream, keystorePassword.toCharArray()); + } + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, keystorePassword.toCharArray()); + logger.info("Detected updates to KeyStore file {}", keystorePath); + + this.spi.keyManager.set((X509ExtendedKeyManager) kmf.getKeyManagers()[0]); + this.lastDigest = newDigest; + } + + @Override + public void close() throws Exception { + if (executor != null) { + executor.shutdown(); + } + } + + private static byte[] digest(byte[] payload) throws NoSuchAlgorithmException { + final MessageDigest digest = MessageDigest.getInstance("SHA-256"); + return digest.digest(payload); + } + + private static class Spi extends KeyManagerFactorySpi { + DelegatingKeyManager keyManager; + + Spi(X509ExtendedKeyManager initial) { + this.keyManager = new DelegatingKeyManager(initial); + } + + @Override + protected void engineInit(KeyStore ks, char[] password) { + throw new UnsupportedOperationException(); + } + + @Override + protected void engineInit(ManagerFactoryParameters spec) { + throw new UnsupportedOperationException(); + } + + @Override + protected KeyManager[] engineGetKeyManagers() { + return new KeyManager[] {keyManager}; + } + } + + private static class DelegatingKeyManager extends X509ExtendedKeyManager { + AtomicReference<X509ExtendedKeyManager> delegate; + + DelegatingKeyManager(X509ExtendedKeyManager initial) { + delegate = new AtomicReference<>(initial); + } + + void set(X509ExtendedKeyManager keyManager) { + delegate.set(keyManager); + } + + @Override + public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) { + return delegate.get().chooseEngineClientAlias(keyType, issuers, engine); + } + + @Override + public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) { + return delegate.get().chooseEngineServerAlias(keyType, issuers, engine); + } + + @Override + public String[] getClientAliases(String keyType, Principal[] issuers) { + return delegate.get().getClientAliases(keyType, issuers); + } + + @Override + public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) { + return delegate.get().chooseClientAlias(keyType, issuers, socket); + } + + @Override + public String[] getServerAliases(String keyType, Principal[] issuers) { + return delegate.get().getServerAliases(keyType, issuers); + } + + @Override + public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) { + return delegate.get().chooseServerAlias(keyType, issuers, socket); + } + + @Override + public X509Certificate[] getCertificateChain(String alias) { + return delegate.get().getCertificateChain(alias);
} + + @Override + public PrivateKey getPrivateKey(String alias) { + return delegate.get().getPrivateKey(alias); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java new file mode 100644 index 00000000000..4d2cb69fbfc --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SniSslEngineFactory.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.datastax.oss.driver.internal.core.ssl; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; +import com.datastax.oss.driver.internal.core.metadata.SniEndPoint; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.net.InetSocketAddress; +import java.util.concurrent.CopyOnWriteArrayList; +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; + +public class SniSslEngineFactory implements SslEngineFactory { + + // An offset that gets added to our "fake" ports (see below). We pick this value because it is the + // start of the ephemeral port range. + private static final int FAKE_PORT_OFFSET = 49152; + + private final SSLContext sslContext; + private final CopyOnWriteArrayList fakePorts = new CopyOnWriteArrayList<>(); + private final boolean allowDnsReverseLookupSan; + + public SniSslEngineFactory(SSLContext sslContext) { + this(sslContext, true); + } + + public SniSslEngineFactory(SSLContext sslContext, boolean allowDnsReverseLookupSan) { + this.sslContext = sslContext; + this.allowDnsReverseLookupSan = allowDnsReverseLookupSan; + } + + @NonNull + @Override + public SSLEngine newSslEngine(@NonNull EndPoint remoteEndpoint) { + if (!(remoteEndpoint instanceof SniEndPoint)) { + throw new IllegalArgumentException( + String.format( + "Configuration error: can only use %s with SNI end points", + this.getClass().getSimpleName())); + } + SniEndPoint sniEndPoint = (SniEndPoint) remoteEndpoint; + InetSocketAddress address = sniEndPoint.resolve(); + String sniServerName = sniEndPoint.getServerName(); + + // When hostname verification is enabled (with setEndpointIdentificationAlgorithm), the SSL + // engine will try to match the server's certificate against the SNI host name; if that doesn't + // work, it will fall back to the "advisory peer host" passed to createSSLEngine. + // + // In our case, the first check will never succeed because our SNI host name is not the DNS name + // (we use the Cassandra host_id instead). So we *must* set the advisory peer information. 
+ // + // However if we use the address as-is, this leads to another issue: the advisory peer + // information is also used to cache SSL sessions internally. All of our nodes share the same + // proxy address, so the JDK tries to reuse SSL sessions across nodes. But it doesn't update the + // SNI host name every time, so it ends up opening connections to the wrong node. + // + // To avoid that, we create a unique "fake" port for every node. We still get session reuse for + // a given node, but not across nodes. This is safe because the advisory port is only used for + // session caching. + String peerHost = allowDnsReverseLookupSan ? address.getHostName() : address.getHostString(); + SSLEngine engine = sslContext.createSSLEngine(peerHost, getFakePort(sniServerName)); + engine.setUseClientMode(true); + SSLParameters parameters = engine.getSSLParameters(); + parameters.setServerNames(ImmutableList.of(new SNIHostName(sniServerName))); + parameters.setEndpointIdentificationAlgorithm("HTTPS"); + engine.setSSLParameters(parameters); + return engine; + } + + private int getFakePort(String sniServerName) { + fakePorts.addIfAbsent(sniServerName); + return FAKE_PORT_OFFSET + fakePorts.indexOf(sniServerName); + } + + @Override + public void close() { + // nothing to do + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java index 3a96b067ada..87bea563796 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/ssl/SslHandlerFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java index 28bd5fdf2e4..351ed96d66f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
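To restate the comment in `SniSslEngineFactory.newSslEngine`: the JDK caches SSL sessions by the advisory `(host, port)` pair, and all nodes behind an SNI proxy share one address, so each SNI server name gets a stable fake port to partition the cache per node. The scheme, isolated for clarity (same logic as `getFakePort` above):

```java
import java.util.concurrent.CopyOnWriteArrayList;

// Each distinct SNI server name maps to a stable port in the ephemeral range.
// CopyOnWriteArrayList keeps indexes stable: addIfAbsent means the first
// registration wins, so a given name always resolves to the same fake port.
final class FakePorts {
  private static final int OFFSET = 49152; // start of the ephemeral port range
  private final CopyOnWriteArrayList<String> names = new CopyOnWriteArrayList<>();

  int portFor(String sniServerName) {
    names.addIfAbsent(sniServerName);
    return OFFSET + names.indexOf(sniServerName);
  }
}
```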
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,7 +48,7 @@ @ThreadSafe public class AtomicTimestampGenerator extends MonotonicTimestampGenerator { - private AtomicLong lastRef = new AtomicLong(0); + private final AtomicLong lastRef = new AtomicLong(0); public AtomicTimestampGenerator(DriverContext context) { super(context); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java index 4a12a788068..e576b13a74b 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/Clock.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +28,14 @@ public interface Clock { Logger LOG = LoggerFactory.getLogger(Clock.class); - /** Returns the best implementation for the current platform. */ + /** + * Returns the best implementation for the current platform. + * + *
<p>
      Usage with non-blocking threads: beware that this method may block the calling thread on its + * very first invocation, because native libraries used by the driver will be loaded at that + * moment. If that is a problem, consider invoking this method once from a thread that is allowed + * to block. Subsequent invocations are guaranteed not to block. + */ static Clock getInstance(boolean forceJavaClock) { if (forceJavaClock) { LOG.info("Using Java system clock because this was explicitly required in the configuration"); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java index 449d298019f..b6dfbebcdb0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/JavaClock.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java index 9fa0bf482bf..99a520d02b1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
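Regarding the new `Clock.getInstance` javadoc above: applications that call the driver from non-blocking threads can sidestep the one-time blocking load by touching the clock once during startup, from a thread that may block. A minimal sketch (assuming eager initialization at startup is acceptable):

```java
import com.datastax.oss.driver.internal.core.time.Clock;

public class ClockWarmUp {
  public static void main(String[] args) {
    // The very first call may block while native libraries are loaded;
    // do it here so that later calls from non-blocking threads never do.
    Clock clock = Clock.getInstance(false);
    System.out.println("Clock implementation: " + clock.getClass().getSimpleName());
  }
}
```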
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java index 7e6caf73a05..51265ead820 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/NativeClock.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java index 1e9f6c52eeb..0df056deb04 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ServerSideTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.time; import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.time.TimestampGenerator; import net.jcip.annotations.ThreadSafe; @@ -45,7 +48,7 @@ public ServerSideTimestampGenerator(@SuppressWarnings("unused") DriverContext co @Override public long next() { - return Long.MIN_VALUE; + return Statement.NO_DEFAULT_TIMESTAMP; } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java index 511a3a2e395..598ae5cbbc2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGenerator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java new file mode 100644 index 00000000000..6fe2ba059bd --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTracker.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.tracker; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; +import com.datastax.oss.driver.internal.core.util.Loggers; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; +import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Combines multiple request trackers into a single one. + * + *
<p>
      Any exception thrown by a child tracker is caught and logged. + */ +@ThreadSafe +public class MultiplexingRequestTracker implements RequestTracker { + + private static final Logger LOG = LoggerFactory.getLogger(MultiplexingRequestTracker.class); + + private final List trackers = new CopyOnWriteArrayList<>(); + + public MultiplexingRequestTracker() {} + + public MultiplexingRequestTracker(RequestTracker... trackers) { + this(Arrays.asList(trackers)); + } + + public MultiplexingRequestTracker(Collection trackers) { + addTrackers(trackers); + } + + private void addTrackers(Collection source) { + for (RequestTracker tracker : source) { + addTracker(tracker); + } + } + + private void addTracker(RequestTracker toAdd) { + Objects.requireNonNull(toAdd, "tracker cannot be null"); + if (toAdd instanceof MultiplexingRequestTracker) { + addTrackers(((MultiplexingRequestTracker) toAdd).trackers); + } else { + trackers.add(toAdd); + } + } + + public void register(@NonNull RequestTracker tracker) { + addTracker(tracker); + } + + @Override + public void onSuccess( + @NonNull Request request, + long latencyNanos, + @NonNull DriverExecutionProfile executionProfile, + @NonNull Node node, + @NonNull String sessionRequestLogPrefix) { + invokeTrackers( + tracker -> + tracker.onSuccess( + request, latencyNanos, executionProfile, node, sessionRequestLogPrefix), + sessionRequestLogPrefix, + "onSuccess"); + } + + @Override + public void onError( + @NonNull Request request, + @NonNull Throwable error, + long latencyNanos, + @NonNull DriverExecutionProfile executionProfile, + @Nullable Node node, + @NonNull String sessionRequestLogPrefix) { + invokeTrackers( + tracker -> + tracker.onError( + request, error, latencyNanos, executionProfile, node, sessionRequestLogPrefix), + sessionRequestLogPrefix, + "onError"); + } + + @Override + public void onNodeSuccess( + @NonNull Request request, + long latencyNanos, + @NonNull DriverExecutionProfile executionProfile, + @NonNull Node node, + @NonNull String nodeRequestLogPrefix) { + invokeTrackers( + tracker -> + tracker.onNodeSuccess( + request, latencyNanos, executionProfile, node, nodeRequestLogPrefix), + nodeRequestLogPrefix, + "onNodeSuccess"); + } + + @Override + public void onNodeError( + @NonNull Request request, + @NonNull Throwable error, + long latencyNanos, + @NonNull DriverExecutionProfile executionProfile, + @NonNull Node node, + @NonNull String nodeRequestLogPrefix) { + invokeTrackers( + tracker -> + tracker.onNodeError( + request, error, latencyNanos, executionProfile, node, nodeRequestLogPrefix), + nodeRequestLogPrefix, + "onNodeError"); + } + + @Override + public void onSessionReady(@NonNull Session session) { + invokeTrackers(tracker -> tracker.onSessionReady(session), session.getName(), "onSessionReady"); + } + + @Override + public void close() throws Exception { + for (RequestTracker tracker : trackers) { + try { + tracker.close(); + } catch (Exception e) { + Loggers.warnWithException( + LOG, "Unexpected error while closing request tracker {}.", tracker, e); + } + } + } + + private void invokeTrackers( + @NonNull Consumer action, String logPrefix, String event) { + for (RequestTracker tracker : trackers) { + try { + action.accept(tracker); + } catch (Exception e) { + Loggers.warnWithException( + LOG, + "[{}] Unexpected error while notifying request tracker {} of an {} event.", + logPrefix, + tracker, + event, + e); + } + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java 
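A minimal usage sketch (not part of the patch) of the `MultiplexingRequestTracker` added above: it flattens nested multiplexers and dispatches each callback to every child, isolating failures. This assumes the `RequestTracker` callbacks have default no-op implementations, so only `close()` must be overridden:

```java
import com.datastax.oss.driver.api.core.tracker.RequestTracker;
import com.datastax.oss.driver.internal.core.tracker.MultiplexingRequestTracker;

public class TrackerCompositionSketch {
  // Trivial tracker; the other callbacks are assumed to default to no-ops.
  static class NoopishTracker implements RequestTracker {
    @Override
    public void close() {}
  }

  public static void main(String[] args) throws Exception {
    MultiplexingRequestTracker multiplexer =
        new MultiplexingRequestTracker(new NoopishTracker(), new NoopishTracker());
    // Registering another multiplexer adds its children directly:
    // addTracker() flattens nested MultiplexingRequestTracker instances.
    multiplexer.register(new MultiplexingRequestTracker(new NoopishTracker()));
    multiplexer.close();
  }
}
```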
b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java index 0cec5dd2691..3821c6ace2d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/NoopRequestTracker.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,29 +21,13 @@ import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.api.core.tracker.RequestTracker; import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.ThreadSafe; /** - * A no-op request tracker. - * - *
<p>
      To activate this tracker, modify the {@code advanced.request-tracker} section in the driver - * configuration, for example: - * - *
<pre>
      - * datastax-java-driver {
      - *   advanced.request-tracker {
      - *     class = NoopRequestTracker
      - *   }
      - * }
- * </pre>
      - * - * See {@code reference.conf} (in the manual or core driver JAR) for more details. - * - *
<p>
      Note that if a tracker is specified programmatically with {@link - * SessionBuilder#withRequestTracker(RequestTracker)}, the configuration is ignored. + * Default request tracker implementation with empty methods. This implementation is used when no + * trackers were registered, neither programmatically nor through the configuration. */ @ThreadSafe public class NoopRequestTracker implements RequestTracker { @@ -56,7 +42,7 @@ public void onSuccess( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String requestPrefix) { + @NonNull String sessionRequestLogPrefix) { // nothing to do } @@ -67,7 +53,7 @@ public void onError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, Node node, - @NonNull String requestPrefix) { + @NonNull String sessionRequestLogPrefix) { // nothing to do } @@ -78,7 +64,7 @@ public void onNodeError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String requestPrefix) { + @NonNull String nodeRequestLogPrefix) { // nothing to do } @@ -88,7 +74,7 @@ public void onNodeSuccess( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String requestPrefix) { + @NonNull String nodeRequestLogPrefix) { // nothing to do } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java index fd49a2e92d9..808d08e228d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java index bdd99fc5ab7..f242ff89c54 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/RequestLogger.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,7 +40,7 @@ *
<pre>
        * datastax-java-driver {
        *   advanced.request-tracker {
      - *     class = RequestLogger
      + *     classes = [RequestLogger]
        *     logs {
        *       success { enabled = true }
        *       slow { enabled = true, threshold = 1 second }
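The hunk above changes the documented option from `class` to `classes` (a list), since several trackers can now be configured at once. For reference, a hedged sketch of the programmatic alternative, assuming the `SessionBuilder#addRequestTracker(RequestTracker)` method referenced in the javadoc below and no-op default callbacks on `RequestTracker`; per that javadoc, programmatic trackers cause the configuration to be ignored:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.tracker.RequestTracker;

public class AddTrackerSketch {
  public static void main(String[] args) {
    RequestTracker tracker =
        new RequestTracker() {
          @Override
          public void close() {}
        };
    // Assumed builder method; trackers added here take precedence over
    // anything configured under advanced.request-tracker.
    try (CqlSession session = CqlSession.builder().addRequestTracker(tracker).build()) {
      session.execute("SELECT release_version FROM system.local");
    }
  }
}
```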
      @@ -56,7 +58,7 @@
        * See {@code reference.conf} (in the manual or core driver JAR) for more details.
        *
        * 
<p>
      Note that if a tracker is specified programmatically with {@link - * SessionBuilder#withRequestTracker(RequestTracker)}, the configuration is ignored. + * SessionBuilder#addRequestTracker(RequestTracker)}, the configuration is ignored. */ @ThreadSafe public class RequestLogger implements RequestTracker { @@ -84,7 +86,7 @@ public void onSuccess( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String logPrefix) { + @NonNull String sessionRequestLogPrefix) { boolean successEnabled = executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, false); @@ -127,7 +129,7 @@ public void onSuccess( showValues, maxValues, maxValueLength, - logPrefix); + sessionRequestLogPrefix); } @Override @@ -137,7 +139,7 @@ public void onError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, Node node, - @NonNull String logPrefix) { + @NonNull String sessionRequestLogPrefix) { if (!executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, false)) { return; @@ -171,7 +173,7 @@ public void onError( maxValues, maxValueLength, showStackTraces, - logPrefix); + sessionRequestLogPrefix); } @Override @@ -181,7 +183,7 @@ public void onNodeError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String logPrefix) { + @NonNull String nodeRequestLogPrefix) { // Nothing to do } @@ -191,7 +193,7 @@ public void onNodeSuccess( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String logPrefix) { + @NonNull String nodeRequestLogPrefix) { // Nothing to do } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java new file mode 100644 index 00000000000..cc07d6717f4 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/UuidRequestIdGenerator.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.tracker; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import edu.umd.cs.findbugs.annotations.NonNull; + +public class UuidRequestIdGenerator implements RequestIdGenerator { + public UuidRequestIdGenerator(DriverContext context) {} + + /** Generates a random v4 UUID. 
*/ + @Override + public String getSessionRequestId() { + return Uuids.random().toString(); + } + + /** + * {session-request-id}-{random-uuid} All node requests for a session request will have the same + * session request id + */ + @Override + public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { + return parentId + "-" + Uuids.random(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java new file mode 100644 index 00000000000..fe15b93bc8e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/tracker/W3CContextRequestIdGenerator.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.tracker; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; +import com.datastax.oss.driver.shaded.guava.common.io.BaseEncoding; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.security.SecureRandom; +import java.util.Random; + +public class W3CContextRequestIdGenerator implements RequestIdGenerator { + + private final Random random = new SecureRandom(); + private final BaseEncoding baseEncoding = BaseEncoding.base16().lowerCase(); + private final String payloadKey; + + public W3CContextRequestIdGenerator(DriverContext context) { + payloadKey = RequestIdGenerator.super.getCustomPayloadKey(); + } + + public W3CContextRequestIdGenerator(String payloadKey) { + this.payloadKey = payloadKey; + } + + /** Random 16 bytes, e.g. "4bf92f3577b34da6a3ce929d0e0e4736" */ + @Override + public String getSessionRequestId() { + byte[] bytes = new byte[16]; + random.nextBytes(bytes); + return baseEncoding.encode(bytes); + } + + /** + * Following the format of W3C "traceparent" spec, + * https://www.w3.org/TR/trace-context/#traceparent-header-field-values e.g. 
+ * "00-4bf92f3577b34da6a3ce929d0e0e4736-a3ce929d0e0e4736-01" All node requests in the same session + * request share the same "trace-id" field value + */ + @Override + public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { + byte[] bytes = new byte[8]; + random.nextBytes(bytes); + return String.format("00-%s-%s-00", parentId, baseEncoding.encode(bytes)); + } + + @Override + public String getCustomPayloadKey() { + return this.payloadKey; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java index 97e23446227..1e02a6b8e82 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DataTypeHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java index 3b2f22ff302..7b9e03818ac 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultCustomType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java index f8b93ed5696..6c21b44639e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultListType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
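For illustration, a sketch of the identifiers the `W3CContextRequestIdGenerator` above produces; values are random, the statement only satisfies the method signature, and the `"traceparent"` payload key is this example's choice:

```java
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.internal.core.tracker.W3CContextRequestIdGenerator;

public class W3CIdSketch {
  public static void main(String[] args) {
    W3CContextRequestIdGenerator generator = new W3CContextRequestIdGenerator("traceparent");
    String sessionRequestId = generator.getSessionRequestId();
    // 32 lowercase hex characters, e.g. "4bf92f3577b34da6a3ce929d0e0e4736"
    String nodeRequestId =
        generator.getNodeRequestId(
            SimpleStatement.newInstance("SELECT * FROM tbl"), sessionRequestId);
    // "00-<trace-id>-<parent-id>-00": the trace-id is the session request id,
    // the parent-id is 16 fresh hex characters.
    System.out.println(nodeRequestId);
  }
}
```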
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java index 316975484e2..8da9f196f26 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultMapType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java index 285315a6b20..27641731c72 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultSetType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java index efecfdf540e..29b1b20436f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultTupleType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java index 92cfe72fe14..6b1431dc699 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultUserDefinedType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -109,11 +111,23 @@ public List getFieldNames() { return fieldNames; } + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + return index.allIndicesOf(id); + } + @Override public int firstIndexOf(@NonNull CqlIdentifier id) { return index.firstIndexOf(id); } + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + return index.allIndicesOf(name); + } + @Override public int firstIndexOf(@NonNull String name) { return index.firstIndexOf(name); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java new file mode 100644 index 00000000000..0b1ced94769 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/DefaultVectorType.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type; + +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.VectorType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultVectorType implements VectorType { + + public static final String VECTOR_CLASS_NAME = "org.apache.cassandra.db.marshal.VectorType"; + + private final DataType subtype; + private final int dimensions; + + public DefaultVectorType(DataType subtype, int dimensions) { + + this.dimensions = dimensions; + this.subtype = subtype; + } + + /* ============== ContainerType interface ============== */ + @Override + public DataType getElementType() { + return this.subtype; + } + + /* ============== VectorType interface ============== */ + @Override + public int getDimensions() { + return this.dimensions; + } + + /* ============== CustomType interface ============== */ + @NonNull + @Override + public String getClassName() { + return VECTOR_CLASS_NAME; + } + + @NonNull + @Override + public String asCql(boolean includeFrozen, boolean pretty) { + return String.format("vector<%s, %d>", getElementType().asCql(true, false), getDimensions()); + } + + /* ============== General class implementation ============== */ + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } else if (o instanceof DefaultVectorType) { + DefaultVectorType that = (DefaultVectorType) o; + return that.subtype.equals(this.subtype) && that.dimensions == this.dimensions; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(DefaultVectorType.class, subtype, dimensions); + } + + @Override + public String toString() { + return String.format("Vector(%s, %d)", getElementType(), getDimensions()); + } + + @Override + public boolean isDetached() { + return false; + } + + @Override + public void attach(@NonNull AttachmentPoint attachmentPoint) { + // nothing to do + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java index 909a58d053a..c6f815a7487 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/PrimitiveType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
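A quick sketch of the CQL rendering implemented by `DefaultVectorType#asCql` above; `DataTypes.FLOAT` is the driver's standard constant for the CQL `float` type:

```java
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.internal.core.type.DefaultVectorType;

public class VectorTypeSketch {
  public static void main(String[] args) {
    DefaultVectorType vectorType = new DefaultVectorType(DataTypes.FLOAT, 3);
    // Renders the element type in its CQL form, then the dimension count.
    System.out.println(vectorType.asCql(false, false)); // vector<float, 3>
  }
}
```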
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.protocol.internal.ProtocolConstants; import edu.umd.cs.findbugs.annotations.NonNull; import java.io.Serializable; +import java.util.Locale; import net.jcip.annotations.Immutable; @Immutable @@ -67,7 +70,7 @@ public int hashCode() { @NonNull @Override public String asCql(boolean includeFrozen, boolean pretty) { - return codeName(protocolCode).toLowerCase(); + return codeName(protocolCode).toLowerCase(Locale.ROOT); } @Override diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java index 1bd04ad005d..43e05f17690 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/UserDefinedTypeBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.type; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; @@ -37,6 +40,7 @@ public class UserDefinedTypeBuilder { private boolean frozen; private final ImmutableList.Builder fieldNames; private final ImmutableList.Builder fieldTypes; + private AttachmentPoint attachmentPoint = AttachmentPoint.NONE; public UserDefinedTypeBuilder(CqlIdentifier keyspaceName, CqlIdentifier typeName) { this.keyspaceName = keyspaceName; @@ -69,8 +73,13 @@ public UserDefinedTypeBuilder frozen() { return this; } + public UserDefinedTypeBuilder withAttachmentPoint(AttachmentPoint attachmentPoint) { + this.attachmentPoint = attachmentPoint; + return this; + } + public UserDefinedType build() { return new DefaultUserDefinedType( - keyspaceName, typeName, frozen, fieldNames.build(), fieldTypes.build()); + keyspaceName, typeName, frozen, fieldNames.build(), fieldTypes.build(), attachmentPoint); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java index 7ba465543bf..8496da17fa6 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; import net.jcip.annotations.ThreadSafe; @ThreadSafe @@ -88,4 +91,10 @@ public Long parse(@Nullable String value) { String.format("Cannot parse 64-bits long value from \"%s\"", value)); } } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(8); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java index 4aeed77b00b..1f5fcd5eeaa 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,12 @@ import java.nio.ByteBuffer; import net.jcip.annotations.ThreadSafe; +/** + * A codec that maps the CQL type {@code blob} to the Java type {@link ByteBuffer}. + * + *
<p>
      If you are looking for a codec mapping the CQL type {@code blob} to the Java type {@code + * byte[]}, you should use {@link SimpleBlobCodec} instead. + */ @ThreadSafe public class BlobCodec implements TypeCodec { @NonNull diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java index 8768311f2ac..af388982be9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; import net.jcip.annotations.ThreadSafe; @ThreadSafe @@ -96,4 +99,10 @@ public Boolean parse(@Nullable String value) { String.format("Cannot parse boolean value from \"%s\"", value)); } } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(1); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java index 3d0347eebe6..ab90ba09c20 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java index 0a1a49e7016..90f6f56cf06 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java index b1e1d204bfd..61a854e88d8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java index 82c11e73dc4..2fc463ef7d2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DateCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java index 49a128e423d..25650b733cd 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java index 91071c0d847..b01847517d9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; import net.jcip.annotations.ThreadSafe; @ThreadSafe @@ -88,4 +91,10 @@ public Double parse(@Nullable String value) { String.format("Cannot parse 64-bits double value from \"%s\"", value)); } } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(8); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java index a3ff38a2b83..fd851edfad3 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; import net.jcip.annotations.ThreadSafe; @ThreadSafe @@ -88,4 +91,10 @@ public Float parse(@Nullable String value) { String.format("Cannot parse 32-bits float value from \"%s\"", value)); } } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(4); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java index efc7c254d21..167c7109bf9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/InetCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java index 702c9a40d2d..b11b164a445 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/IntCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; import net.jcip.annotations.ThreadSafe; @ThreadSafe @@ -88,4 +91,10 @@ public Integer parse(@Nullable String value) { String.format("Cannot parse 32-bits int value from \"%s\"", value)); } } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(4); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java index dd4001e3930..d587bbd5887 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ListCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
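The `serializedSize()` overrides added to the fixed-width codecs above let callers learn a value's encoded width without serializing it. A sketch, assuming `serializedSize()` is declared on the `TypeCodec` interface elsewhere in this patch with a default of `Optional.empty()` for variable-width codecs:

```java
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;

public class SerializedSizeSketch {
  public static void main(String[] args) {
    TypeCodec<Integer> intCodec = TypeCodecs.INT;
    // After this patch: prints 4, the fixed wire size of a CQL int.
    // A variable-width codec (e.g. text) would fall through to -1.
    System.out.println(intCodec.serializedSize().orElse(-1));
  }
}
```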
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -115,11 +117,19 @@ public List<ElementT> decode( int size = input.getInt(); List<ElementT> result = new ArrayList<>(size); for (int i = 0; i < size; i++) { + ElementT element; int elementSize = input.getInt(); - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - input.position(input.position() + elementSize); - result.add(elementCodec.decode(encodedElement, protocolVersion)); + // Allow null elements on the decode path, because Cassandra might return such collections + // for some computed values in the future -- e.g. SELECT ttl(some_collection) + if (elementSize < 0) { + element = null; + } else { + ByteBuffer encodedElement = input.slice(); + encodedElement.limit(elementSize); + element = elementCodec.decode(encodedElement, protocolVersion); + input.position(input.position() + elementSize); + } + result.add(element); } return result; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java index 4f330b3ab59..999f41bf207 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/MapCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -134,18 +136,28 @@ public Map<KeyT, ValueT> decode( int size = input.getInt(); Map<KeyT, ValueT> result = Maps.newLinkedHashMapWithExpectedSize(size); for (int i = 0; i < size; i++) { + KeyT key; int keySize = input.getInt(); - ByteBuffer encodedKey = input.slice(); - encodedKey.limit(keySize); - input.position(input.position() + keySize); - KeyT key = keyCodec.decode(encodedKey, protocolVersion); - + // Allow null elements on the decode path, because Cassandra might return such collections + // for some computed values in the future -- e.g.
SELECT ttl(some_collection) + if (keySize < 0) { + key = null; + } else { + ByteBuffer encodedKey = input.slice(); + encodedKey.limit(keySize); + key = keyCodec.decode(encodedKey, protocolVersion); + input.position(input.position() + keySize); + } + ValueT value; int valueSize = input.getInt(); - ByteBuffer encodedValue = input.slice(); - encodedValue.limit(valueSize); - input.position(input.position() + valueSize); - ValueT value = valueCodec.decode(encodedValue, protocolVersion); - + if (valueSize < 0) { + value = null; + } else { + ByteBuffer encodedValue = input.slice(); + encodedValue.limit(valueSize); + value = valueCodec.decode(encodedValue, protocolVersion); + input.position(input.position() + valueSize); + } result.put(key, value); } return result; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java index c8aa5e7df9b..a52130a093d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ParseUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +27,7 @@ public class ParseUtils { * @return the index of the first character in toParse from idx that is not a "space". */ public static int skipSpaces(String toParse, int idx) { - while (isBlank(toParse.charAt(idx)) && idx < toParse.length()) ++idx; + while (idx < toParse.length() && isBlank(toParse.charAt(idx))) ++idx; return idx; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java index 7dc0c930c6e..fc4c0887516 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SetCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
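For context: on the wire, a collection value is a 4-byte element count followed by `[length][payload]` pairs, and a negative length denotes a CQL NULL element. That is why the rewritten List/Map/Set decode loops branch on `size < 0` before slicing (and why the `ParseUtils.skipSpaces` fix above checks the bound before indexing). A standalone sketch of that framing, not driver code:

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class CollectionFraming {
  // Decodes [count][len0][bytes0][len1][bytes1]... where len < 0 means NULL.
  static List<ByteBuffer> readElements(ByteBuffer input) {
    ByteBuffer in = input.duplicate();
    int count = in.getInt();
    List<ByteBuffer> elements = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      int length = in.getInt();
      if (length < 0) {
        elements.add(null); // NULL element: no payload bytes follow
      } else {
        ByteBuffer element = in.slice();
        element.limit(length);
        in.position(in.position() + length);
        elements.add(element);
      }
    }
    return elements;
  }

  public static void main(String[] args) {
    // One 4-byte element followed by one NULL element.
    ByteBuffer buf = ByteBuffer.allocate(16).putInt(2).putInt(4).putInt(42).putInt(-1);
    buf.flip();
    System.out.println(readElements(buf)); // second entry prints as "null"
  }
}
```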
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -116,11 +118,19 @@ public Set<ElementT> decode( int size = input.getInt(); Set<ElementT> result = Sets.newLinkedHashSetWithExpectedSize(size); for (int i = 0; i < size; i++) { + ElementT element; int elementSize = input.getInt(); - ByteBuffer encodedElement = input.slice(); - encodedElement.limit(elementSize); - input.position(input.position() + elementSize); - result.add(elementCodec.decode(encodedElement, protocolVersion)); + // Allow null elements on the decode path, because Cassandra might return such collections + // for some computed values in the future -- e.g. SELECT ttl(some_collection) + if (elementSize < 0) { + element = null; + } else { + ByteBuffer encodedElement = input.slice(); + encodedElement.limit(elementSize); + element = elementCodec.decode(encodedElement, protocolVersion); + input.position(input.position() + elementSize); + } + result.add(element); } return result; } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java new file mode 100644 index 00000000000..9f90feb8e7c --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodec.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec; + +import com.datastax.oss.driver.api.core.data.ByteUtils; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.extras.array.ByteListToArrayCodec; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import net.jcip.annotations.Immutable; + +/** + * A codec that maps the CQL type {@code blob} to the Java type {@code byte[]}. + * + * <p>If you are looking for a codec mapping the CQL type {@code blob} to the Java type {@link + * ByteBuffer}, you should use {@link BlobCodec} instead. + * + * <p>If you are looking for a codec mapping the CQL type {@code list<tinyint>} to the Java type + * {@code byte[]}, you should use {@link ByteListToArrayCodec} instead. + */ +@Immutable +public class SimpleBlobCodec extends MappingCodec<ByteBuffer, byte[]> { + + public SimpleBlobCodec() { + super(TypeCodecs.BLOB, GenericType.of(byte[].class)); + } + + @Override + public boolean accepts(@NonNull Object value) { + return value instanceof byte[]; + } + + @Override + public boolean accepts(@NonNull Class<?> javaClass) { + return byte[].class.equals(javaClass); + } + + @Nullable + @Override + protected byte[] innerToOuter(@Nullable ByteBuffer value) { + return value == null ? null : ByteUtils.getArray(value); + } + + @Nullable + @Override + protected ByteBuffer outerToInner(@Nullable byte[] value) { + return value == null ? null : ByteBuffer.wrap(value); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java index d8ec3c2d414..08beb0b34c5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java index bffe3a10fd1..2a9acdd8c47 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/StringCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
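SimpleBlobCodec is a thin `MappingCodec` that only converts between the inner codec's type (`ByteBuffer`) and the outer one (`byte[]`). A dependency-free sketch of those two conversions; the defensive copy mirrors what `ByteUtils.getArray` is assumed to do when it cannot safely hand out the backing array:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

public class BlobMapping {
  // outerToInner: byte[] -> ByteBuffer (wrap, no copy)
  static ByteBuffer toBuffer(byte[] value) {
    return value == null ? null : ByteBuffer.wrap(value);
  }

  // innerToOuter: ByteBuffer -> byte[] (copy the readable region,
  // leaving the source buffer's position untouched)
  static byte[] toArray(ByteBuffer value) {
    if (value == null) {
      return null;
    }
    byte[] result = new byte[value.remaining()];
    value.duplicate().get(result);
    return result;
  }

  public static void main(String[] args) {
    byte[] blob = {1, 2, 3};
    System.out.println(Arrays.equals(blob, toArray(toBuffer(blob)))); // true: round-trip is lossless
  }
}
```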
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,22 +22,47 @@ import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import io.netty.util.concurrent.FastThreadLocal; import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CodingErrorAction; import net.jcip.annotations.ThreadSafe; @ThreadSafe public class StringCodec implements TypeCodec<String> { private final DataType cqlType; - private final Charset charset; + private final FastThreadLocal<CharsetEncoder> charsetEncoder; + private final FastThreadLocal<CharsetDecoder> charsetDecoder; public StringCodec(@NonNull DataType cqlType, @NonNull Charset charset) { this.cqlType = cqlType; - this.charset = charset; + charsetEncoder = + new FastThreadLocal<CharsetEncoder>() { + @Override + protected CharsetEncoder initialValue() throws Exception { + return charset + .newEncoder() + .onMalformedInput(CodingErrorAction.REPORT) + .onUnmappableCharacter(CodingErrorAction.REPORT); + } + }; + charsetDecoder = + new FastThreadLocal<CharsetDecoder>() { + @Override + protected CharsetDecoder initialValue() throws Exception { + return charset + .newDecoder() + .onMalformedInput(CodingErrorAction.REPORT) + .onUnmappableCharacter(CodingErrorAction.REPORT); + } + }; } @NonNull @@ -63,7 +90,14 @@ public boolean accepts(@NonNull Class<?> javaClass) { @Nullable @Override public ByteBuffer encode(@Nullable String value, @NonNull ProtocolVersion protocolVersion) { - return (value == null) ? null : ByteBuffer.wrap(value.getBytes(charset)); + if (value == null) { + return null; + } + try { + return charsetEncoder.get().encode(CharBuffer.wrap(value)); + } catch (CharacterCodingException e) { + throw new IllegalArgumentException(e); + } } @Nullable @@ -74,7 +108,11 @@ public String decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protoc } else if (bytes.remaining() == 0) { return ""; } else { - return new String(Bytes.getArray(bytes), charset); + try { + return charsetDecoder.get().decode(bytes.duplicate()).toString(); + } catch (CharacterCodingException e) { + throw new IllegalArgumentException(e); + } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java index 5d862982acf..4977687342d 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
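The StringCodec change swaps `String.getBytes(Charset)` for cached per-thread `CharsetEncoder`/`CharsetDecoder` instances: those objects are stateful and not thread-safe, but reusing them avoids the temporary allocations the old path incurred on every call, and `CodingErrorAction.REPORT` surfaces malformed input as an error instead of silently substituting replacement characters. The driver uses Netty's `FastThreadLocal`; the same pattern with plain `java.lang.ThreadLocal`:

```java
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;

public class ThreadLocalEncoding {
  // One stateful encoder per thread; REPORT turns bad input into an exception.
  private static final ThreadLocal<CharsetEncoder> UTF8_ENCODER =
      ThreadLocal.withInitial(
          () ->
              StandardCharsets.UTF_8
                  .newEncoder()
                  .onMalformedInput(CodingErrorAction.REPORT)
                  .onUnmappableCharacter(CodingErrorAction.REPORT));

  static ByteBuffer encode(String value) {
    try {
      // encode() resets the encoder before use, so reuse is safe per thread
      return UTF8_ENCODER.get().encode(CharBuffer.wrap(value));
    } catch (CharacterCodingException e) {
      throw new IllegalArgumentException(e);
    }
  }

  public static void main(String[] args) {
    System.out.println(encode("héllo").remaining()); // 6: 'é' takes two bytes in UTF-8
  }
}
```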
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java index a8866fada21..95744f63ee3 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java index aa7d147581f..964f774c8d9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,6 +33,7 @@ import java.time.Instant; import java.time.ZoneId; import java.util.Date; +import java.util.Optional; import java.util.TimeZone; import net.jcip.annotations.ThreadSafe; @@ -96,7 +99,7 @@ * * * By default, when parsing, timestamp literals that do not include any time zone information will - * be interpreted using the system's {@linkplain TimeZone#getDefault() default time zone}. 
This is + * be interpreted using the system's {@linkplain ZoneId#systemDefault() default time zone}. This is + * intended to mimic Apache Cassandra(R)'s own parsing behavior (see {@code + * org.apache.cassandra.serializers.TimestampSerializer}). The default time zone can be modified + * using the {@linkplain TimestampCodec#TimestampCodec(ZoneId) one-arg constructor} that takes a @@ -291,4 +294,10 @@ public Instant parse(@Nullable String value) { String.format("Cannot parse timestamp value from \"%s\"", value)); } } + + @NonNull + @Override + public Optional<Integer> serializedSize() { + return Optional.of(8); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java index 27241aa7833..13bf79b70d5 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java index 2a900ce7a10..cc85266682c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
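The javadoc correction above (`ZoneId#systemDefault()` instead of `TimeZone#getDefault()`) matches the codec's documented fallback: a timestamp literal with no zone information is interpreted in the codec's default zone. A standalone `java.time` sketch of that fallback logic; `parseWithDefaultZone` is illustrative, not the codec's actual implementation:

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;

public class DefaultZoneParsing {
  // If the literal carries no offset/zone, interpret it in defaultZone.
  static Instant parseWithDefaultZone(String literal, ZoneId defaultZone) {
    try {
      return Instant.parse(literal); // accepts zoned literals like "2023-06-21T00:00:00Z"
    } catch (DateTimeParseException e) {
      LocalDateTime local = LocalDateTime.parse(literal, DateTimeFormatter.ISO_LOCAL_DATE_TIME);
      return local.atZone(defaultZone).toInstant();
    }
  }

  public static void main(String[] args) {
    // Same wall-clock literal, two different instants depending on the default zone:
    System.out.println(parseWithDefaultZone("2023-06-21T00:00:00", ZoneId.of("UTC")));
    System.out.println(parseWithDefaultZone("2023-06-21T00:00:00", ZoneId.of("America/New_York")));
  }
}
```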
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -109,7 +111,7 @@ public TupleValue decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion pr } int elementSize = input.getInt(); ByteBuffer element; - if (elementSize == -1) { + if (elementSize < 0) { element = null; } else { element = input.slice(); @@ -161,59 +163,85 @@ public TupleValue parse(@Nullable String value) { } TupleValue tuple = cqlType.newValue(); + int length = value.length(); int position = ParseUtils.skipSpaces(value, 0); - if (value.charAt(position++) != '(') { + if (value.charAt(position) != '(') { throw new IllegalArgumentException( String.format( "Cannot parse tuple value from \"%s\", at character %d expecting '(' but got '%c'", value, position, value.charAt(position))); } + position++; position = ParseUtils.skipSpaces(value, position); - if (value.charAt(position) == ')') { - return tuple; - } - CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - int i = 0; - while (position < value.length()) { + int field = 0; + while (position < length) { + if (value.charAt(position) == ')') { + position = ParseUtils.skipSpaces(value, position + 1); + if (position == length) { + return tuple; + } + throw new IllegalArgumentException( + String.format( + "Cannot parse tuple value from \"%s\", at character %d expecting EOF or blank, but got \"%s\"", + value, position, value.substring(position))); + } int n; try { n = ParseUtils.skipCQLValue(value, position); } catch (IllegalArgumentException e) { throw new IllegalArgumentException( String.format( - "Cannot parse tuple value from \"%s\", invalid CQL value at character %d", - value, position), + "Cannot parse tuple value from \"%s\", invalid CQL value at field %d (character %d)", + value, field, position), e); } String fieldValue = value.substring(position, n); - DataType elementType = cqlType.getComponentTypes().get(i); + DataType elementType = cqlType.getComponentTypes().get(field); TypeCodec<Object> codec = registry.codecFor(elementType); - tuple = tuple.set(i, codec.parse(fieldValue), codec); + Object parsed; + try { + parsed = codec.parse(fieldValue); + } catch (Exception e) { + throw new IllegalArgumentException( + String.format( + "Cannot parse tuple value from \"%s\", invalid CQL value at field %d (character %d): %s", + value, field, position, e.getMessage()), + e); + } + tuple = tuple.set(field, parsed, codec); position = n; - i += 1; position = ParseUtils.skipSpaces(value, position); + if (position == length) { + throw new IllegalArgumentException( + String.format( + "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting ',' or ')', but got EOF", + value, field, position)); + } if (value.charAt(position) == ')') { - return tuple; + continue; } if (value.charAt(position) != ',') { throw new IllegalArgumentException( String.format( - "Cannot parse tuple value from \"%s\", at character %d expecting ',' but got '%c'", - value, position, value.charAt(position))); + "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting ',' but got '%c'", + value, field, position, value.charAt(position))); } ++position; // skip ',' position = ParseUtils.skipSpaces(value, position); + field += 1; } throw new IllegalArgumentException( - String.format("Malformed tuple value \"%s\", missing closing ')'", value));
+ String.format( + "Cannot parse tuple value from \"%s\", at field %d (character %d) expecting CQL value or ')', got EOF", + value, field, position)); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java index 2e2df95ad33..5d0a379f761 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,10 +30,14 @@ import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; import net.jcip.annotations.ThreadSafe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @ThreadSafe public class UdtCodec implements TypeCodec<UdtValue> { + private static final Logger LOG = LoggerFactory.getLogger(UdtCodec.class); + private final UserDefinedType cqlType; public UdtCodec(@NonNull UserDefinedType cqlType) { @@ -104,15 +110,13 @@ public UdtValue decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion prot UdtValue value = cqlType.newValue(); int i = 0; while (input.hasRemaining()) { - if (i > cqlType.getFieldTypes().size()) { - throw new IllegalArgumentException( - String.format( - "Too many fields in encoded UDT value, expected %d", - cqlType.getFieldTypes().size())); + if (i == cqlType.getFieldTypes().size()) { + LOG.debug("Encountered unexpected fields when parsing codec {}", cqlType); + break; } int elementSize = input.getInt(); ByteBuffer element; - if (elementSize == -1) { + if (elementSize < 0) { element = null; } else { element = input.slice(); @@ -165,24 +169,40 @@ public UdtValue parse(@Nullable String value) { } UdtValue udt = cqlType.newValue(); + int length = value.length(); int position = ParseUtils.skipSpaces(value, 0); - if (value.charAt(position++) != '{') { + if (value.charAt(position) != '{') { throw new IllegalArgumentException( String.format( - "Cannot parse UDT value from \"%s\", at character %d expecting '{' but got '%c'", + "Cannot parse UDT value from \"%s\" at character %d: expecting '{' but got '%c'", value, position, value.charAt(position))); } + position++; position = ParseUtils.skipSpaces(value, position); - if (value.charAt(position) == '}') { - return udt; + if (position == length) { + throw new IllegalArgumentException( + String.format( + "Cannot parse UDT value from \"%s\" at character %d: expecting CQL identifier or '}', got EOF", + value, position)); } CodecRegistry registry = cqlType.getAttachmentPoint().getCodecRegistry(); - while (position < value.length()) { + CqlIdentifier id = null; + while (position < length) { + if
(value.charAt(position) == '}') { + position = ParseUtils.skipSpaces(value, position + 1); + if (position == length) { + return udt; + } + throw new IllegalArgumentException( + String.format( + "Cannot parse UDT value from \"%s\", at character %d expecting EOF or blank, but got \"%s\"", + value, position, value.substring(position))); + } int n; try { n = ParseUtils.skipCQLId(value, position); @@ -193,21 +213,30 @@ public UdtValue parse(@Nullable String value) { value, position), e); } - CqlIdentifier id = CqlIdentifier.fromInternal(value.substring(position, n)); + id = CqlIdentifier.fromInternal(value.substring(position, n)); position = n; if (!cqlType.contains(id)) { throw new IllegalArgumentException( - String.format("Unknown field %s in value \"%s\"", id, value)); + String.format( + "Cannot parse UDT value from \"%s\", unknown CQL identifier at character %d: \"%s\"", + value, position, id)); } position = ParseUtils.skipSpaces(value, position); - if (value.charAt(position++) != ':') { + if (position == length) { + throw new IllegalArgumentException( + String.format( + "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ':', but got EOF", + value, id, position)); + } + if (value.charAt(position) != ':') { throw new IllegalArgumentException( String.format( - "Cannot parse UDT value from \"%s\", at character %d expecting ':' but got '%c'", - value, position, value.charAt(position))); + "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ':', but got '%c'", + value, id, position, value.charAt(position))); } + position++; position = ParseUtils.skipSpaces(value, position); try { @@ -215,8 +244,8 @@ public UdtValue parse(@Nullable String value) { } catch (IllegalArgumentException e) { throw new IllegalArgumentException( String.format( - "Cannot parse UDT value from \"%s\", invalid CQL value at character %d", - value, position), + "Cannot parse UDT value from \"%s\", invalid CQL value at field %s (character %d)", + value, id, position), e); } @@ -224,24 +253,42 @@ public UdtValue parse(@Nullable String value) { // This works because ids occur at most once in UDTs DataType fieldType = cqlType.getFieldTypes().get(cqlType.firstIndexOf(id)); TypeCodec<Object> codec = registry.codecFor(fieldType); - udt = udt.set(id, codec.parse(fieldValue), codec); + Object parsed; + try { + parsed = codec.parse(fieldValue); + } catch (Exception e) { + throw new IllegalArgumentException( + String.format( + "Cannot parse UDT value from \"%s\", invalid CQL value at field %s (character %d): %s", + value, id, position, e.getMessage()), + e); + } + udt = udt.set(id, parsed, codec); position = n; position = ParseUtils.skipSpaces(value, position); + if (position == length) { + throw new IllegalArgumentException( + String.format( + "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ',' or '}', but got EOF", + value, id, position)); + } if (value.charAt(position) == '}') { - return udt; + continue; } if (value.charAt(position) != ',') { throw new IllegalArgumentException( String.format( - "Cannot parse UDT value from \"%s\", at character %d expecting ',' but got '%c'", - value, position, value.charAt(position))); + "Cannot parse UDT value from \"%s\", at field %s (character %d) expecting ',' but got '%c'", + value, id, position, value.charAt(position))); } ++position; // skip ',' position = ParseUtils.skipSpaces(value, position); } throw new IllegalArgumentException( - String.format("Malformed UDT value \"%s\", missing closing '}'", value)); + String.format( +
"Cannot parse UDT value from \"%s\" at field %s (character %d): expecting CQL identifier or '}', got EOF", + value, id, position)); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java index ba3ef0ab110..cc5f48dbe52 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; +import java.util.Optional; import java.util.UUID; import net.jcip.annotations.ThreadSafe; @@ -93,4 +96,10 @@ public UUID parse(@Nullable String value) { String.format("Cannot parse UUID value from \"%s\"", value), e); } } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(16); + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java index eec3ee239b3..b04c959c704 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VarIntCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java new file mode 100644 index 00000000000..1f8ce1a7166 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodec.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.data.CqlVector; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.VectorType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.DefaultVectorType; +import com.datastax.oss.driver.internal.core.type.util.VIntCoding; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Optional; +import java.util.stream.Collectors; + +public class VectorCodec<SubtypeT> implements TypeCodec<CqlVector<SubtypeT>> { + + private final VectorType cqlType; + private final GenericType<CqlVector<SubtypeT>> javaType; + private final TypeCodec<SubtypeT> subtypeCodec; + + public VectorCodec(@NonNull VectorType cqlType, @NonNull TypeCodec<SubtypeT> subtypeCodec) { + this.cqlType = cqlType; + this.subtypeCodec = subtypeCodec; + this.javaType = GenericType.vectorOf(subtypeCodec.getJavaType()); + } + + public VectorCodec(int dimensions, @NonNull TypeCodec<SubtypeT> subtypeCodec) { + this(new DefaultVectorType(subtypeCodec.getCqlType(), dimensions), subtypeCodec); + } + + @NonNull + @Override + public GenericType<CqlVector<SubtypeT>> getJavaType() { + return this.javaType; + } + + @NonNull + @Override + public Optional<Integer> serializedSize() { + return subtypeCodec.serializedSize().isPresent() + ?
Optional.of(subtypeCodec.serializedSize().get() * cqlType.getDimensions()) + : Optional.empty(); + } + + @NonNull + @Override + public DataType getCqlType() { + return this.cqlType; + } + + @Nullable + @Override + public ByteBuffer encode( + @Nullable CqlVector<SubtypeT> value, @NonNull ProtocolVersion protocolVersion) { + boolean isVarSized = !subtypeCodec.serializedSize().isPresent(); + if (value == null || cqlType.getDimensions() <= 0) { + return null; + } + ByteBuffer[] valueBuffs = new ByteBuffer[cqlType.getDimensions()]; + Iterator<SubtypeT> values = value.iterator(); + int allValueBuffsSize = 0; + for (int i = 0; i < cqlType.getDimensions(); ++i) { + ByteBuffer valueBuff; + SubtypeT valueObj; + + try { + valueObj = values.next(); + } catch (NoSuchElementException nsee) { + throw new IllegalArgumentException( + String.format( + "Not enough elements; must provide elements for %d dimensions", + cqlType.getDimensions())); + } + + try { + valueBuff = this.subtypeCodec.encode(valueObj, protocolVersion); + } catch (ClassCastException e) { + throw new IllegalArgumentException("Invalid type for element: " + valueObj.getClass()); + } + if (valueBuff == null) { + throw new NullPointerException("Vector elements cannot encode to CQL NULL"); + } + int elementSize = valueBuff.limit(); + if (isVarSized) { + allValueBuffsSize += VIntCoding.computeVIntSize(elementSize); + } + allValueBuffsSize += elementSize; + valueBuff.rewind(); + valueBuffs[i] = valueBuff; + } + // if too many elements, throw + if (values.hasNext()) { + throw new IllegalArgumentException( + String.format( + "Too many elements; must provide elements for %d dimensions", + cqlType.getDimensions())); + } + /* Since we already did an early return for <= 0 dimensions above */ + assert valueBuffs.length > 0; + ByteBuffer rv = ByteBuffer.allocate(allValueBuffsSize); + for (int i = 0; i < cqlType.getDimensions(); ++i) { + if (isVarSized) { + VIntCoding.writeUnsignedVInt32(valueBuffs[i].remaining(), rv); + } + rv.put(valueBuffs[i]); + } + rv.flip(); + return rv; + } + + @Nullable + @Override + public CqlVector<SubtypeT> decode( + @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) { + return null; + } + + // Upfront check for fixed-size types only + subtypeCodec + .serializedSize() + .ifPresent( + (fixed_size) -> { + if (bytes.remaining() != cqlType.getDimensions() * fixed_size) { + throw new IllegalArgumentException( + String.format( + "Expected elements of uniform size, observed %d elements with total bytes %d", + cqlType.getDimensions(), bytes.remaining())); + } + }); + ; + ByteBuffer slice = bytes.slice(); + List<SubtypeT> rv = new ArrayList<SubtypeT>(cqlType.getDimensions()); + for (int i = 0; i < cqlType.getDimensions(); ++i) { + + int size = + subtypeCodec + .serializedSize() + .orElseGet(() -> VIntCoding.getUnsignedVInt32(slice, slice.position())); + // If we aren't dealing with a fixed-size type we need to move the current slice position + // beyond the vint-encoded size of the current element. Ideally this would be + // serializedSize().ifNotPresent(Consumer) but the Optional API isn't doing us any favors + // there.
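The encode path sizes its output in a single pass: fixed-size elements contribute exactly `dimensions * serializedSize` bytes, while variable-size elements each pay an extra unsigned-vint length prefix. For the common fixed-size case, a `vector<float, 3>` is therefore just three contiguous big-endian floats; a standalone sketch of that framing, not driver code:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

public class FloatVectorFraming {
  // vector<float, n> with a fixed-size element type is n contiguous
  // 4-byte floats -- no per-element length prefixes.
  static ByteBuffer encode(float[] vector) {
    ByteBuffer out = ByteBuffer.allocate(Float.BYTES * vector.length);
    for (float v : vector) {
      out.putFloat(v);
    }
    out.flip();
    return out;
  }

  static float[] decode(ByteBuffer in, int dimensions) {
    float[] vector = new float[dimensions];
    ByteBuffer slice = in.duplicate(); // leave the caller's position untouched
    for (int i = 0; i < dimensions; i++) {
      vector[i] = slice.getFloat();
    }
    return vector;
  }

  public static void main(String[] args) {
    float[] v = {1.0f, 2.5f, -3.0f};
    System.out.println(Arrays.toString(decode(encode(v), 3))); // [1.0, 2.5, -3.0]
  }
}
```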
+ if (!subtypeCodec.serializedSize().isPresent()) + slice.position(slice.position() + VIntCoding.computeUnsignedVIntSize(size)); + int originalPosition = slice.position(); + slice.limit(originalPosition + size); + rv.add(this.subtypeCodec.decode(slice, protocolVersion)); + // Move to the start of the next element + slice.position(originalPosition + size); + // Reset the limit to the end of the buffer + slice.limit(slice.capacity()); + } + + // if too many elements, throw + if (slice.hasRemaining()) { + throw new IllegalArgumentException( + String.format( + "Too many elements; must provide elements for %d dimensions", + cqlType.getDimensions())); + } + + return CqlVector.newInstance(rv); + } + + @NonNull + @Override + public String format(CqlVector<SubtypeT> value) { + if (value == null) return "NULL"; + return value.stream().map(subtypeCodec::format).collect(Collectors.joining(", ", "[", "]")); + } + + @Nullable + @Override + public CqlVector<SubtypeT> parse(@Nullable String value) { + return (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) + ? null + : CqlVector.from(value, this.subtypeCodec); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java new file mode 100644 index 00000000000..e62e244bf5e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras; + +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collection; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import net.jcip.annotations.Immutable; + +/** + * A codec that wraps other codecs around {@link Optional} instances. + * + * @param <T> The wrapped Java type.
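The class below maps CQL NULL to `Optional.empty()` on the way out and `Optional.empty()` back to NULL on the way in, additionally treating empty collections and maps as absent. The conversion logic in isolation, as a dependency-free sketch:

```java
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;

public class OptionalMapping {
  // innerToOuter: driver value -> Optional (empty collections count as absent)
  static <T> Optional<T> toOuter(T value) {
    boolean absent =
        value == null
            || (value instanceof Collection && ((Collection<?>) value).isEmpty())
            || (value instanceof Map && ((Map<?, ?>) value).isEmpty());
    return absent ? Optional.<T>empty() : Optional.of(value);
  }

  // outerToInner: Optional -> driver value (null encodes as CQL NULL)
  static <T> T toInner(Optional<T> value) {
    return value != null && value.isPresent() ? value.get() : null;
  }

  public static void main(String[] args) {
    System.out.println(toOuter(Collections.emptyList())); // Optional.empty
    System.out.println(toOuter("x"));                     // Optional[x]
    System.out.println(toInner(Optional.empty()));        // null
  }
}
```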
*/ +@Immutable +public class OptionalCodec<T> extends MappingCodec<T, Optional<T>> { + + public OptionalCodec(@NonNull TypeCodec<T> innerCodec) { + super( + Objects.requireNonNull(innerCodec, "innerCodec must not be null"), + GenericType.optionalOf(innerCodec.getJavaType())); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + if (value instanceof Optional) { + Optional<?> optional = (Optional<?>) value; + return optional.map(innerCodec::accepts).orElse(true); + } + return false; + } + + @Nullable + @Override + protected Optional<T> innerToOuter(@Nullable T value) { + return Optional.ofNullable(isAbsent(value) ? null : value); + } + + @Nullable + @Override + protected T outerToInner(@Nullable Optional<T> value) { + return value != null && value.isPresent() ? value.get() : null; + } + + protected boolean isAbsent(@Nullable T value) { + return value == null + || (value instanceof Collection && ((Collection<?>) value).isEmpty()) + || (value instanceof Map && ((Map<?, ?>) value).isEmpty()); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java new file mode 100644 index 00000000000..fcf61a4e7b3 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractListToArrayCodec.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.ListType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.ParseUtils; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.lang.reflect.Array; +import java.util.Objects; + +/** + * Base class for all codecs dealing with Java arrays. This class aims to reduce the amount of code + * required to create such codecs. + * + * @param <ArrayT> The Java array type this codec handles + */ +public abstract class AbstractListToArrayCodec<ArrayT> implements TypeCodec<ArrayT> { + + @NonNull protected final ListType cqlType; + @NonNull protected final GenericType<ArrayT> javaType; + + /** + * @param cqlType The CQL type. Must be a list type. + * @param arrayType The Java type. Must be an array class.
*/ + protected AbstractListToArrayCodec( + @NonNull ListType cqlType, @NonNull GenericType<ArrayT> arrayType) { + this.cqlType = Objects.requireNonNull(cqlType, "cqlType cannot be null"); + this.javaType = Objects.requireNonNull(arrayType, "arrayType cannot be null"); + if (!arrayType.isArray()) { + throw new IllegalArgumentException("Expecting Java array class, got " + arrayType); + } + } + + @NonNull + @Override + public GenericType<ArrayT> getJavaType() { + return javaType; + } + + @NonNull + @Override + public DataType getCqlType() { + return cqlType; + } + + @NonNull + @Override + public String format(@Nullable ArrayT array) { + if (array == null) { + return "NULL"; + } + int length = Array.getLength(array); + StringBuilder sb = new StringBuilder(); + sb.append('['); + for (int i = 0; i < length; i++) { + if (i != 0) { + sb.append(","); + } + formatElement(sb, array, i); + } + sb.append(']'); + return sb.toString(); + } + + @Nullable + @Override + public ArrayT parse(@Nullable String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { + return null; + } + int idx = skipSpaces(value, 0); + idx = skipOpeningBracket(value, idx); + idx = skipSpaces(value, idx); + if (value.charAt(idx) == ']') { + return newInstance(0); + } + // first pass: determine array length + int length = getArrayLength(value, idx); + // second pass: parse elements + ArrayT array = newInstance(length); + int i = 0; + for (; idx < value.length(); i++) { + int n = skipLiteral(value, idx); + parseElement(value.substring(idx, n), array, i); + idx = skipSpaces(value, n); + if (value.charAt(idx) == ']') { + return array; + } + idx = skipComma(value, idx); + idx = skipSpaces(value, idx); + } + throw new IllegalArgumentException( + String.format("Malformed list value \"%s\", missing closing ']'", value)); + } + + /** + * Creates a new array instance with the given size. + * + * @param size The size of the array to instantiate. + * @return a new array instance with the given size. + */ + @NonNull + protected abstract ArrayT newInstance(int size); + + /** + * Formats the {@code index}th element of {@code array} to {@code output}. + * + * @param output The StringBuilder to write to. + * @param array The array to read from. + * @param index The element index. + */ + protected abstract void formatElement( + @NonNull StringBuilder output, @NonNull ArrayT array, int index); + + /** + * Parses the {@code index}th element of {@code array} from {@code input}. + * + * @param input The String to read from. + * @param array The array to write to. + * @param index The element index.
*/ + protected abstract void parseElement(@NonNull String input, @NonNull ArrayT array, int index); + + private int getArrayLength(String value, int idx) { + int length = 1; + for (; idx < value.length(); length++) { + idx = skipLiteral(value, idx); + idx = skipSpaces(value, idx); + if (value.charAt(idx) == ']') { + break; + } + idx = skipComma(value, idx); + idx = skipSpaces(value, idx); + } + return length; + } + + private int skipComma(String value, int idx) { + if (value.charAt(idx) != ',') { + throw new IllegalArgumentException( + String.format( + "Cannot parse list value from \"%s\", at character %d expecting ',' but got '%c'", + value, idx, value.charAt(idx))); + } + return idx + 1; + } + + private int skipOpeningBracket(String value, int idx) { + if (value.charAt(idx) != '[') { + throw new IllegalArgumentException( + String.format( + "Cannot parse list value from \"%s\", at character %d expecting '[' but got '%c'", + value, idx, value.charAt(idx))); + } + return idx + 1; + } + + private int skipSpaces(String value, int idx) { + try { + return ParseUtils.skipSpaces(value, idx); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + String.format( + "Cannot parse list value from \"%s\", at character %d expecting space but got '%c'", + value, idx, value.charAt(idx)), + e); + } + } + + private int skipLiteral(String value, int idx) { + try { + return ParseUtils.skipCQLValue(value, idx); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + String.format( + "Cannot parse list value from \"%s\", invalid CQL value at character %d", value, idx), + e); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java new file mode 100644 index 00000000000..3e5ece7c159 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/AbstractPrimitiveListToArrayCodec.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.ListType; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.lang.reflect.Array; +import java.nio.ByteBuffer; +import java.util.Objects; + +/** + * Base class for all codecs dealing with Java primitive arrays.
This class provides a more + * efficient implementation of {@link #encode(Object, ProtocolVersion)} and {@link + * #decode(ByteBuffer, ProtocolVersion)} for primitive arrays. + * + * @param <PrimitiveArrayT> The Java primitive array type this codec handles + */ +public abstract class AbstractPrimitiveListToArrayCodec<PrimitiveArrayT> + extends AbstractListToArrayCodec<PrimitiveArrayT> { + + /** + * @param cqlType The CQL type. Must be a list type. + * @param javaClass The Java type. Must be an array class. + */ + protected AbstractPrimitiveListToArrayCodec( + @NonNull ListType cqlType, @NonNull GenericType<PrimitiveArrayT> javaClass) { + super(cqlType, javaClass); + GenericType<?> componentType = Objects.requireNonNull(javaClass.getComponentType()); + if (!componentType.isPrimitive()) { + throw new IllegalArgumentException( + "Expecting primitive array component type, got " + componentType); + } + } + + @Nullable + @Override + public ByteBuffer encode( + @Nullable PrimitiveArrayT array, @NonNull ProtocolVersion protocolVersion) { + if (array == null) { + return null; + } + int length = Array.getLength(array); + int sizeOfElement = 4 + sizeOfComponentType(); + int totalSize = 4 + length * sizeOfElement; + ByteBuffer output = ByteBuffer.allocate(totalSize); + output.putInt(length); + for (int i = 0; i < length; i++) { + output.putInt(sizeOfComponentType()); + serializeElement(output, array, i, protocolVersion); + } + output.flip(); + return output; + } + + @Nullable + @Override + public PrimitiveArrayT decode( + @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) { + return newInstance(0); + } + ByteBuffer input = bytes.duplicate(); + int length = input.getInt(); + PrimitiveArrayT array = newInstance(length); + for (int i = 0; i < length; i++) { + int elementSize = input.getInt(); + // Null elements can happen on the decode path, but we cannot tolerate them + if (elementSize < 0) { + throw new NullPointerException("Primitive arrays cannot store null elements"); + } else { + deserializeElement(input, array, i, protocolVersion); + } + } + return array; + } + + /** + * Return the size in bytes of the array component type. + * + * @return the size in bytes of the array component type. + */ + protected abstract int sizeOfComponentType(); + + /** + * Write the {@code index}th element of {@code array} to {@code output}. + * + * @param output The ByteBuffer to write to. + * @param array The array to read from. + * @param index The element index. + * @param protocolVersion The protocol version to use. + */ + protected abstract void serializeElement( + @NonNull ByteBuffer output, + @NonNull PrimitiveArrayT array, + int index, + @NonNull ProtocolVersion protocolVersion); + + /** + * Read the {@code index}th element of {@code array} from {@code input}. + * + * @param input The ByteBuffer to read from. + * @param array The array to write to. + * @param index The element index. + * @param protocolVersion The protocol version to use.
+ */ + protected abstract void deserializeElement( + @NonNull ByteBuffer input, + @NonNull PrimitiveArrayT array, + int index, + @NonNull ProtocolVersion protocolVersion); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java new file mode 100644 index 00000000000..c9cc0baa41f --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanListToArrayCodec.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * A codec that maps the CQL type {@code list} to the Java type {@code boolean[]}. + * + *
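<p>For example, a minimal usage sketch, assuming the codec has been registered when building the
+ * session; the table and column names below are hypothetical:
+ *
+ * <pre>{@code
+ * // register the codec at session creation
+ * CqlSession session = CqlSession.builder().addTypeCodecs(new BooleanListToArrayCodec()).build();
+ * Row row = session.execute("SELECT flags FROM my_table WHERE pk = 0").one();
+ * boolean[] flags = row.get("flags", boolean[].class); // no boxing, no intermediary List
+ * }</pre>
+ *
+ *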
<p>
      Note that this codec is designed for performance and converts CQL lists directly to + * {@code boolean[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * boolean} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ +@Immutable +public class BooleanListToArrayCodec extends AbstractPrimitiveListToArrayCodec { + + private static final byte TRUE = (byte) 1; + private static final byte FALSE = (byte) 0; + + public BooleanListToArrayCodec() { + super(DataTypes.listOf(DataTypes.BOOLEAN), GenericType.of(boolean[].class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + Objects.requireNonNull(javaClass); + return boolean[].class.equals(javaClass); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof boolean[]; + } + + @Override + protected int sizeOfComponentType() { + return 1; + } + + @Override + protected void serializeElement( + @NonNull ByteBuffer output, + @NonNull boolean[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + byte element = array[index] ? TRUE : FALSE; + output.put(element); + } + + @Override + protected void deserializeElement( + @NonNull ByteBuffer input, + @NonNull boolean[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + array[index] = input.get() == TRUE; + } + + @Override + protected void formatElement(@NonNull StringBuilder output, @NonNull boolean[] array, int index) { + output.append(array[index]); + } + + @Override + protected void parseElement(@NonNull String input, @NonNull boolean[] array, int index) { + array[index] = Boolean.parseBoolean(input); + } + + @NonNull + @Override + protected boolean[] newInstance(int size) { + return new boolean[size]; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java new file mode 100644 index 00000000000..b811908e341 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteListToArrayCodec.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.array;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.type.codec.SimpleBlobCodec;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+import net.jcip.annotations.Immutable;
+
+/**
+ * A codec that maps the CQL type {@code list<tinyint>} to the Java type {@code byte[]}.
+ *
+ *
<p>
      Note that this codec is not suitable for reading CQL blobs as byte arrays; you should use + * {@link SimpleBlobCodec} for that. + * + *
<p>
Note that this codec is designed for performance and converts CQL lists directly to
+ * {@code byte[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code byte}
+ * values; it also instantiates arrays without the need for an intermediary Java {@code List}
+ * object.
+ */
+@Immutable
+public class ByteListToArrayCodec extends AbstractPrimitiveListToArrayCodec<byte[]> {
+
+  public ByteListToArrayCodec() {
+    super(DataTypes.listOf(DataTypes.TINYINT), GenericType.of(byte[].class));
+  }
+
+  @Override
+  public boolean accepts(@NonNull Class<?> javaClass) {
+    Objects.requireNonNull(javaClass);
+    return byte[].class.equals(javaClass);
+  }
+
+  @Override
+  public boolean accepts(@NonNull Object value) {
+    Objects.requireNonNull(value);
+    return value instanceof byte[];
+  }
+
+  @Override
+  protected int sizeOfComponentType() {
+    return 1;
+  }
+
+  @Override
+  protected void serializeElement(
+      @NonNull ByteBuffer output,
+      @NonNull byte[] array,
+      int index,
+      @NonNull ProtocolVersion protocolVersion) {
+    output.put(array[index]);
+  }
+
+  @Override
+  protected void deserializeElement(
+      @NonNull ByteBuffer input,
+      @NonNull byte[] array,
+      int index,
+      @NonNull ProtocolVersion protocolVersion) {
+    array[index] = input.get();
+  }
+
+  @Override
+  protected void formatElement(@NonNull StringBuilder output, @NonNull byte[] array, int index) {
+    output.append(array[index]);
+  }
+
+  @Override
+  protected void parseElement(@NonNull String input, @NonNull byte[] array, int index) {
+    array[index] = Byte.parseByte(input);
+  }
+
+  @NonNull
+  @Override
+  protected byte[] newInstance(int size) {
+    return new byte[size];
+  }
+}
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java
new file mode 100644
index 00000000000..fdf5befa635
--- /dev/null
+++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleListToArrayCodec.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.array;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+import net.jcip.annotations.Immutable;
+
+/**
+ * A codec that maps the CQL type {@code list<double>} to the Java type {@code double[]}.
+ *
+ *
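<p>A usage sketch for the write path, assuming the codec is registered and a prepared
+ * statement with a hypothetical {@code list<double>} bind variable named {@code samples}:
+ *
+ * <pre>{@code
+ * BoundStatement bound =
+ *     preparedStatement.bind().set("samples", new double[] {1.0, 2.5}, double[].class);
+ * }</pre>
+ *
+ *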
<p>
      Note that this codec is designed for performance and converts CQL lists directly to + * {@code double[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * double} values; it also instantiates arrays without the need for an intermediary Java {@code + * List} object. + */ +@Immutable +public class DoubleListToArrayCodec extends AbstractPrimitiveListToArrayCodec { + + public DoubleListToArrayCodec() { + super(DataTypes.listOf(DataTypes.DOUBLE), GenericType.of(double[].class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + Objects.requireNonNull(javaClass); + return double[].class.equals(javaClass); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof double[]; + } + + @Override + protected int sizeOfComponentType() { + return 8; + } + + @Override + protected void serializeElement( + @NonNull ByteBuffer output, + @NonNull double[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + output.putDouble(array[index]); + } + + @Override + protected void deserializeElement( + @NonNull ByteBuffer input, + @NonNull double[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + array[index] = input.getDouble(); + } + + @Override + protected void formatElement(@NonNull StringBuilder output, @NonNull double[] array, int index) { + output.append(array[index]); + } + + @Override + protected void parseElement(@NonNull String input, @NonNull double[] array, int index) { + array[index] = Double.parseDouble(input); + } + + @NonNull + @Override + protected double[] newInstance(int size) { + return new double[size]; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java new file mode 100644 index 00000000000..b77e5d1243d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatListToArrayCodec.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * A codec that maps the CQL type {@code list} to the Java type {@code float[]}. + * + *
<p>
      Note that this codec is designed for performance and converts CQL lists directly to + * {@code float[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code + * float} values; it also instantiates arrays without the need for an intermediary Java {@code List} + * object. + */ +@Immutable +public class FloatListToArrayCodec extends AbstractPrimitiveListToArrayCodec { + + public FloatListToArrayCodec() { + super(DataTypes.listOf(DataTypes.FLOAT), GenericType.of(float[].class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + Objects.requireNonNull(javaClass); + return float[].class.equals(javaClass); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof float[]; + } + + @Override + protected int sizeOfComponentType() { + return 4; + } + + @Override + protected void serializeElement( + @NonNull ByteBuffer output, + @NonNull float[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + output.putFloat(array[index]); + } + + @Override + protected void deserializeElement( + @NonNull ByteBuffer input, + @NonNull float[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + array[index] = input.getFloat(); + } + + @Override + protected void formatElement(@NonNull StringBuilder output, @NonNull float[] array, int index) { + output.append(array[index]); + } + + @Override + protected void parseElement(@NonNull String input, @NonNull float[] array, int index) { + array[index] = Float.parseFloat(input); + } + + @NonNull + @Override + protected float[] newInstance(int size) { + return new float[size]; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java new file mode 100644 index 00000000000..cf464282b1e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntListToArrayCodec.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * A codec that maps the CQL type {@code list} to the Java type {@code int[]}. + * + *
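<p>For example, once the codec is registered, a {@code list<int>} column can be read without
+ * any boxing; the column name below is hypothetical:
+ *
+ * <pre>{@code
+ * int[] scores = row.get("scores", int[].class); // no List<Integer> is ever created
+ * }</pre>
+ *
+ *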
<p>
      Note that this codec is designed for performance and converts CQL lists directly to + * {@code int[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code int} + * values; it also instantiates arrays without the need for an intermediary Java {@code List} + * object. + */ +@Immutable +public class IntListToArrayCodec extends AbstractPrimitiveListToArrayCodec { + + public IntListToArrayCodec() { + super(DataTypes.listOf(DataTypes.INT), GenericType.of(int[].class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + Objects.requireNonNull(javaClass); + return int[].class.equals(javaClass); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof int[]; + } + + @Override + protected int sizeOfComponentType() { + return 4; + } + + @Override + protected void serializeElement( + @NonNull ByteBuffer output, + @NonNull int[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + output.putInt(array[index]); + } + + @Override + protected void deserializeElement( + @NonNull ByteBuffer input, + @NonNull int[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + array[index] = input.getInt(); + } + + @Override + protected void formatElement(@NonNull StringBuilder output, @NonNull int[] array, int index) { + output.append(array[index]); + } + + @Override + protected void parseElement(@NonNull String input, @NonNull int[] array, int index) { + array[index] = Integer.parseInt(input); + } + + @NonNull + @Override + protected int[] newInstance(int size) { + return new int[size]; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java new file mode 100644 index 00000000000..bde21d40272 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongListToArrayCodec.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * A codec that maps the CQL type {@code list} to the Java type {@code long[]}. + * + *
<p>
      Note that this codec is designed for performance and converts CQL lists directly to + * {@code long[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code long} + * values; it also instantiates arrays without the need for an intermediary Java {@code List} + * object. + */ +@Immutable +public class LongListToArrayCodec extends AbstractPrimitiveListToArrayCodec { + + public LongListToArrayCodec() { + super(DataTypes.listOf(DataTypes.BIGINT), GenericType.of(long[].class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + Objects.requireNonNull(javaClass); + return long[].class.equals(javaClass); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof long[]; + } + + @Override + protected int sizeOfComponentType() { + return 8; + } + + @Override + protected void serializeElement( + @NonNull ByteBuffer output, + @NonNull long[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + output.putLong(array[index]); + } + + @Override + protected void deserializeElement( + @NonNull ByteBuffer input, + @NonNull long[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + array[index] = input.getLong(); + } + + @Override + protected void formatElement(@NonNull StringBuilder output, @NonNull long[] array, int index) { + output.append(array[index]); + } + + @Override + protected void parseElement(@NonNull String input, @NonNull long[] array, int index) { + array[index] = Long.parseLong(input); + } + + @NonNull + @Override + protected long[] newInstance(int size) { + return new long[size]; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java new file mode 100644 index 00000000000..8600ba3e9a5 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectListToArrayCodec.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.lang.reflect.Array; +import java.nio.ByteBuffer; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * Codec dealing with Java object arrays. 
Serialization and deserialization of elements in the array + * is delegated to the provided element codec. + * + *
<p>
For example, to create a codec that maps {@code list<text>} to {@code String[]}, declare the
+ * following:
+ *
+ * <pre>{@code
+ * ObjectListToArrayCodec<String> stringArrayCodec = new ObjectListToArrayCodec<>(TypeCodecs.TEXT);
+ * }</pre>
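+ *
+ * <p>The resulting codec can then be registered like any other codec; for example, as a sketch,
+ * assuming a hypothetical {@code list<text>} column named {@code tags}:
+ *
+ * <pre>{@code
+ * CqlSession session = CqlSession.builder().addTypeCodecs(stringArrayCodec).build();
+ * String[] tags = session.execute("SELECT tags FROM my_table").one().get("tags", String[].class);
+ * }</pre>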
      + * + * @param The Java array component type this codec handles + */ +@Immutable +public class ObjectListToArrayCodec extends AbstractListToArrayCodec { + + private final TypeCodec elementCodec; + + public ObjectListToArrayCodec(@NonNull TypeCodec elementCodec) { + super( + DataTypes.listOf( + Objects.requireNonNull(elementCodec, "elementCodec must not be null").getCqlType()), + GenericType.arrayOf(elementCodec.getJavaType())); + this.elementCodec = elementCodec; + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + Class clazz = value.getClass(); + return clazz.isArray() + && clazz.getComponentType().equals(elementCodec.getJavaType().getRawType()); + } + + @Nullable + @Override + public ByteBuffer encode(@Nullable ElementT[] value, @NonNull ProtocolVersion protocolVersion) { + if (value == null) { + return null; + } + int i = 0; + ByteBuffer[] encodedElements = new ByteBuffer[value.length]; + int toAllocate = 4; // initialize with number of elements + for (ElementT elt : value) { + if (elt == null) { + throw new NullPointerException("Collection elements cannot be null"); + } + ByteBuffer encodedElement; + try { + encodedElement = elementCodec.encode(elt, protocolVersion); + } catch (ClassCastException e) { + throw new IllegalArgumentException( + String.format( + "Invalid type for %s element, expecting %s but got %s", + cqlType, elementCodec.getJavaType(), elt.getClass()), + e); + } + if (encodedElement == null) { + throw new NullPointerException("Collection elements cannot encode to CQL NULL"); + } + encodedElements[i++] = encodedElement; + toAllocate += 4 + encodedElement.remaining(); // the element preceded by its size + } + ByteBuffer result = ByteBuffer.allocate(toAllocate); + result.putInt(value.length); + for (ByteBuffer encodedElement : encodedElements) { + result.putInt(encodedElement.remaining()); + result.put(encodedElement); + } + result.flip(); + return result; + } + + @Nullable + @Override + public ElementT[] decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) { + return newInstance(0); + } + ByteBuffer input = bytes.duplicate(); + int size = input.getInt(); + ElementT[] result = newInstance(size); + for (int i = 0; i < size; i++) { + ElementT element; + int elementSize = input.getInt(); + // Allow null elements on the decode path, because Cassandra might return such collections + // for some computed values in the future -- e.g. 
SELECT ttl(some_collection) + if (elementSize < 0) { + element = null; + } else { + ByteBuffer encodedElement = input.slice(); + encodedElement.limit(elementSize); + element = elementCodec.decode(encodedElement, protocolVersion); + input.position(input.position() + elementSize); + } + result[i] = element; + } + return result; + } + + @Override + protected void formatElement( + @NonNull StringBuilder output, @NonNull ElementT[] array, int index) { + output.append(elementCodec.format(array[index])); + } + + @Override + protected void parseElement(@NonNull String input, @NonNull ElementT[] array, int index) { + array[index] = elementCodec.parse(input); + } + + @NonNull + @Override + @SuppressWarnings("unchecked") + protected ElementT[] newInstance(int size) { + return (ElementT[]) Array.newInstance(getJavaType().getRawType().getComponentType(), size); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java new file mode 100644 index 00000000000..13bb5733bf9 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortListToArrayCodec.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * A codec that maps the CQL type {@code list} to the Java type {@code short[]}. + * + *
<p>
Note that this codec is designed for performance and converts CQL lists directly to
+ * {@code short[]}, thus avoiding any unnecessary boxing and unboxing of Java primitive {@code
+ * short} values; it also instantiates arrays without the need for an intermediary Java {@code List}
+ * object.
+ */
+@Immutable
+public class ShortListToArrayCodec extends AbstractPrimitiveListToArrayCodec<short[]> {
+
+  public ShortListToArrayCodec() {
+    super(DataTypes.listOf(DataTypes.SMALLINT), GenericType.of(short[].class));
+  }
+
+  @Override
+  public boolean accepts(@NonNull Class<?> javaClass) {
+    Objects.requireNonNull(javaClass);
+    return short[].class.equals(javaClass);
+  }
+
+  @Override
+  public boolean accepts(@NonNull Object value) {
+    Objects.requireNonNull(value);
+    return value instanceof short[];
+  }
+
+  @Override
+  protected int sizeOfComponentType() {
+    return 2;
+  }
+
+  @Override
+  protected void serializeElement(
+      @NonNull ByteBuffer output,
+      @NonNull short[] array,
+      int index,
+      @NonNull ProtocolVersion protocolVersion) {
+    output.putShort(array[index]);
+  }
+
+  @Override
+  protected void deserializeElement(
+      @NonNull ByteBuffer input,
+      @NonNull short[] array,
+      int index,
+      @NonNull ProtocolVersion protocolVersion) {
+    array[index] = input.getShort();
+  }
+
+  @Override
+  protected void formatElement(@NonNull StringBuilder output, @NonNull short[] array, int index) {
+    output.append(array[index]);
+  }
+
+  @Override
+  protected void parseElement(@NonNull String input, @NonNull short[] array, int index) {
+    array[index] = Short.parseShort(input);
+  }
+
+  @NonNull
+  @Override
+  protected short[] newInstance(int size) {
+    return new short[size];
+  }
+}
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java
new file mode 100644
index 00000000000..56363ef819e
--- /dev/null
+++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodec.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.enums;
+
+import com.datastax.oss.driver.api.core.type.codec.MappingCodec;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Objects;
+import net.jcip.annotations.Immutable;
+
+/**
+ * A codec that serializes {@link Enum} instances as CQL {@code varchar}s representing their
+ * programmatic names as returned by {@link Enum#name()}.
+ *
+ *
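<p>A usage sketch, with a hypothetical {@code Weekday} enum persisted in a {@code text}
+ * column:
+ *
+ * <pre>{@code
+ * enum Weekday { MONDAY, TUESDAY }
+ * EnumNameCodec<Weekday> codec = new EnumNameCodec<>(Weekday.class);
+ * // Weekday.MONDAY is written to the database as the string "MONDAY"
+ * }</pre>
+ *
+ *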
<p>
Note that this codec relies on the enum constant names; it is therefore vital that
+ * enum names never change.
+ *
+ * @param <EnumT> The Enum class this codec serializes from and deserializes to.
+ */
+@Immutable
+public class EnumNameCodec<EnumT extends Enum<EnumT>> extends MappingCodec<String, EnumT> {
+
+  private final Class<EnumT> enumClass;
+
+  public EnumNameCodec(@NonNull Class<EnumT> enumClass) {
+    super(
+        TypeCodecs.TEXT,
+        GenericType.of(Objects.requireNonNull(enumClass, "enumClass must not be null")));
+    this.enumClass = enumClass;
+  }
+
+  @Nullable
+  @Override
+  protected EnumT innerToOuter(@Nullable String value) {
+    return value == null || value.isEmpty() ? null : Enum.valueOf(enumClass, value);
+  }
+
+  @Nullable
+  @Override
+  protected String outerToInner(@Nullable EnumT value) {
+    return value == null ? null : value.name();
+  }
+}
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java
new file mode 100644
index 00000000000..4d6ca26484e
--- /dev/null
+++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodec.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.enums;
+
+import com.datastax.oss.driver.api.core.type.codec.MappingCodec;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Objects;
+import net.jcip.annotations.Immutable;
+
+/**
+ * A codec that serializes {@link Enum} instances as CQL {@code int}s representing their ordinal
+ * values as returned by {@link Enum#ordinal()}.
+ *
+ *
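<p>A usage sketch, with the same hypothetical {@code Weekday} enum, this time persisted in an
+ * {@code int} column:
+ *
+ * <pre>{@code
+ * EnumOrdinalCodec<Weekday> codec = new EnumOrdinalCodec<>(Weekday.class);
+ * // Weekday.MONDAY is written to the database as 0, its ordinal
+ * }</pre>
+ *
+ *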
<p>
Note that this codec relies on the enum constants declaration order; it is therefore
+ * vital that this order remains immutable.
+ *
+ * @param <EnumT> The Enum class this codec serializes from and deserializes to.
+ */
+@Immutable
+public class EnumOrdinalCodec<EnumT extends Enum<EnumT>> extends MappingCodec<Integer, EnumT> {
+
+  private final EnumT[] enumConstants;
+
+  public EnumOrdinalCodec(@NonNull Class<EnumT> enumClass) {
+    super(
+        TypeCodecs.INT,
+        GenericType.of(Objects.requireNonNull(enumClass, "enumClass must not be null")));
+    this.enumConstants = enumClass.getEnumConstants();
+  }
+
+  @Nullable
+  @Override
+  protected EnumT innerToOuter(@Nullable Integer value) {
+    return value == null ? null : enumConstants[value];
+  }
+
+  @Nullable
+  @Override
+  protected Integer outerToInner(@Nullable EnumT value) {
+    return value == null ? null : value.ordinal();
+  }
+}
diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java
new file mode 100644
index 00000000000..a971d27b3f3
--- /dev/null
+++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodec.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.json;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.util.Strings;
+import com.datastax.oss.protocol.internal.util.Bytes;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JavaType;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.TypeFactory;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Objects;
+
+/**
+ * A JSON codec that maps arbitrary Java objects to JSON strings stored as CQL type {@code text},
+ * using the Jackson library to perform serialization and deserialization of JSON objects.
+ *
+ *
<p>
Note that this codec requires the presence of the Jackson library at runtime. If you use Maven,
+ * this can be done by declaring the following dependency in your project:
+ *
+ * <pre>{@code
+ * <dependency>
+ *   <groupId>com.fasterxml.jackson.core</groupId>
+ *   <artifactId>jackson-databind</artifactId>
+ *   <version>LATEST</version>
+ * </dependency>
+ * }</pre>
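+ *
+ * <p>Once Jackson is on the classpath, an instance can be created and registered; a sketch, with
+ * a hypothetical {@code User} POJO:
+ *
+ * <pre>{@code
+ * JsonCodec<User> userCodec = new JsonCodec<>(User.class);
+ * CqlSession session = CqlSession.builder().addTypeCodecs(userCodec).build();
+ * }</pre>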
+ *
+ * @see Jackson JSON Library
+ * @param <T> The Java type that this codec serializes from and deserializes to, from JSON strings.
+ */
+public class JsonCodec<T> implements TypeCodec<T> {
+
+  private final ObjectMapper objectMapper;
+  private final GenericType<T> javaType;
+  private final JavaType jacksonJavaType;
+
+  /**
+   * Creates a new instance for the provided {@code javaClass}, using a default, newly-allocated
+   * {@link ObjectMapper}.
+   *
+   *
<p>
      The codec created with this constructor can handle all primitive CQL types as well as + * collections thereof, however it cannot handle tuples and user-defined types; if you need + * support for such CQL types, you need to create your own {@link ObjectMapper} and use the + * {@linkplain #JsonCodec(Class, ObjectMapper) two-arg constructor} instead. + * + * @param javaClass the Java class this codec maps to. + */ + public JsonCodec(@NonNull Class javaClass) { + this(GenericType.of(Objects.requireNonNull(javaClass, "javaClass cannot be null"))); + } + + /** + * Creates a new instance for the provided {@code javaType}, using a default, newly-allocated + * {@link ObjectMapper}. + * + *
<p>
      The codec created with this constructor can handle all primitive CQL types as well as + * collections thereof, however it cannot handle tuples and user-defined types; if you need + * support for such CQL types, you need to create your own {@link ObjectMapper} and use the + * {@linkplain #JsonCodec(GenericType, ObjectMapper) two-arg constructor} instead. + * + * @param javaType the Java type this codec maps to. + */ + public JsonCodec(@NonNull GenericType javaType) { + this(javaType, new ObjectMapper()); + } + + /** + * Creates a new instance for the provided {@code javaClass}, and using the provided {@link + * ObjectMapper}. + * + * @param javaClass the Java class this codec maps to. + * @param objectMapper the {@link ObjectMapper} instance to use. + */ + public JsonCodec(@NonNull Class javaClass, @NonNull ObjectMapper objectMapper) { + this( + GenericType.of(Objects.requireNonNull(javaClass, "javaClass cannot be null")), + objectMapper); + } + + /** + * Creates a new instance for the provided {@code javaType}, and using the provided {@link + * ObjectMapper}. + * + * @param javaType the Java type this codec maps to. + * @param objectMapper the {@link ObjectMapper} instance to use. + */ + public JsonCodec(@NonNull GenericType javaType, @NonNull ObjectMapper objectMapper) { + this.javaType = Objects.requireNonNull(javaType, "javaType cannot be null"); + this.objectMapper = Objects.requireNonNull(objectMapper, "objectMapper cannot be null"); + this.jacksonJavaType = TypeFactory.defaultInstance().constructType(javaType.getType()); + } + + @NonNull + @Override + public GenericType getJavaType() { + return javaType; + } + + @NonNull + @Override + public DataType getCqlType() { + return DataTypes.TEXT; + } + + @Nullable + @Override + public ByteBuffer encode(@Nullable T value, @NonNull ProtocolVersion protocolVersion) { + if (value == null) { + return null; + } + try { + return ByteBuffer.wrap(objectMapper.writeValueAsBytes(value)); + } catch (JsonProcessingException e) { + throw new IllegalArgumentException("Failed to encode value as JSON", e); + } + } + + @Nullable + @Override + public T decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + if (bytes == null) { + return null; + } + try { + return objectMapper.readValue(Bytes.getArray(bytes), jacksonJavaType); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to decode JSON value", e); + } + } + + @NonNull + @Override + public String format(@Nullable T value) { + if (value == null) { + return "NULL"; + } + String json; + try { + json = objectMapper.writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new IllegalArgumentException("Failed to format value as JSON", e); + } + return Strings.quote(json); + } + + @Nullable + @Override + public T parse(@Nullable String value) { + if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { + return null; + } + if (!Strings.isQuoted(value)) { + throw new IllegalArgumentException("JSON strings must be enclosed by single quotes"); + } + String json = Strings.unquote(value); + try { + return objectMapper.readValue(json, jacksonJavaType); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse value as JSON", e); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java new file mode 100644 index 
00000000000..6b66b5d2049 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodec.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.time; + +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * {@link TypeCodec} that maps {@link LocalDateTime} to CQL {@code timestamp}, allowing the setting + * and retrieval of {@code timestamp} columns as {@link LocalDateTime} instances. + * + *
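<p>A usage sketch, assuming the codec is registered at session creation; the column name
+ * below is hypothetical:
+ *
+ * <pre>{@code
+ * CqlSession session =
+ *     CqlSession.builder().addTypeCodecs(new LocalTimestampCodec(ZoneId.of("UTC"))).build();
+ * LocalDateTime created = row.get("created", LocalDateTime.class);
+ * }</pre>
+ *
+ *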
<p>
      This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for + * important remarks about implementation notes and accepted timestamp formats. + */ +@Immutable +public class LocalTimestampCodec extends MappingCodec { + + private final ZoneId timeZone; + + /** + * Creates a new {@code LocalTimestampCodec} that converts CQL timestamps into {@link + * LocalDateTime} instances using the system's {@linkplain ZoneId#systemDefault() default time + * zone} as their time zone. The supplied {@code timeZone} will also be used to parse CQL + * timestamp literals that do not include any time zone information. + */ + public LocalTimestampCodec() { + this(ZoneId.systemDefault()); + } + + /** + * Creates a new {@code LocalTimestampCodec} that converts CQL timestamps into {@link + * LocalDateTime} instances using the given {@link ZoneId} as their time zone. The supplied {@code + * timeZone} will also be used to parse CQL timestamp literals that do not include any time zone + * information. + */ + public LocalTimestampCodec(@NonNull ZoneId timeZone) { + super( + new TimestampCodec(Objects.requireNonNull(timeZone, "timeZone cannot be null")), + GenericType.LOCAL_DATE_TIME); + this.timeZone = timeZone; + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof LocalDateTime; + } + + @Nullable + @Override + protected LocalDateTime innerToOuter(@Nullable Instant value) { + return value == null ? null : LocalDateTime.ofInstant(value, timeZone); + } + + @Nullable + @Override + protected Instant outerToInner(@Nullable LocalDateTime value) { + return value == null ? null : value.atZone(timeZone).toInstant(); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java new file mode 100644 index 00000000000..c16a64b9ad9 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodec.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.time; + +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.Objects; +import net.jcip.annotations.Immutable; + +/** + * {@link TypeCodec} that maps {@link ZonedDateTime} to CQL {@code tuple}, + * providing a pattern for maintaining timezone information in Cassandra. + * + *
<p>
      Since Cassandra's timestamp type does not store any time zone, by using a + * tuple<timestamp,varchar> a timezone can be persisted in the varchar + * field of such tuples, and so when the value is deserialized the original timezone is + * preserved. + * + *
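<p>A usage sketch; the schema and the bind variable below are hypothetical:
+ *
+ * <pre>{@code
+ * // CREATE TABLE events (pk int PRIMARY KEY, ts tuple<timestamp, text>)
+ * ZonedDateTime zdt = ZonedDateTime.parse("2011-02-03T04:05+01:00[Europe/Paris]");
+ * // "bound" is a BoundStatement for an INSERT into the events table
+ * bound = bound.set("ts", zdt, ZonedDateTime.class); // stored as (instant, "Europe/Paris")
+ * }</pre>
+ *
+ *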
<p>
      Note: if you want to retrieve CQL timestamps as {@link ZonedDateTime} instances but don't need + * to persist the time zone to the database, you should rather use {@link ZonedTimestampCodec}. + */ +@Immutable +public class PersistentZonedTimestampCodec extends MappingCodec { + + private static final TupleType CQL_TYPE = DataTypes.tupleOf(DataTypes.TIMESTAMP, DataTypes.TEXT); + + public PersistentZonedTimestampCodec() { + super(TypeCodecs.tupleOf(CQL_TYPE), GenericType.ZONED_DATE_TIME); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof ZonedDateTime; + } + + @NonNull + @Override + public TupleType getCqlType() { + return CQL_TYPE; + } + + @NonNull + @Override + public String format(@Nullable ZonedDateTime value) { + if (value == null) { + return "NULL"; + } + // Use TIMESTAMP_UTC for a better-looking format + return "(" + + ExtraTypeCodecs.TIMESTAMP_UTC.format(value.toInstant()) + + "," + + TypeCodecs.TEXT.format(value.getZone().toString()) + + ")"; + } + + @Nullable + @Override + protected ZonedDateTime innerToOuter(@Nullable TupleValue value) { + if (value == null) { + return null; + } else { + Instant instant = Objects.requireNonNull(value.getInstant(0)); + ZoneId zoneId = ZoneId.of(Objects.requireNonNull(value.getString(1))); + return ZonedDateTime.ofInstant(instant, zoneId); + } + } + + @Nullable + @Override + protected TupleValue outerToInner(@Nullable ZonedDateTime value) { + if (value == null) { + return null; + } else { + Instant instant = value.toInstant(); + String zoneId = value.getZone().toString(); + return this.getCqlType().newValue(instant, zoneId); + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java new file mode 100644 index 00000000000..12e3e839d2a --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodec.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.time; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.PrimitiveLongCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.ZoneId; +import java.util.Objects; +import java.util.Optional; +import net.jcip.annotations.Immutable; + +/** + * A {@link TypeCodec} that maps CQL timestamps to Java primitive longs, representing the number of + * milliseconds since the Epoch. + * + *
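<p>A usage sketch, assuming the codec is registered; with it, the raw epoch milliseconds can
+ * be read back directly (the column name below is hypothetical):
+ *
+ * <pre>{@code
+ * long createdMillis = row.getLong("created"); // epoch milliseconds, no Instant allocated
+ * }</pre>
+ *
+ *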
<p>
      This codec can serve as a replacement for the driver's built-in {@link TypeCodecs#TIMESTAMP + * timestamp} codec, when application code prefers to deal with raw milliseconds than with {@link + * Instant} instances. + * + *
<p>
      This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for + * important remarks about implementation notes and accepted timestamp formats. + */ +@Immutable +public class TimestampMillisCodec implements PrimitiveLongCodec { + + private final TimestampCodec timestampCodec; + + /** + * Creates a new {@code TimestampMillisCodec} that uses the system's {@linkplain + * ZoneId#systemDefault() default time zone} to parse timestamp literals that do not include any + * time zone information. + */ + public TimestampMillisCodec() { + this(ZoneId.systemDefault()); + } + + /** + * Creates a new {@code TimestampMillisCodec} that uses the given {@link ZoneId} to parse + * timestamp literals that do not include any time zone information. + */ + public TimestampMillisCodec(ZoneId defaultZoneId) { + timestampCodec = new TimestampCodec(defaultZoneId); + } + + @NonNull + @Override + public GenericType getJavaType() { + return GenericType.LONG; + } + + @NonNull + @Override + public DataType getCqlType() { + return DataTypes.TIMESTAMP; + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + return javaClass == Long.class || javaClass == long.class; + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof Long; + } + + @Nullable + @Override + public ByteBuffer encodePrimitive(long value, @NonNull ProtocolVersion protocolVersion) { + return TypeCodecs.BIGINT.encodePrimitive(value, protocolVersion); + } + + @Override + public long decodePrimitive( + @Nullable ByteBuffer value, @NonNull ProtocolVersion protocolVersion) { + return TypeCodecs.BIGINT.decodePrimitive(value, protocolVersion); + } + + @Nullable + @Override + public Long parse(@Nullable String value) { + Instant instant = timestampCodec.parse(value); + return instant == null ? null : instant.toEpochMilli(); + } + + @NonNull + @Override + public String format(@Nullable Long value) { + Instant instant = value == null ? null : Instant.ofEpochMilli(value); + return timestampCodec.format(instant); + } + + @NonNull + @Override + public Optional serializedSize() { + return Optional.of(8); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ZonedTimestampCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java similarity index 53% rename from core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ZonedTimestampCodec.java rename to core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java index 16649fd8daa..a0947ff3493 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/ZonedTimestampCodec.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,19 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.internal.core.type.codec; +package com.datastax.oss.driver.internal.core.type.codec.extras.time; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodec; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; -import java.nio.ByteBuffer; import java.time.Instant; import java.time.ZoneId; import java.time.ZonedDateTime; +import java.util.Objects; import net.jcip.annotations.ThreadSafe; /** @@ -34,7 +34,8 @@ * *

      Note that Apache Cassandra(R)'s timestamp type does not store any time zone; this codec is * provided merely as a convenience for users that need to deal with zoned timestamps in their - * applications. + * applications. If you need to persist the time zone in the database, consider using {@link + * PersistentZonedTimestampCodec} instead. * *

      This codec shares its logic with {@link TimestampCodec}. See the javadocs of this codec for * important remarks about implementation notes and accepted timestamp formats. @@ -42,9 +43,8 @@ * @see TimestampCodec */ @ThreadSafe -public class ZonedTimestampCodec implements TypeCodec { +public class ZonedTimestampCodec extends MappingCodec { - private final TypeCodec instantCodec; private final ZoneId timeZone; /** @@ -64,63 +64,27 @@ public ZonedTimestampCodec() { * information. */ public ZonedTimestampCodec(ZoneId timeZone) { - instantCodec = new TimestampCodec(timeZone); + super( + new TimestampCodec(Objects.requireNonNull(timeZone, "timeZone cannot be null")), + GenericType.ZONED_DATE_TIME); this.timeZone = timeZone; } - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.ZONED_DATE_TIME; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TIMESTAMP; - } - @Override public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); return value instanceof ZonedDateTime; } - @Override - public boolean accepts(@NonNull Class javaClass) { - return javaClass == ZonedDateTime.class; - } - - @Nullable - @Override - public ByteBuffer encode( - @Nullable ZonedDateTime value, @NonNull ProtocolVersion protocolVersion) { - return instantCodec.encode(value != null ? value.toInstant() : null, protocolVersion); - } - @Nullable @Override - public ZonedDateTime decode( - @Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - Instant instant = instantCodec.decode(bytes, protocolVersion); - if (instant == null) { - return null; - } - return instant.atZone(timeZone); - } - - @NonNull - @Override - public String format(@Nullable ZonedDateTime value) { - return instantCodec.format(value != null ? value.toInstant() : null); + protected ZonedDateTime innerToOuter(@Nullable Instant value) { + return value == null ? null : value.atZone(timeZone); } @Nullable @Override - public ZonedDateTime parse(@Nullable String value) { - Instant instant = instantCodec.parse(value); - if (instant == null) { - return null; - } - return instant.atZone(timeZone); + protected Instant outerToInner(@Nullable ZonedDateTime value) { + return value == null ? null : value.toInstant(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java new file mode 100644 index 00000000000..3e4e844783c --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/AbstractVectorToArrayCodec.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
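The net effect of the `MappingCodec` refactoring above is that `encode`/`decode` are now inherited and only the inner/outer conversions remain. A minimal round-trip sketch, not part of the patch (the import path follows the rename in this diff):

```java
import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.internal.core.type.codec.extras.time.ZonedTimestampCodec;
import java.nio.ByteBuffer;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

public class ZonedTimestampExample {
  public static void main(String[] args) {
    ZonedTimestampCodec codec = new ZonedTimestampCodec(ZoneOffset.UTC);
    ZonedDateTime in = ZonedDateTime.parse("2023-06-21T10:15:30+02:00");
    // outerToInner: only the instant is written, the original zone is lost
    ByteBuffer bytes = codec.encode(in, ProtocolVersion.DEFAULT);
    // innerToOuter: the instant is re-attached to the codec's zone
    ZonedDateTime out = codec.decode(bytes, ProtocolVersion.DEFAULT);
    System.out.println(out); // 2023-06-21T08:15:30Z
  }
}
```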
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.vector; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.VectorType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.lang.reflect.Array; +import java.nio.ByteBuffer; +import java.util.Objects; + +/** Common super-class for all codecs which map a CQL vector type onto a primitive array */ +public abstract class AbstractVectorToArrayCodec implements TypeCodec { + + @NonNull protected final VectorType cqlType; + @NonNull protected final GenericType javaType; + + /** + * @param cqlType The CQL type. Must be a vector type. + * @param arrayType The Java type. Must be an array class. + */ + protected AbstractVectorToArrayCodec( + @NonNull VectorType cqlType, @NonNull GenericType arrayType) { + this.cqlType = Objects.requireNonNull(cqlType, "cqlType cannot be null"); + this.javaType = Objects.requireNonNull(arrayType, "arrayType cannot be null"); + if (!arrayType.isArray()) { + throw new IllegalArgumentException("Expecting Java array class, got " + arrayType); + } + } + + @NonNull + @Override + public GenericType getJavaType() { + return this.javaType; + } + + @NonNull + @Override + public DataType getCqlType() { + return this.cqlType; + } + + @Nullable + @Override + public ByteBuffer encode(@Nullable ArrayT array, @NonNull ProtocolVersion protocolVersion) { + if (array == null) { + return null; + } + int length = Array.getLength(array); + int totalSize = length * sizeOfComponentType(); + ByteBuffer output = ByteBuffer.allocate(totalSize); + for (int i = 0; i < length; i++) { + serializeElement(output, array, i, protocolVersion); + } + output.flip(); + return output; + } + + @Nullable + @Override + public ArrayT decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { + if (bytes == null || bytes.remaining() == 0) { + throw new IllegalArgumentException( + "Input ByteBuffer must not be null and must have non-zero remaining bytes"); + } + ByteBuffer input = bytes.duplicate(); + int length = this.cqlType.getDimensions(); + int elementSize = sizeOfComponentType(); + ArrayT array = newInstance(); + for (int i = 0; i < length; i++) { + // A negative element size would signal variable-size (possibly null) elements, which a primitive array cannot hold + if (elementSize < 0) { + throw new NullPointerException("Primitive arrays cannot store null elements"); + } else { + deserializeElement(input, array, i, protocolVersion); + } + } + return array; + } + + /** + * Creates a new array instance with a size matching the specified vector. + * + * @return a new array instance with a size matching the specified vector. + */ + @NonNull + protected abstract ArrayT newInstance(); + + /** + * Return the size in bytes of the array component type. + * + * @return the size in bytes of the array component type. + */ + protected abstract int sizeOfComponentType(); + + /** + * Write the {@code index}th element of {@code array} to {@code output}. + * + * @param output The ByteBuffer to write to. + * @param array The array to read from. + * @param index The element index. + * @param protocolVersion The protocol version to use.
+ */ + protected abstract void serializeElement( + @NonNull ByteBuffer output, + @NonNull ArrayT array, + int index, + @NonNull ProtocolVersion protocolVersion); + + /** + * Read the {@code index}th element of {@code array} from {@code input}. + * + * @param input The ByteBuffer to read from. + * @param array The array to write to. + * @param index The element index. + * @param protocolVersion The protocol version to use. + */ + protected abstract void deserializeElement( + @NonNull ByteBuffer input, + @NonNull ArrayT array, + int index, + @NonNull ProtocolVersion protocolVersion); +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java new file mode 100644 index 00000000000..86f31dc4980 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/vector/FloatVectorToArrayCodec.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.vector; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.VectorType; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.FloatCodec; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.base.Splitter; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Iterator; +import java.util.Objects; + +/** A codec that maps CQL vectors to the Java type {@code float[]}. 
*/ +public class FloatVectorToArrayCodec extends AbstractVectorToArrayCodec { + + public FloatVectorToArrayCodec(VectorType type) { + super(type, GenericType.of(float[].class)); + } + + @Override + public boolean accepts(@NonNull Class javaClass) { + Objects.requireNonNull(javaClass); + return float[].class.equals(javaClass); + } + + @Override + public boolean accepts(@NonNull Object value) { + Objects.requireNonNull(value); + return value instanceof float[]; + } + + @NonNull + @Override + protected float[] newInstance() { + return new float[cqlType.getDimensions()]; + } + + @Override + protected int sizeOfComponentType() { + return 4; + } + + @Override + protected void serializeElement( + @NonNull ByteBuffer output, + @NonNull float[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + output.putFloat(array[index]); + } + + @Override + protected void deserializeElement( + @NonNull ByteBuffer input, + @NonNull float[] array, + int index, + @NonNull ProtocolVersion protocolVersion) { + array[index] = input.getFloat(); + } + + @NonNull + @Override + public String format(@Nullable float[] value) { + return value == null ? "NULL" : Arrays.toString(value); + } + + @Nullable + @Override + public float[] parse(@Nullable String str) { + Preconditions.checkArgument(str != null, "Cannot create float array from null string"); + Preconditions.checkArgument(!str.isEmpty(), "Cannot create float array from empty string"); + + FloatCodec codec = new FloatCodec(); + float[] rv = this.newInstance(); + Iterator strIter = + Splitter.on(", ").trimResults().split(str.substring(1, str.length() - 1)).iterator(); + for (int i = 0; i < rv.length; ++i) { + String strVal = strIter.next(); + if (strVal == null) { + throw new IllegalArgumentException("Null element observed in float array string"); + } + Float f = codec.parse(strVal); + rv[i] = f.floatValue(); + } + return rv; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java index f85ccc40c3e..3af5a30ba27 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
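A quick round-trip through the new float-array codec, to make the fixed-size encoding concrete (a sketch, not part of the patch; the dimensions travel with the CQL type, so 3 floats always occupy exactly 12 bytes):

```java
import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.VectorType;
import com.datastax.oss.driver.internal.core.type.codec.extras.vector.FloatVectorToArrayCodec;
import java.nio.ByteBuffer;
import java.util.Arrays;

public class FloatVectorExample {
  public static void main(String[] args) {
    VectorType type = DataTypes.vectorOf(DataTypes.FLOAT, 3);
    FloatVectorToArrayCodec codec = new FloatVectorToArrayCodec(type);
    ByteBuffer encoded = codec.encode(new float[] {1.0f, 2.5f, -3.0f}, ProtocolVersion.DEFAULT);
    float[] decoded = codec.decode(encoded, ProtocolVersion.DEFAULT);
    System.out.println(Arrays.toString(decoded)); // [1.0, 2.5, -3.0]
  }
}
```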
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,19 +17,24 @@ */ package com.datastax.oss.driver.internal.core.type.codec.registry; +import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.core.data.TupleValue; import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.ContainerType; import com.datastax.oss.driver.api.core.type.CustomType; import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.ListType; import com.datastax.oss.driver.api.core.type.MapType; import com.datastax.oss.driver.api.core.type.SetType; import com.datastax.oss.driver.api.core.type.TupleType; import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.VectorType; import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import com.datastax.oss.driver.shaded.guava.common.reflect.TypeToken; @@ -37,10 +44,21 @@ import edu.umd.cs.findbugs.annotations.Nullable; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import net.jcip.annotations.ThreadSafe; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +75,7 @@ * implement {@link #getCachedCodec(DataType, GenericType, boolean)}. 
*/ @ThreadSafe -public abstract class CachingCodecRegistry implements CodecRegistry { +public abstract class CachingCodecRegistry implements MutableCodecRegistry { private static final Logger LOG = LoggerFactory.getLogger(CachingCodecRegistry.class); @@ -68,19 +86,83 @@ public abstract class CachingCodecRegistry implements CodecRegistry { protected final String logPrefix; private final TypeCodec[] primitiveCodecs; - private final TypeCodec[] userCodecs; - private final IntMap primitiveCodecsByCode; + private final CopyOnWriteArrayList> userCodecs = new CopyOnWriteArrayList<>(); + private final IntMap> primitiveCodecsByCode; + private final Lock registerLock = new ReentrantLock(); protected CachingCodecRegistry( - @NonNull String logPrefix, - @NonNull TypeCodec[] primitiveCodecs, - @NonNull TypeCodec[] userCodecs) { + @NonNull String logPrefix, @NonNull TypeCodec[] primitiveCodecs) { this.logPrefix = logPrefix; this.primitiveCodecs = primitiveCodecs; - this.userCodecs = userCodecs; this.primitiveCodecsByCode = sortByProtocolCode(primitiveCodecs); } + /** + * @deprecated this constructor calls an overridable method ({@link #register(TypeCodec[])}), + * which is a bad practice. The recommended alternative is to use {@link + * #CachingCodecRegistry(String, TypeCodec[])}, then add the codecs with one of the {@link + * #register} methods. + */ + @Deprecated + protected CachingCodecRegistry( + @NonNull String logPrefix, + @NonNull TypeCodec[] primitiveCodecs, + @NonNull TypeCodec[] userCodecs) { + this(logPrefix, primitiveCodecs); + register(userCodecs); + } + + @Override + public void register(TypeCodec newCodec) { + // This method could work without synchronization, but there is a tiny race condition that would + // allow two threads to register colliding codecs (the last added codec would later be ignored, + // but without any warning). Serialize calls to avoid that: + registerLock.lock(); + try { + for (TypeCodec primitiveCodec : primitiveCodecs) { + if (collides(newCodec, primitiveCodec)) { + LOG.warn( + "[{}] Ignoring codec {} because it collides with built-in primitive codec {}", + logPrefix, + newCodec, + primitiveCodec); + return; + } + } + for (TypeCodec userCodec : userCodecs) { + if (collides(newCodec, userCodec)) { + LOG.warn( + "[{}] Ignoring codec {} because it collides with previously registered codec {}", + logPrefix, + newCodec, + userCodec); + return; + } + } + // Technically this would cover the two previous cases as well, but we want precise messages. + try { + TypeCodec cachedCodec = + getCachedCodec(newCodec.getCqlType(), newCodec.getJavaType(), false); + LOG.warn( + "[{}] Ignoring codec {} because it collides with previously generated codec {}", + logPrefix, + newCodec, + cachedCodec); + return; + } catch (CodecNotFoundException ignored) { + // Catching the exception is ugly, but it avoids breaking the internal API (e.g. by adding a + // getCachedCodecIfExists) + } + userCodecs.add(newCodec); + } finally { + registerLock.unlock(); + } + } + + private boolean collides(TypeCodec newCodec, TypeCodec oldCodec) { + return oldCodec.accepts(newCodec.getCqlType()) && oldCodec.accepts(newCodec.getJavaType()); + } + /** * Gets a complex codec from the cache. 
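To make the collision rules of `register()` concrete, a small sketch (the log prefix is a placeholder; the single-argument `DefaultCodecRegistry` constructor is added later in this patch):

```java
import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;
import com.datastax.oss.driver.internal.core.type.codec.extras.time.TimestampMillisCodec;
import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry;

public class RegisterExample {
  public static void main(String[] args) {
    MutableCodecRegistry registry = new DefaultCodecRegistry("example");
    // Accepted: no built-in codec maps the (timestamp, Long) pair
    registry.register(new TimestampMillisCodec());
    // Ignored with a warning: collides with the codec registered just above
    registry.register(new TimestampMillisCodec());
  }
}
```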
* @@ -176,12 +258,6 @@ public TypeCodec codecFor( } } - if (value instanceof TupleValue) { - return uncheckedCast(codecFor(cqlType, TupleValue.class)); - } else if (value instanceof UdtValue) { - return uncheckedCast(codecFor(cqlType, UdtValue.class)); - } - GenericType javaType = inspectType(value, cqlType); LOG.trace("[{}] Continuing based on inferred type {}", logPrefix, javaType); return uncheckedCast(getCachedCodec(cqlType, javaType, true)); @@ -206,15 +282,14 @@ public TypeCodec codecFor(@NonNull JavaTypeT value) { } } - if (value instanceof TupleValue) { - return uncheckedCast(codecFor(((TupleValue) value).getType(), TupleValue.class)); - } else if (value instanceof UdtValue) { - return uncheckedCast(codecFor(((UdtValue) value).getType(), UdtValue.class)); - } - - GenericType javaType = inspectType(value, null); - LOG.trace("[{}] Continuing based on inferred type {}", logPrefix, javaType); - return uncheckedCast(getCachedCodec(null, javaType, true)); + DataType cqlType = inferCqlTypeFromValue(value); + GenericType javaType = inspectType(value, cqlType); + LOG.trace( + "[{}] Continuing based on inferred CQL type {} and Java type {}", + logPrefix, + cqlType, + javaType); + return uncheckedCast(getCachedCodec(cqlType, javaType, true)); } @NonNull @@ -249,50 +324,81 @@ protected TypeCodec codecFor( protected boolean matches( @NonNull TypeCodec codec, @NonNull GenericType javaType, boolean isJavaCovariant) { - return (isJavaCovariant) - ? codec.getJavaType().isSupertypeOf(javaType) - : codec.accepts(javaType); + return isJavaCovariant ? codec.getJavaType().isSupertypeOf(javaType) : codec.accepts(javaType); } @NonNull protected GenericType inspectType(@NonNull Object value, @Nullable DataType cqlType) { if (value instanceof List) { - List list = (List) value; + List list = (List) value; if (list.isEmpty()) { // Empty collections are always encoded the same way, so any element type will do // in the absence of a CQL type. When the CQL type is known, we try to infer the best Java // type. return cqlType == null ? JAVA_TYPE_FOR_EMPTY_LISTS : inferJavaTypeFromCqlType(cqlType); } else { + Object firstElement = list.get(0); + if (firstElement == null) { + throw new IllegalArgumentException( + "Can't infer list codec because the first element is null " + + "(note that CQL does not allow null values in collections)"); + } GenericType elementType = inspectType( - list.get(0), cqlType == null ? null : ((ListType) cqlType).getElementType()); + firstElement, cqlType == null ? null : ((ContainerType) cqlType).getElementType()); return GenericType.listOf(elementType); } } else if (value instanceof Set) { - Set set = (Set) value; + Set set = (Set) value; if (set.isEmpty()) { return cqlType == null ? JAVA_TYPE_FOR_EMPTY_SETS : inferJavaTypeFromCqlType(cqlType); } else { + Object firstElement = set.iterator().next(); + if (firstElement == null) { + throw new IllegalArgumentException( + "Can't infer set codec because the first element is null " + + "(note that CQL does not allow null values in collections)"); + } GenericType elementType = inspectType( - set.iterator().next(), - cqlType == null ? null : ((SetType) cqlType).getElementType()); + firstElement, cqlType == null ? null : ((SetType) cqlType).getElementType()); return GenericType.setOf(elementType); } } else if (value instanceof Map) { - Map map = (Map) value; + Map map = (Map) value; if (map.isEmpty()) { return cqlType == null ? 
JAVA_TYPE_FOR_EMPTY_MAPS : inferJavaTypeFromCqlType(cqlType); } else { - Map.Entry entry = map.entrySet().iterator().next(); + Map.Entry firstEntry = map.entrySet().iterator().next(); + Object firstKey = firstEntry.getKey(); + Object firstValue = firstEntry.getValue(); + if (firstKey == null || firstValue == null) { + throw new IllegalArgumentException( + "Can't infer map codec because the first key and/or value is null " + + "(note that CQL does not allow null values in collections)"); + } GenericType keyType = - inspectType(entry.getKey(), cqlType == null ? null : ((MapType) cqlType).getKeyType()); + inspectType(firstKey, cqlType == null ? null : ((MapType) cqlType).getKeyType()); GenericType valueType = - inspectType( - entry.getValue(), cqlType == null ? null : ((MapType) cqlType).getValueType()); + inspectType(firstValue, cqlType == null ? null : ((MapType) cqlType).getValueType()); return GenericType.mapOf(keyType, valueType); } + } else if (value instanceof CqlVector) { + CqlVector vector = (CqlVector) value; + if (vector.isEmpty()) { + return cqlType == null ? JAVA_TYPE_FOR_EMPTY_CQLVECTORS : inferJavaTypeFromCqlType(cqlType); + } else { + Object firstElement = vector.iterator().next(); + if (firstElement == null) { + throw new IllegalArgumentException( + "Can't infer vector codec because the first element is null " + + "(note that CQL does not allow null values in collections)"); + } + GenericType elementType = + inspectType( + firstElement, cqlType == null ? null : ((VectorType) cqlType).getElementType()); + return GenericType.vectorOf(elementType); + } } else { // There's not much more we can do return GenericType.of(value.getClass()); @@ -312,6 +418,10 @@ protected GenericType inferJavaTypeFromCqlType(@NonNull DataType cqlType) { DataType valueType = ((MapType) cqlType).getValueType(); return GenericType.mapOf( inferJavaTypeFromCqlType(keyType), inferJavaTypeFromCqlType(valueType)); + } else if (cqlType instanceof VectorType) { + DataType elementType = ((VectorType) cqlType).getElementType(); + GenericType numberType = inferJavaTypeFromCqlType(elementType); + return GenericType.vectorOf(numberType); } switch (cqlType.getProtocolCode()) { case ProtocolConstants.DataType.CUSTOM: @@ -361,6 +471,141 @@ protected GenericType inferJavaTypeFromCqlType(@NonNull DataType cqlType) { } } + @Nullable + protected DataType inferCqlTypeFromValue(@NonNull Object value) { + if (value instanceof List) { + List list = (List) value; + if (list.isEmpty()) { + return CQL_TYPE_FOR_EMPTY_LISTS; + } + Object firstElement = list.get(0); + if (firstElement == null) { + throw new IllegalArgumentException( + "Can't infer list codec because the first element is null " + + "(note that CQL does not allow null values in collections)"); + } + DataType elementType = inferCqlTypeFromValue(firstElement); + if (elementType == null) { + return null; + } + return DataTypes.listOf(elementType); + } else if (value instanceof Set) { + Set set = (Set) value; + if (set.isEmpty()) { + return CQL_TYPE_FOR_EMPTY_SETS; + } + Object firstElement = set.iterator().next(); + if (firstElement == null) { + throw new IllegalArgumentException( + "Can't infer set codec because the first element is null " + + "(note that CQL does not allow null values in collections)"); + } + DataType elementType = inferCqlTypeFromValue(firstElement); + if (elementType == null) { + return null; + } + return DataTypes.setOf(elementType); + } else if (value instanceof Map) { + Map map = (Map) value; + if (map.isEmpty()) { + return 
CQL_TYPE_FOR_EMPTY_MAPS; + } + Entry firstEntry = map.entrySet().iterator().next(); + Object firstKey = firstEntry.getKey(); + Object firstValue = firstEntry.getValue(); + if (firstKey == null || firstValue == null) { + throw new IllegalArgumentException( + "Can't infer map codec because the first key and/or value is null " + + "(note that CQL does not allow null values in collections)"); + } + DataType keyType = inferCqlTypeFromValue(firstKey); + DataType valueType = inferCqlTypeFromValue(firstValue); + if (keyType == null || valueType == null) { + return null; + } + return DataTypes.mapOf(keyType, valueType); + } else if (value instanceof CqlVector) { + CqlVector vector = (CqlVector) value; + if (vector.isEmpty()) { + return CQL_TYPE_FOR_EMPTY_VECTORS; + } + Object firstElement = vector.iterator().next(); + if (firstElement == null) { + throw new IllegalArgumentException( + "Can't infer vector codec because the first element is null " + + "(note that CQL does not allow null values in collections)"); + } + DataType elementType = inferCqlTypeFromValue(firstElement); + if (elementType == null) { + return null; + } + return DataTypes.vectorOf(elementType, vector.size()); + } + Class javaClass = value.getClass(); + if (ByteBuffer.class.isAssignableFrom(javaClass)) { + return DataTypes.BLOB; + } else if (String.class.equals(javaClass)) { + return DataTypes.TEXT; + } else if (Long.class.equals(javaClass)) { + return DataTypes.BIGINT; + } else if (Boolean.class.equals(javaClass)) { + return DataTypes.BOOLEAN; + } else if (BigDecimal.class.equals(javaClass)) { + return DataTypes.DECIMAL; + } else if (Double.class.equals(javaClass)) { + return DataTypes.DOUBLE; + } else if (Float.class.equals(javaClass)) { + return DataTypes.FLOAT; + } else if (Integer.class.equals(javaClass)) { + return DataTypes.INT; + } else if (Instant.class.equals(javaClass)) { + return DataTypes.TIMESTAMP; + } else if (UUID.class.equals(javaClass)) { + return DataTypes.UUID; + } else if (BigInteger.class.equals(javaClass)) { + return DataTypes.VARINT; + } else if (InetAddress.class.isAssignableFrom(javaClass)) { + return DataTypes.INET; + } else if (LocalDate.class.equals(javaClass)) { + return DataTypes.DATE; + } else if (LocalTime.class.equals(javaClass)) { + return DataTypes.TIME; + } else if (Short.class.equals(javaClass)) { + return DataTypes.SMALLINT; + } else if (Byte.class.equals(javaClass)) { + return DataTypes.TINYINT; + } else if (CqlDuration.class.equals(javaClass)) { + return DataTypes.DURATION; + } else if (UdtValue.class.isAssignableFrom(javaClass)) { + return ((UdtValue) value).getType(); + } else if (TupleValue.class.isAssignableFrom(javaClass)) { + return ((TupleValue) value).getType(); + } + // This might mean that the java type is a custom type with a custom codec, + // so don't throw CodecNotFoundException just yet. 
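Illustrating the step above: with `inferCqlTypeFromValue()` in place, a value-only lookup can resolve both the CQL and the Java side. A sketch, not part of the patch (generic signatures shown here were flattened in this rendering of the diff):

```java
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry;
import java.util.Arrays;
import java.util.List;

public class InferenceExample {
  public static void main(String[] args) {
    DefaultCodecRegistry registry = new DefaultCodecRegistry("example");
    // Inferred CQL type: list<text>; inferred Java type: List<String>
    TypeCodec<List<String>> codec = registry.codecFor(Arrays.asList("a", "b"));
    System.out.println(codec.getCqlType());
  }
}
```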
+ return null; + } + + private TypeCodec getElementCodecForCqlAndJavaType( + ContainerType cqlType, TypeToken token, boolean isJavaCovariant) { + + DataType elementCqlType = cqlType.getElementType(); + if (token.getType() instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); + GenericType elementJavaType = GenericType.of(typeArguments[0]); + return uncheckedCast(codecFor(elementCqlType, elementJavaType, isJavaCovariant)); + } + return codecFor(elementCqlType); + } + + private TypeCodec getElementCodecForJavaType( + ParameterizedType parameterizedType, boolean isJavaCovariant) { + + Type[] typeArguments = parameterizedType.getActualTypeArguments(); + GenericType elementType = GenericType.of(typeArguments[0]); + return codecFor(elementType, isJavaCovariant); + } + // Try to create a codec when we haven't found it in the cache @NonNull protected TypeCodec createCodec( @@ -375,26 +620,12 @@ protected TypeCodec createCodec( } else { // Both non-null TypeToken token = javaType.__getToken(); if (cqlType instanceof ListType && List.class.isAssignableFrom(token.getRawType())) { - DataType elementCqlType = ((ListType) cqlType).getElementType(); - TypeCodec elementCodec; - if (token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType elementJavaType = GenericType.of(typeArguments[0]); - elementCodec = uncheckedCast(codecFor(elementCqlType, elementJavaType, isJavaCovariant)); - } else { - elementCodec = codecFor(elementCqlType); - } + TypeCodec elementCodec = + getElementCodecForCqlAndJavaType((ContainerType) cqlType, token, isJavaCovariant); return TypeCodecs.listOf(elementCodec); } else if (cqlType instanceof SetType && Set.class.isAssignableFrom(token.getRawType())) { - DataType elementCqlType = ((SetType) cqlType).getElementType(); - TypeCodec elementCodec; - if (token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType elementJavaType = GenericType.of(typeArguments[0]); - elementCodec = uncheckedCast(codecFor(elementCqlType, elementJavaType, isJavaCovariant)); - } else { - elementCodec = codecFor(elementCqlType); - } + TypeCodec elementCodec = + getElementCodecForCqlAndJavaType((ContainerType) cqlType, token, isJavaCovariant); return TypeCodecs.setOf(elementCodec); } else if (cqlType instanceof MapType && Map.class.isAssignableFrom(token.getRawType())) { DataType keyCqlType = ((MapType) cqlType).getKeyType(); @@ -418,6 +649,15 @@ protected TypeCodec createCodec( } else if (cqlType instanceof UserDefinedType && UdtValue.class.isAssignableFrom(token.getRawType())) { return TypeCodecs.udtOf((UserDefinedType) cqlType); + } else if (cqlType instanceof VectorType + && CqlVector.class.isAssignableFrom(token.getRawType())) { + VectorType vectorType = (VectorType) cqlType; + /* For a vector type we'll always get back an instance of TypeCodec due to the + * type of CqlVector... but getElementCodecForCqlAndJavaType() is a generalized function that can't + * return this more precise type. Thus the cast here. 
*/ + TypeCodec elementCodec = + uncheckedCast(getElementCodecForCqlAndJavaType(vectorType, token, isJavaCovariant)); + return TypeCodecs.vectorOf(vectorType, elementCodec); } else if (cqlType instanceof CustomType && ByteBuffer.class.isAssignableFrom(token.getRawType())) { return TypeCodecs.custom(cqlType); @@ -433,15 +673,13 @@ protected TypeCodec createCodec(@NonNull GenericType javaType, boolean isJ TypeToken token = javaType.__getToken(); if (List.class.isAssignableFrom(token.getRawType()) && token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType elementType = GenericType.of(typeArguments[0]); - TypeCodec elementCodec = codecFor(elementType, isJavaCovariant); + TypeCodec elementCodec = + getElementCodecForJavaType((ParameterizedType) token.getType(), isJavaCovariant); return TypeCodecs.listOf(elementCodec); } else if (Set.class.isAssignableFrom(token.getRawType()) && token.getType() instanceof ParameterizedType) { - Type[] typeArguments = ((ParameterizedType) token.getType()).getActualTypeArguments(); - GenericType elementType = GenericType.of(typeArguments[0]); - TypeCodec elementCodec = codecFor(elementType, isJavaCovariant); + TypeCodec elementCodec = + getElementCodecForJavaType((ParameterizedType) token.getType(), isJavaCovariant); return TypeCodecs.setOf(elementCodec); } else if (Map.class.isAssignableFrom(token.getRawType()) && token.getType() instanceof ParameterizedType) { @@ -452,6 +690,9 @@ protected TypeCodec createCodec(@NonNull GenericType javaType, boolean isJ TypeCodec valueCodec = codecFor(valueType, isJavaCovariant); return TypeCodecs.mapOf(keyCodec, valueCodec); } + /* Note that this method cannot generate TypeCodec instances for any CqlVector type. VectorCodec needs + * to know the dimensions of the vector it will be operating on and there's no way to determine that from + * the Java type alone. 
*/ throw new CodecNotFoundException(null, javaType); } @@ -473,6 +714,11 @@ protected TypeCodec createCodec(@NonNull DataType cqlType) { TypeCodec keyCodec = codecFor(keyType); TypeCodec valueCodec = codecFor(valueType); return TypeCodecs.mapOf(keyCodec, valueCodec); + } else if (cqlType instanceof VectorType) { + VectorType vectorType = (VectorType) cqlType; + TypeCodec elementCodec = + uncheckedCast(codecFor(vectorType.getElementType())); + return TypeCodecs.vectorOf(vectorType, elementCodec); } else if (cqlType instanceof TupleType) { return TypeCodecs.tupleOf((TupleType) cqlType); } else if (cqlType instanceof UserDefinedType) { @@ -483,8 +729,8 @@ protected TypeCodec createCodec(@NonNull DataType cqlType) { throw new CodecNotFoundException(cqlType, null); } - private static IntMap sortByProtocolCode(TypeCodec[] codecs) { - IntMap.Builder builder = IntMap.builder(); + private static IntMap> sortByProtocolCode(TypeCodec[] codecs) { + IntMap.Builder> builder = IntMap.builder(); for (TypeCodec codec : codecs) { builder.put(codec.getCqlType().getProtocolCode(), codec); } @@ -508,4 +754,11 @@ private static TypeCodec uncheckedCast( GenericType.setOf(Boolean.class); private static final GenericType> JAVA_TYPE_FOR_EMPTY_MAPS = GenericType.mapOf(Boolean.class, Boolean.class); + private static final GenericType> JAVA_TYPE_FOR_EMPTY_CQLVECTORS = + GenericType.vectorOf(Number.class); + private static final DataType CQL_TYPE_FOR_EMPTY_LISTS = DataTypes.listOf(DataTypes.BOOLEAN); + private static final DataType CQL_TYPE_FOR_EMPTY_SETS = DataTypes.setOf(DataTypes.BOOLEAN); + private static final DataType CQL_TYPE_FOR_EMPTY_MAPS = + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN); + private static final DataType CQL_TYPE_FOR_EMPTY_VECTORS = DataTypes.vectorOf(DataTypes.INT, 0); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java index d52f79cc9f4..bbf77bdf5dc 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/CodecRegistryConstants.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,7 +30,7 @@ public class CodecRegistryConstants { * *
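As the two notes above explain, a vector codec can be created from the CQL type alone (the dimensions are part of the type) but never from the Java type alone. A sketch of the working direction:

```java
import com.datastax.oss.driver.api.core.data.CqlVector;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry;

public class VectorLookupExample {
  public static void main(String[] args) {
    DefaultCodecRegistry registry = new DefaultCodecRegistry("example");
    // Works: the dimensions (3) are read from the CQL type
    TypeCodec<CqlVector<Float>> codec =
        registry.codecFor(DataTypes.vectorOf(DataTypes.FLOAT, 3));
    System.out.println(codec.getJavaType());
  }
}
```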

      This is exposed in case you want to call {@link * DefaultCodecRegistry#DefaultCodecRegistry(String, int, BiFunction, int, BiConsumer, - * TypeCodec[], TypeCodec[])} but only customize the caching options. + * TypeCodec[])} but only customize the caching options. */ public static final TypeCodec[] PRIMITIVE_CODECS = new TypeCodec[] { diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java index 26a93837026..cc14740e180 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/registry/DefaultCodecRegistry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,7 +50,24 @@ public class DefaultCodecRegistry extends CachingCodecRegistry { private final LoadingCache> cache; /** - * Creates a new instance, with some amount of control over the cache behavior. + * Creates a new instance that accepts user codecs, with the default built-in codecs and the + * default cache behavior. + */ + public DefaultCodecRegistry(@NonNull String logPrefix) { + this(logPrefix, CodecRegistryConstants.PRIMITIVE_CODECS); + } + + /** + * Creates a new instance that accepts user codecs, with the given built-in codecs and the default + * cache behavior. + */ + public DefaultCodecRegistry(@NonNull String logPrefix, @NonNull TypeCodec... primitiveCodecs) { + this(logPrefix, 0, null, 0, null, primitiveCodecs); + } + + /** + * Same as {@link #DefaultCodecRegistry(String, TypeCodec[])}, but with some amount of control + * over cache behavior. * *

      Giving full access to the Guava cache API would be too much work, since it is shaded and we * have to wrap everything. If you need something that's not available here, it's easy enough to @@ -61,10 +80,9 @@ public DefaultCodecRegistry( @Nullable BiFunction, Integer> cacheWeigher, int maximumCacheWeight, @Nullable BiConsumer> cacheRemovalListener, - @NonNull TypeCodec[] primitiveCodecs, - @NonNull TypeCodec[] userCodecs) { + @NonNull TypeCodec... primitiveCodecs) { - super(logPrefix, primitiveCodecs, userCodecs); + super(logPrefix, primitiveCodecs); CacheBuilder cacheBuilder = CacheBuilder.newBuilder(); if (initialCacheCapacity > 0) { cacheBuilder.initialCapacity(initialCacheCapacity); @@ -93,17 +111,6 @@ public TypeCodec load(@NonNull CacheKey key) throws Exception { } } - public DefaultCodecRegistry(@NonNull String logPrefix, @NonNull TypeCodec... userCodecs) { - this(logPrefix, CodecRegistryConstants.PRIMITIVE_CODECS, userCodecs); - } - - public DefaultCodecRegistry( - @NonNull String logPrefix, - @NonNull TypeCodec[] primitiveCodecs, - @NonNull TypeCodec... userCodecs) { - this(logPrefix, 0, null, 0, null, primitiveCodecs, userCodecs); - } - @Override protected TypeCodec getCachedCodec( @Nullable DataType cqlType, @Nullable GenericType javaType, boolean isJavaCovariant) { @@ -152,7 +159,10 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(cqlType, javaType, isJavaCovariant); + // NOTE: inlined Objects.hash for performance reasons (avoid Object[] allocation + // seen in profiler allocation traces) + return ((31 + Objects.hashCode(cqlType)) * 31 + Objects.hashCode(javaType)) * 31 + + Boolean.hashCode(isJavaCovariant); } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java b/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java index 918949a13fc..552f84f2ae1 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/type/util/VIntCoding.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
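The inlined `CacheKey.hashCode()` above is arithmetically identical to `Objects.hash(cqlType, javaType, isJavaCovariant)`, which seeds its accumulator with 1 (hence the leading 31); the inlined form only avoids the varargs `Object[]` allocation. A standalone check with placeholder values:

```java
import java.util.Objects;

public class HashEquivalenceCheck {
  public static void main(String[] args) {
    Object cqlType = "timestamp"; // placeholders; any objects (or nulls) work
    Object javaType = "java.lang.Long";
    boolean isJavaCovariant = false;
    int inlined =
        ((31 + Objects.hashCode(cqlType)) * 31 + Objects.hashCode(javaType)) * 31
            + Boolean.hashCode(isJavaCovariant);
    System.out.println(inlined == Objects.hash(cqlType, javaType, isJavaCovariant)); // true
  }
}
```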
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,6 +49,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import java.nio.ByteBuffer; /** * Variable length encoding inspired from Google protocol buffers [...] >> 6; } + + public static void writeUnsignedVInt32(int value, ByteBuffer output) { + writeUnsignedVInt((long) value, output); + } + + public static void writeUnsignedVInt(long value, ByteBuffer output) { + int size = VIntCoding.computeUnsignedVIntSize(value); + if (size == 1) { + output.put((byte) value); + return; + } + + output.put(VIntCoding.encodeVInt(value, size), 0, size); + } + + /** + * Read up to a 32-bit integer back, using the unsigned (no zigzag) encoding. + * + *

Note this method is the same as {@link #readUnsignedVInt(DataInput)}, except that we do + * *not* block if there are not enough bytes in the buffer to reconstruct the value. + * + * @throws VIntOutOfRangeException If the vint doesn't fit into a 32-bit integer + */ + public static int getUnsignedVInt32(ByteBuffer input, int readerIndex) { + return checkedCast(getUnsignedVInt(input, readerIndex)); + } + + public static long getUnsignedVInt(ByteBuffer input, int readerIndex) { + return getUnsignedVInt(input, readerIndex, input.limit()); + } + + public static long getUnsignedVInt(ByteBuffer input, int readerIndex, int readerLimit) { + if (readerIndex < 0) + throw new IllegalArgumentException( + "Reader index should be non-negative, but was " + readerIndex); + + if (readerIndex >= readerLimit) return -1; + + int firstByte = input.get(readerIndex++); + + // Bail out early if this is a one-byte value; necessary, since the extra-bytes computation below assumes a negative first byte + if (firstByte >= 0) return firstByte; + + int size = numberOfExtraBytesToRead(firstByte); + if (readerIndex + size > readerLimit) return -1; + + long retval = firstByte & firstByteValueMask(size); + for (int ii = 0; ii < size; ii++) { + byte b = input.get(readerIndex++); + retval <<= 8; + retval |= b & 0xff; + } + + return retval; + } + + public static int checkedCast(long value) { + int result = (int) value; + if ((long) result != value) throw new VIntOutOfRangeException(value); + return result; + } + + /** + * Thrown when attempting to decode a vint and the output type doesn't have enough space to fit the + * value that was decoded. + */ + public static class VIntOutOfRangeException extends RuntimeException { + public final long value; + + private VIntOutOfRangeException(long value) { + super(value + " is out of range for a 32-bit integer"); + this.value = value; + } + } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java new file mode 100644 index 00000000000..8905edb9192 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/AddressUtils.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
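A round-trip sketch for the new `ByteBuffer`-based vint helpers above (not part of the patch; 9 bytes is the maximum unsigned vint size):

```java
import com.datastax.oss.driver.internal.core.type.util.VIntCoding;
import java.nio.ByteBuffer;

public class VIntExample {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(9);
    VIntCoding.writeUnsignedVInt32(300, buf);
    buf.flip();
    // Absolute, non-blocking read; throws VIntOutOfRangeException past 32 bits
    int decoded = VIntCoding.getUnsignedVInt32(buf, 0);
    System.out.println(decoded); // 300
  }
}
```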
+ */ +package com.datastax.oss.driver.internal.core.util; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.HashSet; +import java.util.Set; + +public class AddressUtils { + + public static Set extract(String address, boolean resolve) { + int separator = address.lastIndexOf(':'); + if (separator < 0) { + throw new IllegalArgumentException("expecting format host:port"); + } + + String host = address.substring(0, separator); + String portString = address.substring(separator + 1); + int port; + try { + port = Integer.parseInt(portString); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("expecting port to be a number, got " + portString, e); + } + if (!resolve) { + return ImmutableSet.of(InetSocketAddress.createUnresolved(host, port)); + } else { + InetAddress[] inetAddresses; + try { + inetAddresses = InetAddress.getAllByName(host); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + Set result = new HashSet<>(); + for (InetAddress inetAddress : inetAddresses) { + result.add(new InetSocketAddress(inetAddress, port)); + } + return result; + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java index 25597e190c9..490b1dc7d17 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/ArrayUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
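Usage sketch for `AddressUtils.extract` above (the generic return type `Set<InetSocketAddress>`, flattened in this rendering of the diff, is assumed):

```java
import com.datastax.oss.driver.internal.core.util.AddressUtils;
import java.net.InetSocketAddress;
import java.util.Set;

public class AddressExample {
  public static void main(String[] args) {
    // resolve=false: exactly one unresolved address, no DNS lookup
    Set<InetSocketAddress> unresolved = AddressUtils.extract("127.0.0.1:9042", false);
    // resolve=true: one entry per address record returned for the host name
    Set<InetSocketAddress> resolved = AddressUtils.extract("localhost:9042", true);
    System.out.println(unresolved + " / " + resolved);
  }
}
```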
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.util; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Random; import java.util.concurrent.ThreadLocalRandom; public class ArrayUtils { @@ -75,7 +78,7 @@ public static void shuffleHead(@NonNull ElementT[] elements, int n) { * Fisher-Yates shuffle */ public static void shuffleHead( - @NonNull ElementT[] elements, int n, @NonNull ThreadLocalRandom random) { + @NonNull ElementT[] elements, int n, @NonNull Random random) { if (n > elements.length) { throw new ArrayIndexOutOfBoundsException( String.format( diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java new file mode 100644 index 00000000000..0dd9a85fcc6 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CollectionsUtils.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +public class CollectionsUtils { + public static Map combineListsIntoOrderedMap(List keys, List values) { + if (keys.size() != values.size()) { + throw new IllegalArgumentException("Cannot combine lists with non-matching sizes"); + } + + Map map = new LinkedHashMap<>(); + for (int i = 0; i < keys.size(); i++) { + map.put(keys.get(i), values.get(i)); + } + return map; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java index a65808e7b2a..391996d9369 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/CountingIterator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
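And for `CollectionsUtils` above, the order-preserving zip of two lists (generic signature assumed, as above):

```java
import com.datastax.oss.driver.internal.core.util.CollectionsUtils;
import java.util.Arrays;
import java.util.Map;

public class ZipExample {
  public static void main(String[] args) {
    Map<String, Integer> m =
        CollectionsUtils.combineListsIntoOrderedMap(Arrays.asList("a", "b"), Arrays.asList(1, 2));
    System.out.println(m); // {a=1, b=2} -- LinkedHashMap keeps key order
  }
}
```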
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DefaultDependencyChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/DefaultDependencyChecker.java new file mode 100644 index 00000000000..2e717590569 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/DefaultDependencyChecker.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util; + +import java.util.concurrent.ConcurrentHashMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A checker for the presence of various {@link Dependency} instances at runtime. Predicate tests + * for Graal substitutions should NOT use this class; see {@link GraalDependencyChecker} for more + * information. + */ +public class DefaultDependencyChecker { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultDependencyChecker.class); + + private static final ConcurrentHashMap CACHE = new ConcurrentHashMap<>(); + + /** + * Return true iff we can find all classes for the dependency on the classpath, false otherwise + * + * @param dependency the dependency to search for + * @return true if the dependency is available, false otherwise + */ + public static boolean isPresent(Dependency dependency) { + try { + return CACHE.computeIfAbsent( + dependency, + (dep) -> { + for (String classNameToTest : dependency.classes()) { + // Always use the driver class loader, assuming that the driver classes and + // the dependency classes are either being loaded by the same class loader, + // or – as in OSGi deployments – by two distinct, but compatible class loaders. 
+ if (Reflection.loadClass(null, classNameToTest) == null) { + return false; + } + } + return true; + }); + } catch (Exception e) { + LOG.warn("Unexpected exception when checking for dependency " + dependency, e); + return false; + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java new file mode 100644 index 00000000000..97cfa25d9af --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Dependency.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/** + * A set of driver optional dependencies and a common mechanism to test the presence of such + * dependencies on the application's classpath. + * + *

      We use the given fully-qualified names of classes to test the presence of the whole dependency + * on the classpath, including its transitive dependencies if applicable. This assumes that if these + * classes are present, then the entire library is present and functional, and vice versa. + * + *

      Note: some of the libraries declared here may be shaded; in these cases the shade plugin will + * replace the package names listed above with names starting with {@code + * com.datastax.oss.driver.shaded.*}, but the presence check would still work as expected. + */ +public enum Dependency { + SNAPPY("org.xerial.snappy.Snappy"), + LZ4("net.jpountz.lz4.LZ4Compressor"), + ESRI("com.esri.core.geometry.ogc.OGCGeometry"), + TINKERPOP( + // gremlin-core + "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal", + // tinkergraph-gremlin + "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0"), + REACTIVE_STREAMS("org.reactivestreams.Publisher"), + JACKSON( + // jackson-core + "com.fasterxml.jackson.core.JsonParser", + // jackson-databind + "com.fasterxml.jackson.databind.ObjectMapper"), + DROPWIZARD("com.codahale.metrics.MetricRegistry"), + ; + + @SuppressWarnings("ImmutableEnumChecker") + private final List clzs; + + Dependency(String... classNames) { + clzs = Collections.unmodifiableList(Arrays.asList(classNames)); + } + + public Iterable classes() { + return this.clzs; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java index 6f75d759451..b9ab863cb88 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/DirectedGraph.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/GraalDependencyChecker.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/GraalDependencyChecker.java new file mode 100644 index 00000000000..c80970eb3b6 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/GraalDependencyChecker.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
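Typical use of the `Dependency` enum with the checker above; the feature guard itself is hypothetical:

```java
import com.datastax.oss.driver.internal.core.util.DefaultDependencyChecker;
import com.datastax.oss.driver.internal.core.util.Dependency;

public class OptionalFeatureExample {
  public static void main(String[] args) {
    if (DefaultDependencyChecker.isPresent(Dependency.JACKSON)) {
      System.out.println("Jackson present: JSON-based features can be enabled");
    } else {
      System.out.println("Jackson absent: skipping JSON-based features");
    }
  }
}
```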
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util; + +import java.util.concurrent.ConcurrentHashMap; + +/** + * A dependency checker implementation which should be safe to use for build-time checks when + * building Graal native images. This class is similar to {@link DefaultDependencyChecker} but + * doesn't introduce any external dependencies which might complicate the native image build + * process. Expectation is that this will be most prominently used in the various predicate classes + * which determine whether or not Graal substitutions should be used. + */ +public class GraalDependencyChecker { + + private static final ConcurrentHashMap CACHE = new ConcurrentHashMap<>(); + + /** + * Return true iff we can find all classes for the dependency on the classpath, false otherwise + * + * @param dependency the dependency to search for + * @return true if the dependency is available, false otherwise + */ + public static boolean isPresent(Dependency dependency) { + try { + return CACHE.computeIfAbsent( + dependency, + (dep) -> { + for (String classNameToTest : dependency.classes()) { + // Note that this lands in a pretty similar spot to + // Reflection.loadClass() with a null class loader + // arg. Major difference here is that we avoid the + // more complex exception handling/logging ops in + // that code. + try { + Class.forName(classNameToTest); + } catch (LinkageError | Exception e) { + return false; + } + } + return true; + }); + } catch (Exception e) { + return false; + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java index eeb753830bc..99dca2c60c0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Loggers.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,12 @@ */ package com.datastax.oss.driver.internal.core.util; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.data.AccessibleById; +import com.datastax.oss.driver.api.core.data.AccessibleByName; +import com.datastax.oss.driver.api.core.type.UserDefinedType; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class Loggers { @@ -38,4 +45,10 @@ public static void warnWithException(Logger logger, String format, Object... arg } } } + + // Loggers for API interfaces, declared here in order to keep them internal. 
+ public static Logger COLUMN_DEFINITIONS = LoggerFactory.getLogger(ColumnDefinitions.class); + public static Logger ACCESSIBLE_BY_ID = LoggerFactory.getLogger(AccessibleById.class); + public static Logger ACCESSIBLE_BY_NAME = LoggerFactory.getLogger(AccessibleByName.class); + public static Logger USER_DEFINED_TYPE = LoggerFactory.getLogger(UserDefinedType.class); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java index 9f4ec8bc978..0001bc9925c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/NanoTime.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java index 06b47479eee..f653ea6f5f9 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/ProtocolUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -95,6 +97,10 @@ public static String errorCodeString(int errorCode) { return "FUNCTION_FAILURE"; case ProtocolConstants.ErrorCode.WRITE_FAILURE: return "WRITE_FAILURE"; + case ProtocolConstants.ErrorCode.CDC_WRITE_FAILURE: + return "CDC_WRITE_FAILURE"; + case ProtocolConstants.ErrorCode.CAS_WRITE_UNKNOWN: + return "CAS_WRITE_UNKNOWN"; case ProtocolConstants.ErrorCode.SYNTAX_ERROR: return "SYNTAX_ERROR"; case ProtocolConstants.ErrorCode.UNAUTHORIZED: diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java index d57e23c3982..75a8f5b7380 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Reflection.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,12 +23,16 @@ import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.shaded.guava.common.base.Joiner; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.ListMultimap; import com.datastax.oss.driver.shaded.guava.common.collect.MultimapBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.Optional; import org.slf4j.Logger; @@ -37,28 +43,43 @@ public class Reflection { private static final Logger LOG = LoggerFactory.getLogger(Reflection.class); /** - * Loads a class by name. + * Loads a class by name using the given {@link ClassLoader}. * - *
<p>
      This methods tries first with the current thread's context class loader (the intent is that - * if the driver is in a low-level loader of an application server -- e.g. bootstrap or system -- - * it can still find classes in the application's class loader). If it is null, it defaults to the - * class loader that loaded the class calling this method. + *
<p>
      If the class loader is null, the class will be loaded using the class loader that loaded the + * driver. * - * @return null if the class does not exist. + * @return null if the class does not exist or could not be loaded. */ - public static Class loadClass(ClassLoader classLoader, String className) { + @Nullable + public static Class loadClass(@Nullable ClassLoader classLoader, @NonNull String className) { try { - // If input classLoader is null, use current thread's ClassLoader, if that is null, use - // default (calling class') ClassLoader. - ClassLoader cl = - classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader(); - if (cl != null) { - return Class.forName(className, true, cl); + Class clazz; + if (classLoader == null) { + LOG.trace("Attempting to load {} with driver's class loader", className); + clazz = Class.forName(className); + } else { + LOG.trace("Attempting to load {} with {}", className, classLoader); + clazz = Class.forName(className, true, classLoader); + } + LOG.trace("Successfully loaded {}", className); + return clazz; + } catch (LinkageError | Exception e) { + // Note: only ClassNotFoundException, LinkageError and SecurityException + // are declared to be thrown; however some class loaders (Apache Felix) + // may throw other checked exceptions, which cannot be caught directly + // because that would cause a compilation failure. + LOG.debug( + String.format("Could not load %s with loader %s: %s", className, classLoader, e), e); + if (classLoader == null) { + return null; } else { - return Class.forName(className); + // If the user-supplied class loader is unable to locate the class, try with the driver's + // default class loader. This is useful in OSGi deployments where the user-supplied loader + // may be able to load some classes but not all of them. Besides, the driver bundle, in + // OSGi, has a "Dynamic-Import:*" directive that makes its class loader capable of locating + // a great number of classes. + return loadClass(null, className); } - } catch (ClassNotFoundException e) { - return null; } } @@ -91,6 +112,36 @@ public static Optional buildFromConfig( return buildFromConfig(context, null, classNameOption, expectedSuperType, defaultPackages); } + /** + * Tries to create a list of instances, given an option defined in the driver configuration. + * + *
<p>
      For example: + * + *
<pre>
      +   * my-policy.classes = [my.package.MyPolicyImpl1,my.package.MyPolicyImpl2]
+   * </pre>
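As a rough sketch of the instantiation contract described just below (hypothetical helper with simplified error handling; the real logic lives in `resolveClass` further down in this file), each listed class is loaded and constructed reflectively with the driver context as sole argument:

```java
import com.datastax.oss.driver.api.core.context.DriverContext;
import java.util.ArrayList;
import java.util.List;

// Hypothetical, simplified version of what buildFromConfigList does for
// each configured class name: load it, then invoke its one-arg
// DriverContext constructor.
static <T> List<T> instantiateAll(
    List<String> classNames, Class<T> expectedSuperType, DriverContext context)
    throws Exception {
  List<T> instances = new ArrayList<>();
  for (String className : classNames) {
    Class<?> clazz = Class.forName(className);
    Object instance = clazz.getConstructor(DriverContext.class).newInstance(context);
    instances.add(expectedSuperType.cast(instance));
  }
  return instances;
}
```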
      + * + * Each class will be instantiated via reflection, and must have a constructor that takes a {@link + * DriverContext} argument. + * + * @param context the driver context. + * @param classNamesOption the option that indicates the class list. It will be looked up in the + * default profile of the configuration stored in the context. + * @param expectedSuperType a super-type that the classes are expected to implement/extend. + * @param defaultPackages the default packages to prepend to the class names if they are not + * qualified. They will be tried in order, the first one that matches an existing class will + * be used. + * @return the list of new instances, or an empty list if {@code classNamesOption} is not defined + * in the configuration. + */ + public static ImmutableList buildFromConfigList( + InternalDriverContext context, + DriverOption classNamesOption, + Class expectedSuperType, + String... defaultPackages) { + return buildFromConfigList(context, null, classNamesOption, expectedSuperType, defaultPackages); + } + /** * Tries to create multiple instances of a class, given options defined in the driver * configuration and possibly overridden in profiles. @@ -111,8 +162,11 @@ public static Optional buildFromConfig( * the default profile. * * @param context the driver context. - * @param rootOption the root option for the policy (my-policy in the example above). The class - * name is assumed to be in a 'class' child option. + * @param classNameOption the option that indicates the class (my-policy.class in the example + * above). + * @param rootOption the root of the section containing the policy's configuration (my-policy in + * the example above). Profiles that have the same contents under that section will share the + * same policy instance. * @param expectedSuperType a super-type that the class is expected to implement/extend. * @param defaultPackages the default packages to prepend to the class name if it's not qualified. * They will be tried in order, the first one that matches an existing class will be used. @@ -121,6 +175,7 @@ public static Optional buildFromConfig( */ public static Map buildFromConfigProfiles( InternalDriverContext context, + DriverOption classNameOption, DriverOption rootOption, Class expectedSuperType, String... defaultPackages) { @@ -138,8 +193,7 @@ public static Map buildFromConfigProfiles( // Since all profiles use the same config, we can use any of them String profileName = profiles.iterator().next(); ComponentT policy = - buildFromConfig( - context, profileName, classOption(rootOption), expectedSuperType, defaultPackages) + buildFromConfig(context, profileName, classNameOption, expectedSuperType, defaultPackages) .orElseThrow( () -> new IllegalArgumentException( @@ -179,6 +233,57 @@ public static Optional buildFromConfig( } String className = config.getString(classNameOption); + return Optional.of( + resolveClass( + context, profileName, expectedSuperType, configPath, className, defaultPackages)); + } + + /** + * @param profileName if null, this is a global policy, use the default profile and look for a + * one-arg constructor. If not null, this is a per-profile policy, look for a two-arg + * constructor. + */ + public static ImmutableList buildFromConfigList( + InternalDriverContext context, + String profileName, + DriverOption classNamesOption, + Class expectedSuperType, + String... defaultPackages) { + + DriverExecutionProfile config = + (profileName == null) + ? 
context.getConfig().getDefaultProfile() + : context.getConfig().getProfile(profileName); + + String configPath = classNamesOption.getPath(); + LOG.debug( + "Creating a list of {} from config option {}", + expectedSuperType.getSimpleName(), + configPath); + + if (!config.isDefined(classNamesOption)) { + LOG.debug("Option is not defined, skipping"); + return ImmutableList.of(); + } + + List classNames = config.getStringList(classNamesOption); + ImmutableList.Builder components = ImmutableList.builder(); + for (String className : classNames) { + components.add( + resolveClass( + context, profileName, expectedSuperType, configPath, className, defaultPackages)); + } + return components.build(); + } + + @NonNull + private static ComponentT resolveClass( + InternalDriverContext context, + String profileName, + Class expectedSuperType, + String configPath, + String className, + String[] defaultPackages) { Class clazz = null; if (className.contains(".")) { LOG.debug("Building from fully-qualified name {}", className); @@ -225,7 +330,7 @@ public static Optional buildFromConfig( (profileName == null) ? constructor.newInstance(context) : constructor.newInstance(context, profileName); - return Optional.of(instance); + return instance; } catch (Exception e) { // ITE just wraps an exception thrown by the constructor, get rid of it: Throwable cause = (e instanceof InvocationTargetException) ? e.getCause() : e; @@ -236,8 +341,4 @@ public static Optional buildFromConfig( cause); } } - - private static DriverOption classOption(DriverOption rootOption) { - return () -> rootOption.getPath() + ".class"; - } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java index fc1ef249edd..7d8895d228f 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/RoutingKey.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java index eb7ebedc6be..337895ec107 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Sizes.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +26,7 @@ import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.data.ValuesHelper; import com.datastax.oss.protocol.internal.FrameCodec; import com.datastax.oss.protocol.internal.PrimitiveSizes; import com.datastax.oss.protocol.internal.request.query.QueryOptions; @@ -44,7 +46,7 @@ public static int minimumRequestSize(Request request) { // Frame header has a fixed size of 9 for protocol version >= V3, which includes Frame flags // size - int size = FrameCodec.headerEncodedSize(); + int size = FrameCodec.V3_ENCODED_HEADER_SIZE; if (!request.getCustomPayload().isEmpty()) { // Custom payload is not supported in v3, but assume user won't have a custom payload set if @@ -83,7 +85,8 @@ public static int sizeOfSimpleStatementValues( List positionalValues = new ArrayList<>(simpleStatement.getPositionalValues().size()); for (Object value : simpleStatement.getPositionalValues()) { - positionalValues.add(Conversions.encode(value, codecRegistry, protocolVersion)); + positionalValues.add( + ValuesHelper.encodeToDefaultCqlMapping(value, codecRegistry, protocolVersion)); } size += Values.sizeOfPositionalValues(positionalValues); @@ -94,7 +97,8 @@ public static int sizeOfSimpleStatementValues( for (Map.Entry value : simpleStatement.getNamedValues().entrySet()) { namedValues.put( value.getKey().asInternal(), - Conversions.encode(value.getValue(), codecRegistry, protocolVersion)); + ValuesHelper.encodeToDefaultCqlMapping( + value.getValue(), codecRegistry, protocolVersion)); } size += Values.sizeOfNamedValues(namedValues); diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java index 50063799a8e..2e85b451c75 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/Strings.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,10 @@ */ package com.datastax.oss.driver.internal.core.util; +import com.datastax.oss.driver.shaded.guava.common.annotations.VisibleForTesting; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.Locale; +import java.util.Objects; public class Strings { @@ -230,8 +235,9 @@ private static String unquote(String text, char quoteChar) { return new String(result); } - private static boolean isReservedCqlKeyword(String id) { - return id != null && RESERVED_KEYWORDS.contains(id.toLowerCase()); + @VisibleForTesting + static boolean isReservedCqlKeyword(String id) { + return id != null && RESERVED_KEYWORDS.contains(id.toLowerCase(Locale.ROOT)); } /** @@ -252,6 +258,21 @@ public static boolean isLongLiteral(String str) { return true; } + /** + * Checks whether the given text is not null and not empty. + * + * @param text The text to check. + * @param name The name of the argument. + * @return The text (for method chaining). + */ + public static String requireNotEmpty(String text, String name) { + Objects.requireNonNull(text, name + " cannot be null"); + if (text.isEmpty()) { + throw new IllegalArgumentException(name + " cannot be empty"); + } + return text; + } + private Strings() {} private static final ImmutableSet RESERVED_KEYWORDS = @@ -261,7 +282,6 @@ private Strings() {} "allow", "alter", "and", - "any", "apply", "asc", "authorize", @@ -270,34 +290,40 @@ private Strings() {} "by", "columnfamily", "create", + "default", "delete", "desc", + "describe", "drop", - "each_quorum", + "entries", + "execute", "from", + "full", "grant", + "if", "in", "index", - "inet", "infinity", "insert", "into", + "is", "keyspace", - "keyspaces", "limit", - "local_one", - "local_quorum", + "materialized", + "mbean", + "mbeans", "modify", "nan", "norecursive", + "not", + "null", "of", "on", - "one", + "or", "order", - "password", "primary", - "quorum", "rename", + "replace", "revoke", "schema", "select", @@ -305,13 +331,13 @@ private Strings() {} "table", "to", "token", - "three", "truncate", - "two", "unlogged", + "unset", "update", "use", "using", + "view", "where", "with"); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java new file mode 100644 index 00000000000..10ca8c0c48d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlan.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.AbstractQueue; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicInteger; +import net.jcip.annotations.ThreadSafe; + +/** A query plan that encompasses many child plans, and consumes them one by one. */ +@ThreadSafe +public class CompositeQueryPlan extends AbstractQueue implements QueryPlan { + + private final Queue[] plans; + private final AtomicInteger currentPlan = new AtomicInteger(0); + + @SafeVarargs + public CompositeQueryPlan(@NonNull Queue... plans) { + if (plans.length == 0) { + throw new IllegalArgumentException("at least one child plan must be provided"); + } + for (Queue plan : plans) { + if (plan == null) { + throw new NullPointerException("child plan cannot be null"); + } + } + this.plans = plans; + } + + @Nullable + @Override + public Node poll() { + while (true) { + int current = currentPlan.get(); + Queue plan = plans[current]; + Node n = plan.poll(); + if (n != null) { + return n; + } + int next = current + 1; + if (next == plans.length) { + return null; + } + currentPlan.compareAndSet(current, next); + } + } + + @NonNull + @Override + public Iterator iterator() { + List> its = new ArrayList<>(plans.length); + for (Queue plan : plans) { + its.add(plan.iterator()); + } + return Iterators.concat(its.iterator()); + } + + @Override + public int size() { + int size = 0; + for (Queue plan : plans) { + size += plan.size(); + } + return size; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java new file mode 100644 index 00000000000..53177147695 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/EmptyQueryPlan.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.AbstractQueue; +import java.util.Collections; +import java.util.Iterator; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +class EmptyQueryPlan extends AbstractQueue implements QueryPlan { + + @Override + public Node poll() { + return null; + } + + @NonNull + @Override + public Iterator iterator() { + return Collections.emptyIterator(); + } + + @Override + public int size() { + return 0; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java new file mode 100644 index 00000000000..075143c2e8d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlan.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import net.jcip.annotations.ThreadSafe; + +/** + * A query plan where nodes are computed lazily, when the plan is consumed for the first time. + * + *
<p>
      This class can be useful when a query plan computation is heavy but the plan has a low chance + * of ever being consumed, e.g. the last query plan in a {@link CompositeQueryPlan}. + */ +@ThreadSafe +public abstract class LazyQueryPlan extends QueryPlanBase { + + private volatile Object[] nodes; + + /** + * Computes and returns the nodes to use for this query plan. + * + *
<p>
      For efficiency, the declared return type is {@code Object[]} but all elements must be + * instances of {@link Node}. See {@link #getNodes()} for details. + * + *
<p>
      This method is guaranteed to be invoked only once, at the first call to {@link #poll()}. + * + *
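For illustration, a subclass might defer an expensive node shuffle until the plan is actually polled (a hypothetical sketch; class and field names are placeholders):

```java
import com.datastax.oss.driver.api.core.metadata.Node;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical LazyQueryPlan subclass: the shuffle only happens if the
// plan is consumed, and computeNodes() is invoked at most once.
class ShuffledQueryPlan extends LazyQueryPlan {
  private final List<Node> candidates;

  ShuffledQueryPlan(List<Node> candidates) {
    this.candidates = candidates;
  }

  @Override
  protected Object[] computeNodes() {
    List<Node> copy = new ArrayList<>(candidates);
    Collections.shuffle(copy);
    return copy.toArray();
  }
}
```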
<p>
      Implementors must avoid blocking calls in this method as it will be invoked on the driver's + * hot path. + */ + protected abstract Object[] computeNodes(); + + @Override + protected Object[] getNodes() { + if (nodes == null) { + synchronized (this) { + if (nodes == null) { + nodes = computeNodes(); + } + } + } + return nodes; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java index dfe2a45757f..371e100a0e2 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlan.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,98 +17,76 @@ */ package com.datastax.oss.driver.internal.core.util.collection; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.util.AbstractCollection; -import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.Queue; -import java.util.concurrent.atomic.AtomicInteger; import net.jcip.annotations.ThreadSafe; /** - * A specialized, thread-safe queue implementation for {@link - * LoadBalancingPolicy#newQueryPlan(Request, Session)}. + * A specialized, thread-safe node queue for use when creating {@linkplain + * com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy#newQueryPlan(Request, Session) + * query plans}. + * + *
<p>
      This interface and its built-in implementations are not general-purpose queues; they are + * tailored for the specific use case of creating query plans in the driver. They make a few + * unconventional API choices for the sake of performance. + * + *
<p>
Furthermore, the driver only consumes query plans through calls to its {@link #poll()} method; + * therefore, this method is the only valid mutation operation for a query plan; all other mutating + * methods throw. + * + *
<p>
Both {@link #size()} and {@link #iterator()} are supported and never throw, even if called + * concurrently. These methods are implemented for reporting purposes only; the driver itself does + * not use them. * - *
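To make the intended consumption pattern concrete, here is a sketch of a poll-driven loop (simplified; `policy`, `request`, `session` and `tryExecuteOn` are hypothetical stand-ins for driver internals):

```java
// poll() is the only mutating call a consumer should make on a plan:
Queue<Node> plan = policy.newQueryPlan(request, session);
Node node;
while ((node = plan.poll()) != null) {
  if (tryExecuteOn(node)) { // hypothetical: stop once a node accepts the request
    break;
  }
}
```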
<p>
      All nodes must be provided at construction time. After that, the only valid mutation operation - * is {@link #poll()}, other methods throw. + *
<p>
      All built-in {@link QueryPlan} implementations can be safely reused for custom load balancing + * policies; if you plan to do so, study the source code of {@link + * com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy} or {@link + * com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy}. * - *
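For instance, a custom policy could assemble its plan from the built-in classes along these lines (a sketch that assumes precomputed node arrays; this is not the actual built-in policies' code):

```java
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.internal.core.util.collection.CompositeQueryPlan;
import com.datastax.oss.driver.internal.core.util.collection.SimpleQueryPlan;
import java.util.Queue;

class MyPolicy /* implements LoadBalancingPolicy */ {
  // Hypothetical arrays of Node, maintained elsewhere by the policy.
  private volatile Object[] localNodes;
  private volatile Object[] remoteNodes;

  public Queue<Node> newQueryPlan(Request request, Session session) {
    // Consume local nodes first, then fall back to remote ones.
    return new CompositeQueryPlan(
        new SimpleQueryPlan(localNodes), new SimpleQueryPlan(remoteNodes));
  }
}
```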
<p>
      This class is not a general-purpose implementation, it is tailored for a specific use case in - * the driver. It makes a few unconventional API choices for the sake of performance (see {@link - * #QueryPlan(Object...)}. It can be reused for custom load balancing policies; if you plan to do - * so, study the source code of {@link DefaultLoadBalancingPolicy}. + * @see QueryPlanBase */ @ThreadSafe -public class QueryPlan extends AbstractCollection implements Queue { +public interface QueryPlan extends Queue { - private final Object[] nodes; - private final AtomicInteger nextIndex = new AtomicInteger(); - - /** - * @param nodes the nodes to initially fill the queue with. For efficiency, there is no defensive - * copy, the provided array is used directly. The declared type is {@code Object[]} because of - * implementation details of {@link DefaultLoadBalancingPolicy}, but all elements must be - * instances of {@link Node}, otherwise instance methods will fail later. - */ - public QueryPlan(@NonNull Object... nodes) { - this.nodes = nodes; - } - - @Nullable - @Override - public Node poll() { - // We don't handle overflow. In practice it won't be an issue, since the driver stops polling - // once the query plan is empty. - int i = nextIndex.getAndIncrement(); - return (i >= nodes.length) ? null : (Node) nodes[i]; - } + QueryPlan EMPTY = new EmptyQueryPlan(); /** * {@inheritDoc} * - *
<p>
      The returned iterator reflects the state of the queue at the time of the call, and is not - * affected by further modifications. + *
<p>
      Implementation note: query plan iterators are snapshots that reflect the contents of the + * queue at the time of the call, and are not affected by further modifications. Successive calls + * to this method will return different objects. */ @NonNull @Override - public Iterator iterator() { - int i = nextIndex.get(); - if (i >= nodes.length) { - return Collections.emptyList().iterator(); - } else { - return Iterators.forArray(Arrays.copyOfRange(nodes, i, nodes.length, Node[].class)); - } - } + Iterator iterator(); @Override - public int size() { - return Math.max(nodes.length - nextIndex.get(), 0); + default boolean offer(Node node) { + throw new UnsupportedOperationException("Not implemented"); } @Override - public boolean offer(Node node) { + default Node peek() { throw new UnsupportedOperationException("Not implemented"); } @Override - public Node remove() { + default boolean add(Node node) { throw new UnsupportedOperationException("Not implemented"); } @Override - public Node element() { + default Node remove() { throw new UnsupportedOperationException("Not implemented"); } @Override - public Node peek() { + default Node element() { throw new UnsupportedOperationException("Not implemented"); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java new file mode 100644 index 00000000000..43f369f636a --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanBase.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.AbstractQueue; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.atomic.AtomicInteger; +import net.jcip.annotations.ThreadSafe; + +@ThreadSafe +public abstract class QueryPlanBase extends AbstractQueue implements QueryPlan { + + private final AtomicInteger nextIndex = new AtomicInteger(); + + /** + * Returns the nodes in this query plan; the returned array should stay the same across + * invocations. + * + *
<p>
      The declared return type is {@code Object[]} because of implementation details of {@link + * com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy + * DefaultLoadBalancingPolicy} and {@link + * com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy + * BasicLoadBalancingPolicy}, but all elements must be instances of {@link Node}, otherwise + * instance methods will fail later. + */ + protected abstract Object[] getNodes(); + + @Nullable + @Override + public Node poll() { + // We don't handle overflow. In practice it won't be an issue, since the driver stops polling + // once the query plan is empty. + int i = nextIndex.getAndIncrement(); + Object[] nodes = getNodes(); + return (i >= nodes.length) ? null : (Node) nodes[i]; + } + + @NonNull + @Override + public Iterator iterator() { + int i = nextIndex.get(); + Object[] nodes = getNodes(); + if (i >= nodes.length) { + return Collections.emptyIterator(); + } else { + return Iterators.forArray(Arrays.copyOfRange(nodes, i, nodes.length, Node[].class)); + } + } + + @Override + public int size() { + return Math.max(getNodes().length - nextIndex.get(), 0); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java new file mode 100644 index 00000000000..4e0df8d2354 --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlan.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.ThreadSafe; + +/** Query plan where nodes must be provided at construction time. */ +@ThreadSafe +public class SimpleQueryPlan extends QueryPlanBase { + + private final Object[] nodes; + + /** + * Creates a new query plan with the given nodes. + * + *
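A brief usage sketch of the constructor contract (`node1` and `node2` are hypothetical; see the paragraph that follows about the array being used without a defensive copy):

```java
// All elements must be Node instances, and the array must not be
// mutated after it is handed to the plan (it is wrapped as-is).
Object[] nodes = {node1, node2};
QueryPlan plan = new SimpleQueryPlan(nodes);
```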
<p>
      For efficiency, there is no defensive copy, the provided array is used directly. The + * declared type is {@code Object[]} but all elements must be instances of {@link Node}. See + * {@link #getNodes()} for details. + * + * @param nodes the nodes to initially fill the queue with. + */ + public SimpleQueryPlan(@NonNull Object... nodes) { + this.nodes = nodes; + } + + @Override + protected Object[] getNodes() { + return nodes; + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java index 7797594b7b9..3f2d10b62e0 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/BlockingOperation.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,7 +61,7 @@ public Thread newThread(@NonNull Runnable r) { } } - private static class InternalThread extends FastThreadLocalThread { + static class InternalThread extends FastThreadLocalThread { private InternalThread(Runnable runnable) { super(runnable); } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java index 26c244db7e3..275b2ddfeef 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFutures.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +20,7 @@ import com.datastax.oss.driver.api.core.DriverException; import com.datastax.oss.driver.api.core.DriverExecutionException; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.base.Throwables; import java.util.List; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; @@ -97,7 +100,10 @@ public static CompletionStage allSuccessful(List> i } else { Throwable finalError = errors.get(0); for (int i = 1; i < errors.size(); i++) { - finalError.addSuppressed(errors.get(i)); + Throwable suppressedError = errors.get(i); + if (finalError != suppressedError) { + finalError.addSuppressed(suppressedError); + } } result.completeExceptionally(finalError); } @@ -147,6 +153,7 @@ public static T getUninterruptibly(CompletionStage stage) { if (cause instanceof DriverException) { throw ((DriverException) cause).copy(); } + Throwables.throwIfUnchecked(cause); throw new DriverExecutionException(cause); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java index 25fb3a4f8d8..548ee0bb042 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java index ded770a3d48..6bde155858c 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Debouncer.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,6 +46,7 @@ public class Debouncer { private static final Logger LOG = LoggerFactory.getLogger(Debouncer.class); + private final String logPrefix; private final EventExecutor adminExecutor; private final Consumer onFlush; private final Duration window; @@ -69,6 +72,27 @@ public Debouncer( Consumer onFlush, Duration window, long maxEvents) { + this("debouncer", adminExecutor, coalescer, onFlush, window, maxEvents); + } + + /** + * Creates a new instance. + * + * @param logPrefix the log prefix to use in log messages. + * @param adminExecutor the executor that will be used to schedule all tasks. + * @param coalescer how to transform a batch of events into a result. + * @param onFlush what to do with a result. + * @param window the time window. + * @param maxEvents the maximum number of accumulated events before a flush is forced. + */ + public Debouncer( + String logPrefix, + EventExecutor adminExecutor, + Function, CoalescedT> coalescer, + Consumer onFlush, + Duration window, + long maxEvents) { + this.logPrefix = logPrefix; this.coalescer = coalescer; Preconditions.checkArgument(maxEvents >= 1, "maxEvents should be at least 1"); this.adminExecutor = adminExecutor; @@ -85,7 +109,8 @@ public void receive(IncomingT element) { } if (window.isZero() || maxEvents == 1) { LOG.debug( - "Received {}, flushing immediately (window = {}, maxEvents = {})", + "[{}] Received {}, flushing immediately (window = {}, maxEvents = {})", + logPrefix, element, window, maxEvents); @@ -94,12 +119,13 @@ public void receive(IncomingT element) { currentBatch.add(element); if (currentBatch.size() == maxEvents) { LOG.debug( - "Received {}, flushing immediately (because {} accumulated events)", + "[{}] Received {}, flushing immediately (because {} accumulated events)", + logPrefix, element, maxEvents); flushNow(); } else { - LOG.debug("Received {}, scheduling next flush in {}", element, window); + LOG.debug("[{}] Received {}, scheduling next flush in {}", logPrefix, element, window); scheduleFlush(); } } @@ -107,7 +133,7 @@ public void receive(IncomingT element) { public void flushNow() { assert adminExecutor.inEventLoop(); - LOG.debug("Flushing now"); + LOG.debug("[{}] Flushing now", logPrefix); cancelNextFlush(); if (!currentBatch.isEmpty()) { onFlush.accept(coalescer.apply(currentBatch)); @@ -127,7 +153,7 @@ private void cancelNextFlush() { if (nextFlush != null && !nextFlush.isDone()) { boolean cancelled = nextFlush.cancel(true); if (cancelled) { - LOG.debug("Cancelled existing scheduled flush"); + LOG.debug("[{}] Cancelled existing scheduled flush", logPrefix); } } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java new file mode 100644 index 00000000000..7d90c50028e --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegration.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.concurrent; + +import com.datastax.oss.driver.internal.core.util.concurrent.BlockingOperation.InternalThread; +import reactor.blockhound.BlockHound; +import reactor.blockhound.integration.BlockHoundIntegration; + +public final class DriverBlockHoundIntegration implements BlockHoundIntegration { + + @Override + public void applyTo(BlockHound.Builder builder) { + + // disallow blocking operations in driver internal threads by default; + // note that session initialization will happen on one of these threads, which is why + // we need to allow a few blocking calls below. + builder.nonBlockingThreadPredicate(current -> current.or(InternalThread.class::isInstance)); + + // blocking calls in initialization methods + + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.context.DefaultNettyOptions", "createTimer"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.os.Native$LibcLoader", "load"); + builder.allowBlockingCallsInside( + // requires native libraries + "com.datastax.oss.driver.internal.core.time.Clock", "getInstance"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.util.concurrent.LazyReference", "get"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "accept"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "markReady"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter", "start"); + + // called upon initialization but also on topology/status events + + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper$SinglePolicyDistanceReporter", + "setDistance"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.pool.ChannelSet", "add"); + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.pool.ChannelSet", "remove"); + + // never called directly by the driver; locks that usually operate with low thread contention + + builder.allowBlockingCallsInside( + "com.datastax.oss.driver.internal.core.type.codec.registry.CachingCodecRegistry", + "register"); + builder.allowBlockingCallsInside( + // requires native libraries, for now because of Uuids.getProcessPiece; if JAVA-1116 gets + // implemented, Uuids.getCurrentTimestamp will also require an exception. Pre-emptively + // protect the whole Uuids.timeBased method. 
+ "com.datastax.oss.driver.api.core.uuid.Uuids", "timeBased"); + + // continuous paging + + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "cancel"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "dequeueOrCreatePending"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "isLastResponse"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "onFailure"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "onPageTimeout"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "onResponse"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "onStreamIdAssigned"); + builder.allowBlockingCallsInside( + "com.datastax.dse.driver.internal.core.cql.continuous.ContinuousRequestHandlerBase$NodeResponseCallback", + "operationComplete"); + + // Netty extra exceptions + + // see https://github.com/netty/netty/pull/10810 + builder.allowBlockingCallsInside("io.netty.util.HashedWheelTimer", "start"); + builder.allowBlockingCallsInside("io.netty.util.HashedWheelTimer", "stop"); + + // see https://github.com/netty/netty/pull/10811 + builder.allowBlockingCallsInside("io.netty.util.concurrent.GlobalEventExecutor", "addTask"); + builder.allowBlockingCallsInside( + "io.netty.util.concurrent.SingleThreadEventExecutor", "addTask"); + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java index b1d34dda6ea..e04b7647d8e 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/LazyReference.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,7 +29,7 @@ public class LazyReference<T> { private final Supplier<T> supplier; private final CycleDetector checker; private volatile T value; - private ReentrantLock lock = new ReentrantLock(); + private final ReentrantLock lock = new ReentrantLock(); public LazyReference(String name, Supplier<T> supplier, CycleDetector cycleDetector) { this.name = name; diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java new file mode 100644 index 00000000000..b854820403d --- /dev/null +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombiner.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.concurrent; + +import edu.umd.cs.findbugs.annotations.NonNull; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import io.netty.util.concurrent.Promise; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import net.jcip.annotations.ThreadSafe; + +/** + * A thread-safe version of Netty's {@link io.netty.util.concurrent.PromiseCombiner} that uses + * proper synchronization to trigger the completion of the aggregate promise. + */ +@ThreadSafe +public class PromiseCombiner { + + /** + * Combines the given futures into the given promise, that is, ties the completion of the latter + * to that of the former. + * + * @param aggregatePromise The promise that will complete when all parents complete. + * @param parents The parent futures. + */ + public static void combine( + @NonNull Promise<Void> aggregatePromise, @NonNull Future<?>...
parents) { + PromiseCombinerListener listener = + new PromiseCombinerListener(aggregatePromise, parents.length); + for (Future<?> parent : parents) { + parent.addListener(listener); + } + } + + private static class PromiseCombinerListener implements GenericFutureListener<Future<?>> { + + private final Promise<Void> aggregatePromise; + private final AtomicInteger remainingCount; + private final AtomicReference<Throwable> aggregateFailureRef = new AtomicReference<>(); + + private PromiseCombinerListener(Promise<Void> aggregatePromise, int numberOfParents) { + this.aggregatePromise = aggregatePromise; + remainingCount = new AtomicInteger(numberOfParents); + } + + @Override + public void operationComplete(Future<?> future) { + if (!future.isSuccess()) { + aggregateFailureRef.updateAndGet( + aggregateFailure -> { + if (aggregateFailure == null) { + aggregateFailure = future.cause(); + } else { + aggregateFailure.addSuppressed(future.cause()); + } + return aggregateFailure; + }); + } + if (remainingCount.decrementAndGet() == 0) { + Throwable aggregateFailure = aggregateFailureRef.get(); + if (aggregateFailure != null) { + aggregatePromise.tryFailure(aggregateFailure); + } else { + aggregatePromise.trySuccess(null); + } + } + } + } +} diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java index d40265bd09a..28aaf596705 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/Reconnection.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java index 5d6fd62918a..27ca1b6ff42 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -55,7 +57,7 @@ private enum State { private State state; @GuardedBy("stateLock") - private List<EventT> recordedEvents; + private final List<EventT> recordedEvents; public ReplayingEventFilter(Consumer<EventT> consumer) { this.consumer = consumer; @@ -80,6 +82,7 @@ public void markReady() { consumer.accept(event); } } finally { + recordedEvents.clear(); stateLock.writeLock().unlock(); } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java index 3bf689a4670..addaf1850bf 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/RunOrSchedule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -81,13 +83,13 @@ public static <T> CompletionStage<T> on( executor .submit(task) .addListener( - ((Future<CompletionStage<T>> f) -> { + (Future<CompletionStage<T>> f) -> { if (f.isSuccess()) { CompletableFutures.completeFrom(f.getNow(), result); } else { result.completeExceptionally(f.cause()); } - })); + }); return result; } } diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java index 1fc9060b710..25bce8773e8 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/concurrent/UncaughtExceptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java index 17aae22cc7b..bd0e2590b47 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/core/util/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/main/java/com/datastax/oss/driver/internal/package-info.java b/core/src/main/java/com/datastax/oss/driver/internal/package-info.java index 1ba376a01dc..486afc446e3 100644 --- a/core/src/main/java/com/datastax/oss/driver/internal/package-info.java +++ b/core/src/main/java/com/datastax/oss/driver/internal/package-info.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +20,6 @@ * *

<p>The types present here (and in subpackages) should not be used from client applications. If * you decide to use them, do so at your own risk: binary compatibility is best-effort, and we - * reserve the right to break things at any time. Documentation may be sparse + * reserve the right to break things at any time. Documentation may be sparse. */ package com.datastax.oss.driver.internal; diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties new file mode 100644 index 00000000000..2baa59f3b07 --- /dev/null +++ b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/native-image.properties @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +Args=-H:IncludeResources=reference\\.conf \ + -H:IncludeResources=application\\.conf \ + -H:IncludeResources=application\\.json \ + -H:IncludeResources=application\\.properties \ + -H:IncludeResources=.*Driver\\.properties \ + -H:DynamicProxyConfigurationResources=${.}/proxy.json \ + -H:ReflectionConfigurationResources=${.}/reflection.json \ + --initialize-at-build-time=com.datastax.oss.driver.internal.core.util.Dependency diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json new file mode 100644 index 00000000000..37cf6fcf805 --- /dev/null +++ b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/proxy.json @@ -0,0 +1,3 @@ +[ + ["java.lang.reflect.TypeVariable"] +] diff --git a/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json new file mode 100644 index 00000000000..6082b853611 --- /dev/null +++ b/core/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-core/reflection.json @@ -0,0 +1,154 @@ +[ + { + "name": "com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy", + "methods": [ { "name": "<init>", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.loadbalancing.DcInferringLoadBalancingPolicy", + "methods": [ { "name": "<init>", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.loadbalancing.BasicLoadBalancingPolicy", + "methods": [ { "name": "<init>", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, +
{ + "name": "com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext", "java.lang.String" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.time.AtomicTimestampGenerator", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.time.ThreadLocalTimestampGenerator", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.time.ServerSideTimestampGenerator", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.tracker.RequestLogger", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.session.throttling.RateLimitingRequestThrottler", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator", + "methods": [ { "name": "", 
"parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.addresstranslation.Ec2MultiRegionAddressTranslator", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metrics.DefaultMetricsFactory", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metrics.NoopMetricsFactory", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metrics.DropwizardMetricsFactory", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metrics.DefaultMetricIdGenerator", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "com.datastax.oss.driver.internal.core.metrics.TaggingMetricIdGenerator", + "methods": [ { "name": "", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + }, + { + "name": "io.netty.channel.socket.nio.NioSocketChannel", + "methods": [ { "name": "", "parameterTypes": [] } ] + }, + { + "name": "io.netty.buffer.AbstractByteBufAllocator", + "methods": [ { "name": "toLeakAwareBuffer", "parameterTypes": ["io.netty.buffer.ByteBuf" ] } ] + }, + { + "name": "io.netty.util.ReferenceCountUtil", + "methods": [ { "name": "touch", "parameterTypes": ["java.lang.Object", "java.lang.Object" ] } ] + }, + { + "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField", + "fields": [ {"name": "producerIndex", "allowUnsafeAccess": true} ] + }, + { + "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField", + "fields": [ {"name": "producerLimit", "allowUnsafeAccess": true} ] + }, + { + "name" : "io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField", + "fields": [ {"name": "consumerIndex", "allowUnsafeAccess": true} ] + }, + { + "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields", + "fields": [ {"name": "producerIndex", "allowUnsafeAccess": true} ] + }, + { + "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields", + "fields": [ {"name": "producerLimit", "allowUnsafeAccess": true} ] + }, + { + "name" : "io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields", + "fields": [ {"name": "consumerIndex", "allowUnsafeAccess": true} ] + } +] diff --git a/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration 
b/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration new file mode 100644 index 00000000000..b848ce24855 --- /dev/null +++ b/core/src/main/resources/META-INF/services/reactor.blockhound.integration.BlockHoundIntegration @@ -0,0 +1 @@ +com.datastax.oss.driver.internal.core.util.concurrent.DriverBlockHoundIntegration \ No newline at end of file diff --git a/core/src/main/resources/com/datastax/oss/driver/Driver.properties b/core/src/main/resources/com/datastax/oss/driver/Driver.properties index a62ba0a538a..4706afe2da8 100644 --- a/core/src/main/resources/com/datastax/oss/driver/Driver.properties +++ b/core/src/main/resources/com/datastax/oss/driver/Driver.properties @@ -1,11 +1,13 @@ # -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -23,4 +25,4 @@ driver.version=${project.version} # It would be better to use ${project.parent.name} here, but for some reason the bundle plugin # prevents that from being resolved correctly (unlike the project-level properties above). # The value is not likely to change, so we simply hard-code it: -driver.name=DataStax Java driver for Apache Cassandra(R) +driver.name=Apache Cassandra Java Driver diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf index 1252bec02f6..4ae83362e29 100644 --- a/core/src/main/resources/reference.conf +++ b/core/src/main/resources/reference.conf @@ -1,4 +1,21 @@ -# Reference configuration for the DataStax Java driver for Apache Cassandra®. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Reference configuration for the Java Driver for Apache Cassandra®. # # Unless you use a custom mechanism to load your configuration (see # SessionBuilder.withConfigLoader), all the values declared here will be used as defaults. You can @@ -150,10 +167,30 @@ datastax-java-driver { # - when the policies assign distances to nodes, the driver uses the closest assigned distance # for any given node. 
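# For illustration only (a sketch, not part of the shipped defaults): load-balancing policies
# can differ per execution profile, and an application.conf typically names the local
# datacenter; the profile name "analytics" and datacenter names below are placeholders:
#
#   datastax-java-driver {
#     basic.load-balancing-policy.local-datacenter = datacenter1
#     profiles {
#       analytics {
#         basic.load-balancing-policy.local-datacenter = datacenter2
#       }
#     }
#   }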
basic.load-balancing-policy { - # The class of the policy. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.loadbalancing. - # - # The driver provides a single implementation out of the box: DefaultLoadBalancingPolicy. + # The class of the policy. If it is not qualified, the driver assumes that it resides in one of + # the following packages: + # - com.datastax.oss.driver.internal.core.loadbalancing. + # - com.datastax.dse.driver.internal.core.loadbalancing. + # + # The driver provides three implementations out of the box: + # + # - `DefaultLoadBalancingPolicy`: should almost always be used; it requires a local datacenter + # to be specified either programmatically when creating the session, or via the configuration + # option: datastax-java-driver.basic.load-balancing-policy.local-datacenter. It can also + # use a highly efficient slow replica avoidance mechanism, which is enabled by default; see + # the option: datastax-java-driver.basic.load-balancing-policy.slow-replica-avoidance. + # - `DcInferringLoadBalancingPolicy`: similar to `DefaultLoadBalancingPolicy`, but does not + # require a local datacenter to be defined, in which case it will attempt to infer the local + # datacenter from the provided contact points, if possible; if that fails, it will throw an + # error during session initialization. This policy is intended mostly for ETL tools and + # should not be used by normal applications. + # - `BasicLoadBalancingPolicy`: similar to `DefaultLoadBalancingPolicy`, but does not have + # the slow replica avoidance mechanism. More importantly, it is the only policy capable of + # operating without a local datacenter defined, in which case it will consider nodes in the + # cluster in a datacenter-agnostic way. Beware that this could cause spikes in + # cross-datacenter traffic! This policy is provided mostly as a starting point for users + # wishing to implement their own load balancing policy; it should not be used as is in normal + # applications. # # You can also specify a custom class that implements LoadBalancingPolicy and has a public # constructor with two arguments: the DriverContext and a String representing the profile name. @@ -162,23 +199,44 @@ # The datacenter that is considered "local": the default policy will only include nodes from # this datacenter in its query plans. # - # This option can only be absent if you specified no contact points: in that case, the driver - # defaults to 127.0.0.1:9042, and that node's datacenter is used as the local datacenter. - # - # As soon as you provide contact points (either through the configuration or through the cluster - # builder), you must define the local datacenter explicitly, and initialization will fail if - # this property is absent. In addition, all contact points should be from this datacenter; - # warnings will be logged for nodes that are from a different one. + # When using the default policy, this option can only be absent if you specified no contact + # points: in that case, the driver defaults to 127.0.0.1:9042, and that node's datacenter is + # used as the local datacenter. As soon as you provide contact points (either through the + # configuration or through the session builder), you must define the local datacenter + # explicitly, and initialization will fail if this property is absent. In addition, all contact + # points should be from this datacenter; warnings will be logged for nodes that are from a + # different one.
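# For example (hypothetical addresses, shown as a sketch only), a configuration that supplies
# contact points must also name the local datacenter they belong to:
#
#   datastax-java-driver.basic {
#     contact-points = [ "10.0.0.1:9042", "10.0.0.2:9042" ]
#     load-balancing-policy.local-datacenter = datacenter1
#   }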
# # This can also be specified programmatically with SessionBuilder.withLocalDatacenter. If both # are specified, the programmatic value takes precedence. // local-datacenter = datacenter1 + # The class of a custom node distance evaluator. + # + # This option is not required; if present, it must be the fully-qualified name of a class that + # implements `com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator`, and has a + # public constructor taking two arguments: the DriverContext and a String representing the + # profile name. + # + # Alternatively, you can pass an instance of your distance evaluator to + # CqlSession.builder().withNodeDistanceEvaluator(). In that case, this option will be ignored. + # + # The evaluator will be invoked each time the policy processes a topology or state change. The + # evaluator's `evaluateDistance` method will be called with the node affected by the change, and + # the local datacenter name (or null if none is defined). If it returns a non-null distance, the + # policy will suggest that distance for the node; if the function returns null, the policy + # will assign a default distance instead, based on its internal algorithm for computing node + # distances. + // evaluator.class= + + # DEPRECATED. Use evaluator.class instead (see above). If both evaluator.class and filter.class + # are defined, the former wins. + # # A custom filter to include/exclude nodes. # # This option is not required; if present, it must be the fully-qualified name of a class that - # implements `java.util.function.Predicate<Node>`, and has a public constructor taking a single - # `DriverContext` argument. + # implements `java.util.function.Predicate<Node>`, and has a public constructor taking two + # arguments: the DriverContext and a String representing the profile name. # # Alternatively, you can pass an instance of your filter to # CqlSession.builder().withNodeFilter(). In that case, this option will be ignored. @@ -187,12 +245,185 @@ # topology or state change: if it returns false, the node will be set at distance IGNORED # (meaning the driver won't ever connect to it), and never included in any query plan. // filter.class= + + # Whether to enable the slow replica avoidance mechanism in DefaultLoadBalancingPolicy. + # + # The default policy always moves replicas first in the query plan (if routing information can + # be determined for the current request). However: + # - if this option is true, it also applies a custom algorithm that takes the responsiveness and + # uptime of each replica into account to order them among each other; + # - if this option is false, replicas are simply shuffled. + # + # If this option is not defined, the driver defaults to true. + slow-replica-avoidance = true + } + basic.cloud { + # The location of the cloud secure bundle used to connect to DataStax Apache Cassandra as a + # service. + # This setting must be a valid URL. + # If the protocol is not specified, it is implicitly assumed to be the `file://` protocol, + # in which case the value is expected to be a valid path on the local filesystem. + # For example, `/a/path/to/bundle` will be interpreted as `file:/a/path/to/bundle`. + # If the protocol is provided explicitly, then the value will be used as is. + # + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + // secure-connect-bundle = /location/of/secure/connect/bundle + } + + # DataStax Insights monitoring. + basic.application { + # The name of the application using the session.
+ # + # It will be sent in the STARTUP protocol message for each new connection established by the + # driver. + # + # This can also be defined programmatically with DseSessionBuilder.withApplicationName(). If you + # specify both, the programmatic value takes precedence and this option is ignored. + # + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + // name = + + # The version of the application using the session. + # + # It will be sent in the STARTUP protocol message for each new connection established by the + # driver. + # + # This can also be defined programmatically with DseSessionBuilder.withApplicationVersion(). If + # you specify both, the programmatic value takes precedence and this option is ignored. + # + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + // version = + } + + # Graph (DataStax Enterprise only) + basic.graph { + # The name of the graph targeted by graph statements. + # + # This can also be overridden programmatically with GraphStatement.setGraphName(). If both are + # specified, the programmatic value takes precedence, and this option is ignored. + # + # Required: no. In particular, system queries -- such as creating or dropping a graph -- must be + # executed without a graph name (see also basic.graph.is-system-query). + # Modifiable at runtime: yes, the new value will be used for requests issued after the change. + # Overridable in a profile: yes + // name = your-graph-name + + # The traversal source to use for graph statements. + # + # This setting doesn't usually need to change, unless executing OLAP queries, which require the + # traversal source "a". + # + # This can also be overridden programmatically with GraphStatement.setTraversalSource(). If both + # are specified, the programmatic value takes precedence, and this option is ignored. + # + # Required: no + # Modifiable at runtime: yes, the new value will be used for requests issued after the change. + # Overridable in a profile: yes + traversal-source = "g" + + # Whether a script statement represents a system query. + # + # Script statements that access the `system` variable *must not* specify a graph name (otherwise + # `system` is not available). However, if your application executes a lot of non-system + # statements, it is convenient to configure basic.graph.name to avoid repeating it every time. + # This option allows you to ignore that global graph name, for example in a specific profile. + # + # This can also be overridden programmatically with ScriptGraphStatement.setSystemQuery(). If + # both are specified, the programmatic value takes precedence, and this option is ignored. + # + # Required: no (defaults to false) + # Modifiable at runtime: yes, the new value will be used for requests issued after the change. + # Overridable in a profile: yes + // is-system-query = false + + # The read consistency level to use for graph statements. + # + # DSE Graph is able to distinguish between read and write timeouts for the internal storage + # queries that will be produced by a traversal. Hence the consistency level for reads and writes + # can be set separately. + # + # This can also be overridden programmatically with GraphStatement.setReadConsistencyLevel(). If + # both are specified, the programmatic value takes precedence, and this option is ignored. + # + # Required: no (defaults to request.basic.consistency) + # Modifiable at runtime: yes, the new value will be used for requests issued after the change. 
+ # Overridable in a profile: yes + // read-consistency-level = LOCAL_QUORUM + + # The write consistency level to use for graph statements. + # + # DSE Graph is able to distinguish between read and write timeouts for the internal storage + # queries that will be produced by a traversal. Hence the consistency level for reads and writes + # can be set separately. + # + # This can also be overridden programmatically with GraphStatement.setWriteConsistencyLevel(). + # If both are specified, the programmatic value takes precedence, and this option is ignored. + # + # Required: no (defaults to request.basic.consistency) + # Modifiable at runtime: yes, the new value will be used for requests issued after the change. + # Overridable in a profile: yes + // write-consistency-level = LOCAL_ONE + + # How long the driver waits for a graph request to complete. This is a global limit on the + # duration of a session.execute() call, including any internal retries the driver might do. + # + # Graph statements behave a bit differently than regular CQL requests (hence this dedicated + # option instead of reusing basic.request.timeout): by default, the client timeout is not set, + # and the driver will just wait as long as needed until the server replies (which is itself + # governed by server-side timeout configuration). + # If you specify a client timeout with this option, then the driver will fail the request after + # the given time; note that the value is also sent along with the request, so that the server + # can also time out early and avoid wasting resources on a response that the client has already + # given up on. + # + # This can also be overridden programmatically with GraphStatement.setTimeout(). If both are + # specified, the programmatic value takes precedence, and this option is ignored. + # + # If this value is left unset (default) or is explicitly set to zero, no timeout will be + # applied. + # + # Required: no (defaults to zero - no timeout) + # Modifiable at runtime: yes, the new value will be used for requests issued after the change. + # Overridable in a profile: yes + // timeout = 10 seconds } # ADVANCED OPTIONS ------------------------------------------------------------------------------- + # The maximum number of live sessions that are allowed to coexist in a given VM. + # + # This is intended to help detect resource leaks in client applications that create too many + # sessions and/or do not close them correctly. The driver keeps track of the number of live + # sessions in a static variable; if it gets over this threshold, a warning will be logged for + # every new session. + # + # If the value is less than or equal to 0, the feature is disabled: no warning will be issued. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for sessions created after the change. + # Overridable in a profile: no + advanced.session-leak.threshold = 4 + advanced.connection { + # The timeout to use when establishing driver connections. + # + # This timeout is for controlling how long the driver will wait for the underlying channel + # to actually connect to the server. This is not the time limit for completing protocol + # negotiations, only the time limit for establishing a channel connection. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for connections created after the + # change.
+ # Overridable in a profile: no + connect-timeout = 5 seconds + # The timeout to use for internal queries that run as part of the initialization process, just # after we open a connection. If this timeout fires, the initialization of the connection will # fail. If this is the first connection ever, the driver will fail to initialize as well, @@ -202,7 +433,7 @@ # Modifiable at runtime: yes, the new value will be used for connections created after the # change. # Overridable in a profile: no - init-query-timeout = 500 milliseconds + init-query-timeout = 5 seconds # The timeout to use when the driver changes the keyspace on a connection at runtime (this # happens when the client issues a `USE ...` query, and all connections belonging to the current @@ -215,24 +446,52 @@ set-keyspace-timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} # The driver maintains a connection pool to each node, according to the distance assigned to it - # by the load balancing policy. If the distance is IGNORED, no connections are maintained. + # by the load balancing policy. + # If the distance is LOCAL, then local.size connections are opened; if the distance is REMOTE, + # then remote.size connections are opened. If the distance is IGNORED, no connections at all + # are maintained. pool { - local { - # The number of connections in the pool. - # - # Required: yes - # Modifiable at runtime: yes; when the change is detected, all active pools will be notified - # and will adjust their size. - # Overridable in a profile: no - size = 1 - } - remote { - size = 1 - } + # The number of connections in the pool for a node whose distance is LOCAL, that is, a node + # that belongs to the local datacenter, as inferred by the load balancing policy or defined + # by the option: datastax-java-driver.basic.load-balancing-policy.local-datacenter. + # + # Each connection can handle many concurrent requests, so 1 is generally a good place to + # start. You should only need higher values in very high performance scenarios, where + # connections might start maxing out their I/O thread (see the driver's online manual for + # more tuning instructions). + # + # Required: yes + # Modifiable at runtime: yes; when the change is detected, all active pools will be notified + # and will adjust their size. + # Overridable in a profile: no + local.size = 1 + + # The number of connections in the pool for a node whose distance is REMOTE, that is, a node + # that does not belong to the local datacenter. + # + # Note: by default, the built-in load-balancing policies will never assign the REMOTE distance + # to any node, to avoid cross-datacenter network traffic. If you want to change this behavior + # and understand the consequences, configure your policy to accept nodes in remote + # datacenters by adjusting the following advanced options: + # + # - datastax-java-driver.advanced.load-balancing-policy.dc-failover.max-nodes-per-remote-dc + # - datastax-java-driver.advanced.load-balancing-policy.dc-failover.allow-for-local-consistency-levels + # + # Required: yes + # Modifiable at runtime: yes; when the change is detected, all active pools will be notified + # and will adjust their size. + # Overridable in a profile: no + remote.size = 1 } # The maximum number of requests that can be executed concurrently on a connection. This must be - # between 1 and 32768.
+ # + # We recommend against changing this value: the default of 1024 is fine for most situations, + # it's a good balance between sufficient concurrency on the client and reasonable pressure on + # the server. If you're looking for a way to limit the global throughput of the session, this is + # not the right way to do it: use a request throttler instead (see the `advanced.throttler` + # section in this configuration). # # Required: yes # Modifiable at runtime: yes, the new value will be used for connections created after the @@ -251,11 +510,13 @@ datastax-java-driver { # accumulate over time, eventually affecting the connection's throughput. So we monitor them # and close the connection above a given threshold (the pool will replace it). # + # The value must be lower than `max-requests-per-connection`. + # # Required: yes # Modifiable at runtime: yes, the new value will be used for connections created after the # change. # Overridable in a profile: no - max-orphan-requests = 24576 + max-orphan-requests = 256 # Whether to log non-fatal errors when the driver tries to open a new connection. # @@ -276,6 +537,53 @@ datastax-java-driver { warn-on-init-error = true } + # Advanced options for the built-in load-balancing policies. + advanced.load-balancing-policy { + # Cross-datacenter failover configuration: configure the load-balancing policies to use nodes + # in remote datacenters. + dc-failover { + # The maximum number of nodes to contact in each remote datacenter. + # + # By default, this number is zero, to avoid cross-datacenter network traffic. When this + # number is greater than zero: + # + # - The load policies will assign the REMOTE distance to that many nodes in each remote + # datacenter. + # - The driver will then attempt to open connections to those nodes. The actual number of + # connections to open to each one of those nodes is configurable via the option: + # datastax-java-driver.advanced.connection.pool.remote.size. + # - The load-balancing policies will include those remote nodes (and only those) in query + # plans, effectively enabling cross-datacenter failover. + # + # Beware that enabling such failover can result in cross-datacenter network traffic spikes, + # if the local datacenter is down or experiencing high latencies! + # + # Required: yes + # Modifiable at runtime: no + # Overridable in a profile: yes + max-nodes-per-remote-dc = 0 + + # Whether cross-datacenter failover should be allowed for requests executed with local + # consistency levels (LOCAL_ONE, LOCAL_QUORUM and LOCAL_SERIAL). + # + # This is disabled by default. Enabling this feature may have unexpected results, since a + # local consistency level may have different semantics depending on the replication factor in + # use in each datacenter. + # + # Required: yes + # Modifiable at runtime: no + # Overridable in a profile: yes + allow-for-local-consistency-levels = false + + # Ordered preference list of remote dc's (in order) optionally supplied for automatic failover. While building a query plan, the driver uses the DC's supplied in order together with max-nodes-per-remote-dc + # Users are not required to specify all DCs, when listing preferences via this config + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + preferred-remote-dcs = [""] + } + } + # Whether to schedule reconnection attempts if all contact points are unreachable on the first # initialization attempt. # @@ -328,7 +636,13 @@ datastax-java-driver { # The class of the policy. 
If it is not qualified, the driver assumes that it resides in the # package com.datastax.oss.driver.internal.core.retry. # - # The driver provides a single implementation out of the box: DefaultRetryPolicy. + # The driver provides two implementations out of the box: + # + # - DefaultRetryPolicy: the default policy; it should almost always be the right choice. + # - ConsistencyDowngradingRetryPolicy: an alternative policy that weakens consistency guarantees + # as a trade-off to maximize the chance of success when retrying. Use with caution. + # + # Refer to the manual to understand how these policies work. # # You can also specify a custom class that implements RetryPolicy and has a public constructor # with two arguments: the DriverContext and a String representing the profile name. @@ -382,20 +696,67 @@ # Required: no. If the 'class' child option is absent, no authentication will occur. # Modifiable at runtime: no # Overridable in a profile: no + # + # Note that the contents of this section can be overridden programmatically with + # SessionBuilder.withAuthProvider or SessionBuilder.withAuthCredentials. advanced.auth-provider { - # The class of the provider. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.auth. - # - # The driver provides a single implementation out of the box: PlainTextAuthProvider, that uses - # plain-text credentials. It requires the `username` and `password` options below. - # - # You can also specify a custom class that implements AuthProvider and has a public - # constructor with a DriverContext argument. + # The class of the provider. If it is not qualified, the driver assumes that it resides in one + # of the following packages: + # - com.datastax.oss.driver.internal.core.auth + # - com.datastax.dse.driver.internal.core.auth + # + # The driver provides two implementations: + # - PlainTextAuthProvider: uses plain-text credentials. It requires the `username` and + # `password` options below. When connecting to DataStax Enterprise, an optional + # `authorization-id` can also be specified. + # For backward compatibility with previous driver versions, you can also use the class name + # "DsePlainTextAuthProvider" for this provider. + # - DseGssApiAuthProvider: provides GSSAPI authentication for DSE clusters secured with + # DseAuthenticator. See the example below and refer to the manual for detailed instructions. + # + # You can also specify a custom class that implements AuthProvider and has a public constructor + # with a DriverContext argument (to simplify this, the driver provides two abstract classes that + # can be extended: PlainTextAuthProviderBase and DseGssApiAuthProviderBase). + # + # Finally, you can configure a provider instance programmatically with + # DseSessionBuilder#withAuthProvider. In that case, it will take precedence over the + # configuration. // class = PlainTextAuthProvider - - # Sample configuration for the plain-text provider: + # + # Sample configuration for plain-text authentication providers: // username = cassandra // password = cassandra + # + # Proxy authentication: allows logging in as another user or role (valid for both + # PlainTextAuthProvider and DseGssApiAuthProvider): + // authorization-id = userOrRole + # + # The settings below are only applicable to DseGssApiAuthProvider: + # + # Service name.
For example, if in your dse.yaml configuration file the + # "kerberos_options/service_principal" setting is "cassandra/my.host.com@MY.REALM.COM", then set + # this option to "cassandra". If this value is not explicitly set via configuration (in an + # application.conf or programmatically), the driver will attempt to set it via a System + # property. The property should be "dse.sasl.service". For backwards compatibility with 1.x + # versions of the driver, if "dse.sasl.service" is not set as a System property, the driver will + # attempt to use "dse.sasl.protocol" as a fallback (which is the property for the 1.x driver). + //service = "cassandra" + # + # Login configuration. It is also possible to provide login configuration through a standard + # JAAS configuration file. The below configuration is just an example, see all possible options + # here: + # https://docs.oracle.com/javase/6/docs/jre/api/security/jaas/spec/com/sun/security/auth/module/Krb5LoginModule.html + // login-configuration { + // principal = "cassandra@DATASTAX.COM" + // useKeyTab = "true" + // refreshKrb5Config = "true" + // keyTab = "/path/to/keytab/file" + // } + # + # Internal SASL properties, if any, such as QOP. + // sasl-properties { + // javax.security.sasl.qop = "auth-conf" + // } } # The SSL engine factory that will initialize an SSL engine for each new connection to a server. @@ -403,6 +764,9 @@ datastax-java-driver { # Required: no. If the 'class' child option is absent, SSL won't be activated. # Modifiable at runtime: no # Overridable in a profile: no + # + # Note that the contents of this section can be overridden programmatically with + # SessionBuilder.withSslEngineFactory or SessionBuilder#withSslContext. advanced.ssl-engine-factory { # The class of the factory. If it is not qualified, the driver assumes that it resides in the # package com.datastax.oss.driver.internal.core.ssl. @@ -425,6 +789,12 @@ datastax-java-driver { # name matches the hostname of the server being connected to. If not set, defaults to true. // hostname-validation = true + # Whether or not to allow a DNS reverse-lookup of provided server addresses for SAN addresses, + # if cluster endpoints are specified as literal IPs. + # This is left as true for compatibility, but in most environments a DNS reverse-lookup should + # not be necessary to get an address that matches the server certificate SANs. + // allow-dns-reverse-lookup-san = true + # The locations and passwords used to access truststore and keystore contents. # These properties are optional. If either truststore-path or keystore-path are specified, # the driver builds an SSLContext from these files. If neither option is specified, the @@ -433,6 +803,13 @@ datastax-java-driver { // truststore-password = password123 // keystore-path = /path/to/client.keystore // keystore-password = password123 + + # The duration between attempts to reload the keystore from the contents of the file specified + # by `keystore-path`. This is mainly relevant in environments where certificates have short + # lifetimes and applications are restarted infrequently, since an expired client certificate + # will prevent new connections from being established until the application is restarted. If + # not set, defaults to not reload the keystore. + // keystore-reload-interval = 30 minutes } # The generator that assigns a microsecond timestamp to each request. @@ -478,23 +855,29 @@ datastax-java-driver { force-java-clock = false } - # A session-wide component that tracks the outcome of requests. 
- # - # Required: yes - # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes - # and allow child options to be changed at runtime). - # Overridable in a profile: no + # Request trackers are session-wide components that get notified of the outcome of requests. advanced.request-tracker { - # The class of the tracker. If it is not qualified, the driver assumes that it resides in the - # package com.datastax.oss.driver.internal.core.tracker. + # The list of trackers to register. # - # The driver provides the following implementations out of the box: - # - NoopRequestTracker: does nothing. + # This must be a list of class names, either fully-qualified or non-qualified; if the latter, + # the driver assumes that the class resides in the package + # com.datastax.oss.driver.internal.core.tracker. + # + # All classes specified here must implement + # com.datastax.oss.driver.api.core.tracker.RequestTracker and have a public constructor with a + # DriverContext argument. + # + # The driver provides the following implementation out of the box: # - RequestLogger: logs requests (see the parameters below). # - # You can also specify a custom class that implements RequestTracker and has a public - # constructor with a DriverContext argument. - class = NoopRequestTracker + # You can also pass instances of your trackers programmatically with + # CqlSession.builder().addRequestTracker(). + # + # Required: no + # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes + # and allow child options to be changed at runtime). + # Overridable in a profile: no + #classes = [RequestLogger,com.example.app.MyTracker] # Parameters for RequestLogger. All of them can be overridden in a profile, and changed at # runtime (the new values will be taken into account for requests logged after the change). @@ -535,6 +918,13 @@ } } + advanced.request-id { + generator { + # The component that generates a unique identifier for each CQL request, and optionally + # writes the id to the custom payload. + // class = W3CContextRequestIdGenerator + } + } + # A session-wide component that controls the rate at which requests are executed. # # Implementations vary, but throttlers generally track a metric that represents the level of @@ -597,39 +987,37 @@ // drain-interval = 10 milliseconds } - # A session-wide component that listens for node state changes. If it is not qualified, the driver - # assumes that it resides in the package com.datastax.oss.driver.internal.core.metadata. + # The list of node state listeners to register. Node state listeners are session-wide + # components that listen for node state changes (e.g., when nodes go down or back up). # - # The driver provides a single no-op implementation out of the box: NoopNodeStateListener. - # - # You can also specify a custom class that implements NodeStateListener and has a public + # This must be a list of fully-qualified class names; classes specified here must implement + # com.datastax.oss.driver.api.core.metadata.NodeStateListener and have a public # constructor with a DriverContext argument. # - # Alternatively, you can pass an instance of your listener programmatically with - # CqlSession.builder().withNodeStateListener(). In that case, this option will be ignored. + # You can also pass instances of your listeners programmatically with + # CqlSession.builder().addNodeStateListener().
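# A minimal sketch of such a listener (the class name MyListener1 below is purely
# illustrative, matching the placeholder used in the example option value):
#
#   public class MyListener1 implements NodeStateListener {
#     public MyListener1(DriverContext context) {}
#     @Override public void onAdd(Node node) {}
#     @Override public void onUp(Node node) { /* e.g. log or update metrics */ }
#     @Override public void onDown(Node node) {}
#     @Override public void onRemove(Node node) {}
#     @Override public void close() {}
#   }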
# - # Required: unless a listener has been provided programmatically + # Required: no # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes # and allow child options to be changed at runtime). # Overridable in a profile: no - advanced.node-state-listener.class = NoopNodeStateListener + #advanced.node-state-listener.classes = [com.example.app.MyListener1,com.example.app.MyListener2] - # A session-wide component that listens for node state changes. If it is not qualified, the driver - # assumes that it resides in the package com.datastax.oss.driver.internal.core.metadata.schema. - # - # The driver provides a single no-op implementation out of the box: NoopSchemaChangeListener. + # The list of schema change listeners to register. Schema change listeners are session-wide + # components that listen for schema changes (e.g., when tables are created or dropped). # - # You can also specify a custom class that implements SchemaChangeListener and has a public + # This must be a list of fully-qualified class names; classes specified here must implement + # com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener and have a public # constructor with a DriverContext argument. # - # Alternatively, you can pass an instance of your listener programmatically with - # CqlSession.builder().withSchemaChangeListener(). In that case, this option will be ignored. + # You can also pass instances of your listeners programmatically with + # CqlSession.builder().addSchemaChangeListener(). # - # Required: unless a listener has been provided programmatically + # Required: no # Modifiable at runtime: no (but custom implementations may elect to watch configuration changes # and allow child options to be changed at runtime). # Overridable in a profile: no - advanced.schema-change-listener.class = NoopSchemaChangeListener + #advanced.schema-change-listener.classes = [com.example.app.MyListener1,com.example.app.MyListener2] # The address translator to use to convert the addresses sent by Cassandra nodes into ones that # the driver uses to connect. @@ -645,11 +1033,33 @@ datastax-java-driver { # the package com.datastax.oss.driver.internal.core.addresstranslation. # # The driver provides the following implementations out of the box: - # - PassThroughAddressTranslator: returns all addresses unchanged + # - PassThroughAddressTranslator: returns all addresses unchanged. + # - FixedHostNameAddressTranslator: translates all addresses to a specific hostname. + # - SubnetAddressTranslator: translates addresses to hostname based on the subnet match. + # - Ec2MultiRegionAddressTranslator: suitable for an Amazon multi-region EC2 deployment where + # clients are also deployed in EC2. It optimizes network costs by favoring private IPs over + # public ones whenever possible. # # You can also specify a custom class that implements AddressTranslator and has a public # constructor with a DriverContext argument. class = PassThroughAddressTranslator + # + # This property has to be set only in case you use FixedHostNameAddressTranslator. + # advertised-hostname = mycustomhostname + # + # These properties are only applicable in case you use SubnetAddressTranslator. + # subnet-addresses { + # "100.64.0.0/15" = "cassandra.datacenter1.com:9042" + # "100.66.0.0/15" = "cassandra.datacenter2.com:9042" + # # IPv6 example: + # # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042" + # # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042" + # } + # Optional. 
When configured, addresses not matching the configured subnets are translated to this address.
+    # default-address = "cassandra.datacenter1.com:9042"
+    # Whether to resolve the addresses once on initialization (if true) or on each node
+    # (re-)connection (if false). If not configured, defaults to false.
+    # resolve-addresses = false
   }

   # Whether to resolve the addresses passed to `basic.contact-points`.
@@ -694,7 +1104,7 @@ datastax-java-driver {
   # an incompatible node joins the cluster later, connection will fail and the driver will force
   # it down (i.e. never try to connect to it again).
   #
-  # You can check the actual version at runtime with Cluster.getContext().getProtocolVersion().
+  # You can check the actual version at runtime with Session.getContext().getProtocolVersion().
   #
   # Required: no
   # Modifiable at runtime: no
@@ -704,8 +1114,10 @@ datastax-java-driver {
     # The name of the algorithm used to compress protocol frames.
     #
     # The possible values are:
-    # - lz4: requires net.jpountz.lz4:lz4 in the classpath.
+    # - lz4: requires at.yawk.lz4:lz4-java in the classpath.
     # - snappy: requires org.xerial.snappy:snappy-java in the classpath.
+    # - the string "none" to indicate no compression (this is functionally equivalent to omitting
+    #   the option).
     #
     # The driver depends on the compression libraries, but they are optional. Make sure you
     # redeclare an explicit dependency in your project. Refer to the driver's POM or manual for the
@@ -723,7 +1135,7 @@ datastax-java-driver {
     # Modifiable at runtime: yes, the new value will be used for connections created after the
     # change.
     # Overridable in a profile: no
-    max-frame-length = 256 MB
+    max-frame-length = 256 MiB
   }

   advanced.request {
@@ -786,7 +1198,312 @@ datastax-java-driver {
     log-warnings = true
   }

+  # Graph (DataStax Enterprise only)
+  advanced.graph {
+    # The sub-protocol the driver will use to communicate with DSE Graph, on top of the Cassandra
+    # native protocol.
+    #
+    # You should almost never have to change this: the driver sets it automatically, based on the
+    # information it has about the server. One exception is if you use the script API against a
+    # legacy DSE version (5.0.3 or older). In that case, you need to force the sub-protocol to
+    # "graphson-1.0".
+    #
+    # This can also be overridden programmatically with GraphStatement.setSubProtocol(). If both are
+    # specified, the programmatic value takes precedence, and this option is ignored.
+    #
+    # Possible values with built-in support in the driver are:
+    # [ "graphson-1.0", "graphson-2.0", "graph-binary-1.0" ]
+    #
+    # IMPORTANT: The default value for the Graph sub-protocol is based only on the DSE
+    # version. If the version is DSE 6.7 or lower, "graphson-2.0" will be the default. For DSE 6.8
+    # and higher, the default value is "graph-binary-1.0".
+    #
+    # Required: no
+    # Modifiable at runtime: yes, the new value will be used for requests issued after the change.
+    # Overridable in a profile: yes
+    // sub-protocol = "graphson-2.0"
+
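As a hedged sketch of the programmatic override mentioned above (the query and graph result handling are illustrative; only `ScriptGraphStatement.newInstance` and `GraphStatement.setSubProtocol()` are confirmed by the surrounding text):

```java
import com.datastax.dse.driver.api.core.graph.GraphResultSet;
import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
import com.datastax.oss.driver.api.core.CqlSession;

public class SubProtocolExample {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // Force GraphSON 1.0 for this statement only, e.g. when using the script API
      // against a legacy DSE version. "g.V().limit(10)" is an illustrative traversal.
      ScriptGraphStatement statement =
          ScriptGraphStatement.newInstance("g.V().limit(10)").setSubProtocol("graphson-1.0");
      GraphResultSet result = session.execute(statement);
      result.forEach(node -> System.out.println(node));
    }
  }
}
```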
+    # Whether Graph paging should be enabled for all queries.
+    #
+    # If AUTO is set, the driver will decide whether or not to enable Graph paging
+    # based on the protocol version in use and the DSE version of all hosts. For this reason it is
+    # usually not necessary to change this setting.
+    #
+    # IMPORTANT: Paging for DSE Graph is only available in DSE 6.8 and higher, and
+    # requires protocol version DSE_V1 or higher and graphs created with the Native engine; enabling
+    # paging for clusters and graphs that do not meet this requirement may result in query failures.
+    #
+    # Supported values are: ENABLED, DISABLED, AUTO
+    paging-enabled = "AUTO"
+
+    paging-options {
+
+      # The page size.
+      #
+      # The value specified here is interpreted in number of rows. Interpretation in number
+      # of bytes is not supported for graph continuous paging queries.
+      #
+      # It controls how many rows will be retrieved simultaneously in a single
+      # network roundtrip (the goal being to avoid loading too many results in memory at the same
+      # time). If there are more results, additional requests will be used to retrieve them (either
+      # automatically if you iterate with the sync API, or explicitly with the async API's
+      # fetchNextPage method).
+      #
+      # The default is the same as the driver's normal request page size,
+      # i.e., 5000 (rows).
+      #
+      # Required: yes
+      # Modifiable at runtime: yes, the new value will be used for continuous requests issued after
+      # the change
+      # Overridable in a profile: yes
+      page-size = ${datastax-java-driver.advanced.continuous-paging.page-size}
+
+      # The maximum number of pages to return.
+      #
+      # The default is zero, which means retrieve all pages.
+      #
+      # Required: yes
+      # Modifiable at runtime: yes, the new value will be used for continuous requests issued after
+      # the change
+      # Overridable in a profile: yes
+      max-pages = ${datastax-java-driver.advanced.continuous-paging.max-pages}
+
+      # The maximum number of pages per second.
+      #
+      # The default is zero, which means no limit.
+      #
+      # Required: yes
+      # Modifiable at runtime: yes, the new value will be used for continuous requests issued after
+      # the change
+      # Overridable in a profile: yes
+      max-pages-per-second = ${datastax-java-driver.advanced.continuous-paging.max-pages-per-second}
+
+      # The maximum number of pages that can be stored in the local queue.
+      #
+      # The default is 4 (inherited from advanced.continuous-paging.max-enqueued-pages below).
+      #
+      # Required: yes
+      # Modifiable at runtime: yes, the new value will be used for continuous requests issued after
+      # the change
+      # Overridable in a profile: yes
+      max-enqueued-pages = ${datastax-java-driver.advanced.continuous-paging.max-enqueued-pages}
+    }
+  }
+
+  # Continuous paging (DataStax Enterprise only)
+  advanced.continuous-paging {
+
+    # The page size.
+    #
+    # The value specified here can be interpreted in number of rows
+    # or in number of bytes, depending on the page-size-in-bytes option (see below).
+    #
+    # It controls how many rows (or how much data) will be retrieved simultaneously in a single
+    # network roundtrip (the goal being to avoid loading too many results in memory at the same
+    # time). If there are more results, additional requests will be used to retrieve them (either
+    # automatically if you iterate with the sync API, or explicitly with the async API's
+    # fetchNextPage method).
+    #
+    # The default is the same as the driver's normal request page size,
+    # i.e., 5000 (rows).
+    #
+    # Required: yes
+    # Modifiable at runtime: yes, the new value will be used for continuous requests issued after
+    # the change
+    # Overridable in a profile: yes
+    page-size = ${datastax-java-driver.basic.request.page-size}
+
+    # Whether the page-size option should be interpreted in number of rows or bytes.
+    #
+    # The default is false, i.e., the page size will be interpreted in number of rows.
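For context, a minimal sketch of what a continuous paging query looks like from application code, assuming the DSE-enabled 4.x API where CqlSession exposes executeContinuously() (the table and column names are placeholders):

```java
import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

public class ContinuousPagingExample {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // "ks.table" and the "message" column are illustrative placeholders.
      SimpleStatement statement = SimpleStatement.newInstance("SELECT message FROM ks.table");
      // The driver streams pages continuously from the coordinator, bounded by the
      // page-size / max-pages / max-enqueued-pages options of this section.
      ContinuousResultSet rows = session.executeContinuously(statement);
      for (Row row : rows) {
        System.out.println(row.getString("message"));
      }
    }
  }
}
```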
+ # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for continuous requests issued after + # the change + # Overridable in a profile: yes + page-size-in-bytes = false + + # The maximum number of pages to return. + # + # The default is zero, which means retrieve all pages. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for continuous requests issued after + # the change + # Overridable in a profile: yes + max-pages = 0 + + # Returns the maximum number of pages per second. + # + # The default is zero, which means no limit. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for continuous requests issued after + # the change + # Overridable in a profile: yes + max-pages-per-second = 0 + + # The maximum number of pages that can be stored in the local queue. + # + # This value must be positive. The default is 4. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for continuous requests issued after + # the change + # Overridable in a profile: yes + max-enqueued-pages = 4 + + # Timeouts for continuous paging. + # + # Note that there is no global timeout for continuous paging as there is + # for regular queries, because continuous paging queries can take an arbitrarily + # long time to complete. + # + # Instead, timeouts are applied to each exchange between the driver and the coordinator. In + # other words, if the driver decides to retry, all timeouts are reset. + timeout { + + # How long to wait for the coordinator to send the first page. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for continuous requests issued after + # the change + # Overridable in a profile: yes + first-page = 2 seconds + + # How long to wait for the coordinator to send subsequent pages. + # + # Required: yes + # Modifiable at runtime: yes, the new value will be used for continuous requests issued after + # the change + # Overridable in a profile: yes + other-pages = 1 second + + } + } + + # DataStax Insights + advanced.monitor-reporting { + # Whether to send monitoring events. + # + # The default is true. + # + # Required: no (defaults to true) + # Modifiable at runtime: no + # Overridable in a profile: no + enabled = true + } + advanced.metrics { + # Metrics Factory configuration. + factory { + # The class for the metrics factory. + # + # The driver provides out-of-the-box support for three metrics libraries: Dropwizard, + # Micrometer and MicroProfile Metrics. + # + # Dropwizard is the default metrics library in the driver; to use Dropwizard, this value + # should be left to its default, "DefaultMetricsFactory", or set to + # "DropwizardMetricsFactory". The only difference between the two is that the former will work + # even if Dropwizard is not present on the classpath (in which case it will silently disable + # metrics), while the latter requires its presence. + # + # To select Micrometer, set the value to "MicrometerMetricsFactory", and to select + # MicroProfile Metrics, set the value to "MicroProfileMetricsFactory". For these libraries to + # be used, you will also need to add an additional dependency: + # - Micrometer: org.apache.cassandra:java-driver-metrics-micrometer + # - MicroProfile: org.apache.cassandra:java-driver-metrics-microprofile + # + # If you would like to use another metrics library, set this value to the fully-qualified name + # of a class that implements com.datastax.oss.driver.internal.core.metrics.MetricsFactory. 
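For example, a hedged sketch of selecting Micrometer and enabling a few session metrics through the programmatic config API, assuming DefaultDriverOption constants that match the option paths in this file (METRICS_FACTORY_CLASS, METRICS_SESSION_ENABLED); the metric names come from the lists further down:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.util.Arrays;

public class MetricsConfigExample {
  public static void main(String[] args) {
    // Equivalent to setting advanced.metrics.factory.class and
    // advanced.metrics.session.enabled in application.conf.
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, "MicrometerMetricsFactory")
            .withStringList(
                DefaultDriverOption.METRICS_SESSION_ENABLED,
                Arrays.asList("cql-requests", "bytes-sent"))
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // Metrics are now collected and exposed through the configured registry.
    }
  }
}
```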
+ # + # It is also possible to use "NoopMetricsFactory", which forcibly disables metrics completely. + # In fact, "DefaultMetricsFactory" delegates to "DropwizardMetricsFactory" if Dropwizard is + # present on the classpath, or to "NoopMetricsFactory" if it isn't. + # + # Note: specifying a metrics factory is not enough to enable metrics; for the driver to + # actually start collecting metrics, you also need to specify which metrics to collect. See + # the following options for more information: + # - advanced.metrics.session.enabled + # - advanced.metrics.node.enabled + # + # See also the driver online manual for extensive instructions about how to configure metrics. + # + # Required: yes + # Modifiable at runtime: no + # Overridable in a profile: no + class = DefaultMetricsFactory + } + + # This section configures how metric ids are generated. A metric id is a unique combination of + # a metric name and metric tags. + id-generator { + + # The class name of a component implementing + # com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator. If it is not qualified, the + # driver assumes that it resides in the package com.datastax.oss.driver.internal.core.metrics. + # + # The driver ships with two built-in implementations: + # + # - DefaultMetricIdGenerator: generates identifiers composed solely of (unique) metric names; + # it does not generate tags. It is mostly suitable for use with metrics libraries that do + # not support tags, like Dropwizard. + # - TaggingMetricIdGenerator: generates identifiers composed of name and tags. It is mostly + # suitable for use with metrics libraries that support tags, like Micrometer or MicroProfile + # Metrics. + # + # For example, here is how each one of them generates identifiers for the session metric + # "bytes-sent", assuming that the session is named "s0": + # - DefaultMetricIdGenerator: name "s0.bytes-sent", tags: {}. + # - TaggingMetricIdGenerator: name "session.bytes-sent", tags: {"session":"s0"} + # + # Here is how each one of them generates identifiers for the node metric "bytes-sent", + # assuming that the session is named "s0", and the node's broadcast address is 10.1.2.3:9042: + # - DefaultMetricIdGenerator: name "s0.nodes.10_1_2_3:9042.bytes-sent", tags: {}. + # - TaggingMetricIdGenerator: name "nodes.bytes-sent", tags: { "session" : "s0", + # "node" : "\10.1.2.3:9042" } + # + # As shown above, both built-in implementations generate names that are path-like structures + # separated by dots. This is indeed the most common expected format by reporting tools. + # + # Required: yes + # Modifiable at runtime: no + # Overridable in a profile: no + class = DefaultMetricIdGenerator + + # An optional prefix to prepend to each generated metric name. + # + # The prefix should not start nor end with a dot or any other path separator; the following + # are two valid examples: "cassandra" or "myapp.prod.cassandra". 
+ # + # For example, if this prefix is set to "cassandra", here is how the session metric + # "bytes-sent" would be named, assuming that the session is named "s0": + # - with DefaultMetricIdGenerator: "cassandra.s0.bytes-sent" + # - with TaggingMetricIdGenerator: "cassandra.session.bytes-sent" + # + # Here is how the node metric "bytes-sent" would be named, assuming that the session is named + # "s0", and the node's broadcast address is 10.1.2.3:9042: + # - with DefaultMetricIdGenerator: "cassandra.s0.nodes.10_1_2_3:9042.bytes-sent" + # - with TaggingMetricIdGenerator: "cassandra.nodes.bytes-sent" + # + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + // prefix = "cassandra" + } + + histograms { + # Adds histogram buckets used to generate aggregable percentile approximations in monitoring + # systems that have query facilities to do so (e.g. Prometheus histogram_quantile, Atlas percentiles). + # + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + generate-aggregable = true + } + # The session-level metrics (all disabled by default). # # Required: yes @@ -794,10 +1511,12 @@ datastax-java-driver { # Overridable in a profile: no session { enabled = [ - # The number and rate of bytes sent for the entire session (exposed as a Meter). + # The number and rate of bytes sent for the entire session (exposed as a Meter if available, + # otherwise as a Counter). // bytes-sent, - # The number and rate of bytes received for the entire session (exposed as a Meter). + # The number and rate of bytes received for the entire session (exposed as a Meter if + # available, otherwise as a Counter). // bytes-received # The number of nodes to which the driver has at least one active connection (exposed as a @@ -814,7 +1533,7 @@ datastax-java-driver { # with a DriverTimeoutException (exposed as a Counter). // cql-client-timeouts, - # The size of the driver-side cache of CQL prepared statements. + # The size of the driver-side cache of CQL prepared statements (exposed as a Gauge). # # The cache uses weak values eviction, so this represents the number of PreparedStatement # instances that your application has created, and is still holding a reference to. Note @@ -837,14 +1556,47 @@ datastax-java-driver { # The number of times a request was rejected with a RequestThrottlingException (exposed as # a Counter) // throttling.errors, + + # The throughput and latency percentiles of DSE continuous CQL requests (exposed as a + # Timer). + # + # This metric is a session-level metric and corresponds to the overall duration of the + # session.executeContinuously() call, including any retry. + # + # Note that this metric is analogous to the OSS driver's 'cql-requests' metrics, but for + # continuous paging requests only. Continuous paging requests do not update the + # 'cql-requests' metric, because they are usually much longer. Only the following metrics + # are updated during a continuous paging request: + # + # - At node level: all the usual metrics available for normal CQL requests, such as + # 'cql-messages' and error-related metrics (but these are only updated for the first + # page of results); + # - At session level: only 'continuous-cql-requests' is updated (this metric). + // continuous-cql-requests, + + # The throughput and latency percentiles of Graph requests (exposed as a Timer). + # + # This metric is a session-level metric and corresponds to the overall duration of the + # session.execute(GraphStatement) call, including any retry. 
+ // graph-requests, + + # The number of graph requests that timed out -- that is, the + # session.execute(GraphStatement) call failed with a DriverTimeoutException (exposed as a + # Counter). + # + # Note that this metric is analogous to the OSS driver's 'cql-client-timeouts' metrics, but + # for Graph requests only. + // graph-client-timeouts + ] # Extra configuration (for the metrics that need it) - # Required: if the 'cql-requests' metric is enabled + # Required: if the 'cql-requests' metric is enabled, and Dropwizard or Micrometer is used. # Modifiable at runtime: no # Overridable in a profile: no cql-requests { + # The largest latency that we expect to record. # # This should be slightly higher than request.timeout (in theory, readings can't be higher @@ -852,14 +1604,21 @@ datastax-java-driver { # # This is used to scale internal data structures. If a higher recording is encountered at # runtime, it is discarded and a warning is logged. + # Valid for: Dropwizard, Micrometer. highest-latency = 3 seconds + # The shortest latency that we expect to record. This is used to scale internal data + # structures. + # Valid for: Micrometer. + lowest-latency = 1 millisecond + # The number of significant decimal digits to which internal structures will maintain # value resolution and separation (for example, 3 means that recordings up to 1 second # will be recorded with a resolution of 1 millisecond or better). # - # This must be between 0 and 5. If the value is out of range, it defaults to 3 and a - # warning is logged. + # For Dropwizard, this must be between 0 and 5. If the value is out of range, it defaults to + # 3 and a warning is logged. + # Valid for: Dropwizard, Micrometer. significant-digits = 3 # The interval at which percentile data is refreshed. @@ -878,16 +1637,58 @@ datastax-java-driver { # # Note that this does not apply to the total count and rates (those are updated in real # time). + # Valid for: Dropwizard. refresh-interval = 5 minutes + + # An optional list of latencies to track as part of the application's service-level + # objectives (SLOs). + # + # If defined, the histogram is guaranteed to contain these boundaries alongside other + # buckets used to generate aggregable percentile approximations. + # Valid for: Micrometer. + // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] + + # An optional list of percentiles to be published by Micrometer. Produces an additional time series for each requested percentile. + # This percentile is computed locally, and so can't be aggregated with percentiles computed across other dimensions (e.g. in a different instance) + # Valid for: Micrometer. + // publish-percentiles = [ 0.75, 0.95, 0.99 ] } - # Required: if the 'throttling.delay' metric is enabled + # Required: if the 'throttling.delay' metric is enabled, and Dropwizard or Micrometer is used. # Modifiable at runtime: no # Overridable in a profile: no throttling.delay { highest-latency = 3 seconds + lowest-latency = 1 millisecond + significant-digits = 3 + refresh-interval = 5 minutes + // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] + // publish-percentiles = [ 0.75, 0.95, 0.99 ] + } + + # Required: if the 'continuous-cql-requests' metric is enabled, and Dropwizard or Micrometer + # is used. 
+ # Modifiable at runtime: no + # Overridable in a profile: no + continuous-cql-requests { + highest-latency = 120 seconds + lowest-latency = 10 milliseconds significant-digits = 3 refresh-interval = 5 minutes + // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] + // publish-percentiles = [ 0.75, 0.95, 0.99 ] + } + + # Required: if the 'graph-requests' metric is enabled, and Dropwizard or Micrometer is used. + # Modifiable at runtime: no + # Overridable in a profile: no + graph-requests { + highest-latency = 12 seconds + lowest-latency = 1 millisecond + significant-digits = 3 + refresh-interval = 5 minutes + // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] + // publish-percentiles = [ 0.75, 0.95, 0.99 ] } } # The node-level metrics (all disabled by default). @@ -923,10 +1724,12 @@ datastax-java-driver { # See the description of the connection.max-orphan-requests option for more details. // pool.orphaned-streams, - # The number and rate of bytes sent to this node (exposed as a Meter). + # The number and rate of bytes sent to this node (exposed as a Meter if available, otherwise + # as a Counter). // bytes-sent, - # The number and rate of bytes received from this node (exposed as a Meter). + # The number and rate of bytes received from this node (exposed as a Meter if available, + # otherwise as a Counter). // bytes-received, # The throughput and latency percentiles of individual CQL messages sent to this node as @@ -1025,18 +1828,61 @@ datastax-java-driver { # to this node (exposed as a Counter). # Authentication errors are also logged at WARN level. // errors.connection.auth, + + # The throughput and latency percentiles of individual graph messages sent to this node as + # part of an overall request (exposed as a Timer). + # + # Note that this does not necessarily correspond to the overall duration of the + # session.execute() call, since the driver might query multiple nodes because of retries + # and speculative executions. Therefore a single "request" (as seen from a client of the + # driver) can be composed of more than one of the "messages" measured by this metric. + # + # Therefore this metric is intended as an insight into the performance of this particular + # node. For statistics on overall request completion, use the session-level graph-requests. + // graph-messages, ] # See cql-requests in the `session` section # - # Required: if the 'cql-messages' metric is enabled + # Required: if the 'cql-messages' metric is enabled, and Dropwizard or Micrometer is used. # Modifiable at runtime: no # Overridable in a profile: no cql-messages { highest-latency = 3 seconds + lowest-latency = 1 millisecond significant-digits = 3 refresh-interval = 5 minutes + // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] + // publish-percentiles = [ 0.75, 0.95, 0.99 ] } + + # See graph-requests in the `session` section + # + # Required: if the 'graph-messages' metric is enabled, and Dropwizard or Micrometer is used. + # Modifiable at runtime: no + # Overridable in a profile: no + graph-messages { + highest-latency = 3 seconds + lowest-latency = 1 millisecond + significant-digits = 3 + refresh-interval = 5 minutes + // slo = [ 100 milliseconds, 500 milliseconds, 1 second ] + // publish-percentiles = [ 0.75, 0.95, 0.99 ] + } + + # The time after which the node level metrics will be evicted. + # + # This is used to unregister stale metrics if a node leaves the cluster or gets a new address. 
+ # If the node does not come back up when this interval elapses, all its metrics are removed + # from the registry. + # + # The lowest allowed value is 5 minutes. If you try to set it lower, the driver will log a + # warning and use 5 minutes. + # + # Required: yes + # Modifiable at runtime: no + # Overridable in a profile: no + expire-after = 1 hour } } @@ -1161,7 +2007,7 @@ datastax-java-driver { max-events = 20 } - # Options relating to schema metadata (Cluster.getMetadata.getKeyspaces). + # Options relating to schema metadata (Session.getMetadata.getKeyspaces). # This metadata is exposed by the driver for informational purposes, and is also necessary for # token-aware routing. schema { @@ -1170,18 +2016,45 @@ datastax-java-driver { # # Required: yes # Modifiable at runtime: yes, the new value will be used for refreshes issued after the - # change. It can also be overridden programmatically via Cluster.setSchemaMetadataEnabled. + # change. It can also be overridden programmatically via Session.setSchemaMetadataEnabled. # Overridable in a profile: no enabled = true - # The list of keyspaces for which schema and token metadata should be maintained. If this - # property is absent or empty, all existing keyspaces are processed. + # The keyspaces for which schema and token metadata should be maintained. # - # Required: no + # Each element can be one of the following: + # 1. An exact name inclusion, for example "Ks1". If the name is case-sensitive, it must appear + # in its exact case. + # 2. An exact name exclusion, for example "!Ks1". + # 3. A regex inclusion, enclosed in slashes, for example "/^Ks.*/". The part between the + # slashes must follow the syntax rules of java.util.regex.Pattern. + # 4. A regex exclusion, for example "!/^Ks.*/". + # + # If the list is empty, or the option is unset, all keyspaces will match. Otherwise: + # + # If a keyspace matches an exact name inclusion, it is always included, regardless of what any + # other rule says. + # Otherwise, if it matches an exact name exclusion, it is always excluded, regardless of what + # any regex rule says. + # Otherwise, if there are regex rules: + # - if they're only inclusions, the keyspace must match at least one of them. + # - if they're only exclusions, the keyspace must match none of them. + # - if they're both, the keyspace must match at least one inclusion and none of the + # exclusions. + # + # If an element is malformed, or if its regex has a syntax error, a warning is logged and that + # single element is ignored. + # + # Try to use only exact name inclusions if possible. This allows the driver to filter on the + # server side with a WHERE IN clause. If you use any other rule, it has to fetch all system + # rows and filter on the client side. + # + # Required: no. The default value excludes all Cassandra and DSE system keyspaces. If the + # option is unset, this is interpreted as "include all keyspaces". # Modifiable at runtime: yes, the new value will be used for refreshes issued after the # change. # Overridable in a profile: no - // refreshed-keyspaces = [ "ks1", "ks2" ] + refreshed-keyspaces = [ "!system", "!/^system_.*/", "!/^dse_.*/", "!solr_admin", "!OpsCenter" ] # The timeout for the requests to the schema tables. # @@ -1222,7 +2095,7 @@ datastax-java-driver { } } - # Whether token metadata (Cluster.getMetadata.getTokenMap) is enabled. + # Whether token metadata (Session.getMetadata.getTokenMap) is enabled. 
# This metadata is exposed by the driver for informational purposes, and is also necessary for # token-aware routing. # If this is false, it will remain empty, or to the last known value. Note that its computation @@ -1374,10 +2247,31 @@ datastax-java-driver { # Overridable in a profile: no timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} } + + # How to build the cache of prepared statements. + prepared-cache { + # Whether to use weak references for the prepared statements cache values. + # + # If this option is absent, weak references will be used. + # + # Required: no + # Modifiable at runtime: no + # Overridable in a profile: no + // weak-values = true + } } # Options related to the Netty event loop groups used internally by the driver. advanced.netty { + + # Whether the threads created by the driver should be daemon threads. + # This will apply to the threads in io-group, admin-group, and the timer thread. + # + # Required: yes + # Modifiable at runtime: no + # Overridable in a profile: no + daemon = false + # The event loop group used for I/O operations (reading and writing to Cassandra nodes). # By default, threads in this group are named after the session name, "-io-" and an incrementing # counter, for example "s0-io-0". @@ -1451,13 +2345,6 @@ datastax-java-driver { # This is exposed mainly to facilitate tuning during development. You shouldn't have to adjust # this. advanced.coalescer { - # How many times the coalescer is allowed to reschedule itself when it did no work. - # - # Required: yes - # Modifiable at runtime: no - # Overridable in a profile: no - max-runs-with-no-work = 5 - # The reschedule interval. # # Required: yes @@ -1471,5 +2358,20 @@ datastax-java-driver { # olap { # basic.request.timeout = 5 seconds # } + + # An example configuration profile for graph requests. + // my-graph-profile-example { + // graph { + // read-consistency-level = LOCAL_QUORUM + // write-consistency-level = LOCAL_ONE + // } + // } + + # An example pre-defined configuration profile for OLAP graph queries. + // graph-olap { + // graph { + // traversal-source = "a" // traversal source needs to be set to "a" for OLAP queries. + // } + // } } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/datastax/dse/driver/Assertions.java b/core/src/test/java/com/datastax/dse/driver/Assertions.java new file mode 100644 index 00000000000..09f7b281f84 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/Assertions.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver; + +import org.apache.tinkerpop.gremlin.structure.io.Buffer; + +public class Assertions extends org.assertj.core.api.Assertions { + public static TinkerpopBufferAssert assertThat(Buffer actual) { + return new TinkerpopBufferAssert(actual); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java b/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java new file mode 100644 index 00000000000..65e58878dbc --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/DriverRunListener.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver; + +import static org.assertj.core.api.Assertions.fail; + +import org.junit.runner.Description; +import org.junit.runner.notification.RunListener; + +/** + * Common parent of all driver tests, to store common configuration and perform sanity checks. + * + * @see "maven-surefire-plugin configuration in pom.xml" + */ +public class DriverRunListener extends RunListener { + + @Override + public void testFinished(Description description) throws Exception { + // If a test interrupted the main thread silently, this can make later tests fail. Instead, we + // fail the test and clear the interrupt status. + // Note: Thread.interrupted() also clears the flag, which is what we want. + if (Thread.interrupted()) { + fail(description.getMethodName() + " interrupted the main thread"); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java b/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java new file mode 100644 index 00000000000..7d9aecc28ed --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/DseTestDataProviders.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver; + +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.tngtech.java.junit.dataprovider.DataProvider; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Arrays; +import java.util.stream.Stream; + +public class DseTestDataProviders { + + private static final ScriptGraphStatement UNDEFINED_IDEMPOTENCE_STATEMENT = + ScriptGraphStatement.newInstance("undefined idempotence"); + private static final ScriptGraphStatement IDEMPOTENT_STATEMENT = + ScriptGraphStatement.builder("idempotent").setIdempotence(true).build(); + private static final ScriptGraphStatement NON_IDEMPOTENT_STATEMENT = + ScriptGraphStatement.builder("non idempotent").setIdempotence(false).build(); + + @DataProvider + public static Object[][] allDseProtocolVersions() { + return concat(DseProtocolVersion.values()); + } + + @DataProvider + public static Object[][] allOssProtocolVersions() { + return concat(DefaultProtocolVersion.values()); + } + + @DataProvider + public static Object[][] allDseAndOssProtocolVersions() { + return concat(DefaultProtocolVersion.values(), DseProtocolVersion.values()); + } + + @DataProvider + public static Object[][] supportedGraphProtocols() { + return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; + } + + /** + * The combination of the default idempotence option and statement setting that produce an + * idempotent statement. + */ + @DataProvider + public static Object[][] idempotentGraphConfig() { + return new Object[][] { + new Object[] {true, UNDEFINED_IDEMPOTENCE_STATEMENT}, + new Object[] {false, IDEMPOTENT_STATEMENT}, + new Object[] {true, IDEMPOTENT_STATEMENT}, + }; + } + + /** + * The combination of the default idempotence option and statement setting that produce a non + * idempotent statement. + */ + @DataProvider + public static Object[][] nonIdempotentGraphConfig() { + return new Object[][] { + new Object[] {false, UNDEFINED_IDEMPOTENCE_STATEMENT}, + new Object[] {true, NON_IDEMPOTENT_STATEMENT}, + new Object[] {false, NON_IDEMPOTENT_STATEMENT}, + }; + } + + @DataProvider + public static Object[][] allDseProtocolVersionsAndSupportedGraphProtocols() { + return TestDataProviders.combine(allDseProtocolVersions(), supportedGraphProtocols()); + } + + @NonNull + private static Object[][] concat(Object[]... values) { + return Stream.of(values) + .flatMap(Arrays::stream) + .map(o -> new Object[] {o}) + .toArray(Object[][]::new); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java b/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java new file mode 100644 index 00000000000..7992dde4fea --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/DseTestFixtures.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.api.core.metadata.DseNodeProperties;
+import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata;
+import com.datastax.oss.driver.api.core.Version;
+import com.datastax.oss.driver.api.core.metadata.Metadata;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import com.datastax.oss.driver.internal.core.metadata.MetadataManager;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
+import com.datastax.oss.protocol.internal.ProtocolConstants;
+import com.datastax.oss.protocol.internal.response.result.ColumnSpec;
+import com.datastax.oss.protocol.internal.response.result.DefaultRows;
+import com.datastax.oss.protocol.internal.response.result.RawType;
+import com.datastax.oss.protocol.internal.response.result.Rows;
+import com.datastax.oss.protocol.internal.util.Bytes;
+import java.nio.ByteBuffer;
+import java.util.ArrayDeque;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.UUID;
+
+public class DseTestFixtures {
+
+  // Returns a single row, with a single "message" column with the value "hello, world"
+  public static Rows singleDseRow() {
+    DseRowsMetadata metadata =
+        new DseRowsMetadata(
+            ImmutableList.of(
+                new ColumnSpec(
+                    "ks",
+                    "table",
+                    "message",
+                    0,
+                    RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))),
+            null,
+            new int[] {},
+            null,
+            1,
+            true);
+    Queue<List<ByteBuffer>> data = new ArrayDeque<>();
+    data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64")));
+    return new DefaultRows(metadata, data);
+  }
+
+  // Returns 10 rows, each with a single "message" column with the value "hello, world"
+  public static Rows tenDseRows(int page, boolean last) {
+    DseRowsMetadata metadata =
+        new DseRowsMetadata(
+            ImmutableList.of(
+                new ColumnSpec(
+                    "ks",
+                    "table",
+                    "message",
+                    0,
+                    RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))),
+            last ? null : ByteBuffer.wrap(new byte[] {(byte) page}),
+            new int[] {},
+            null,
+            page,
+            last);
+    Queue<List<ByteBuffer>> data = new ArrayDeque<>();
+    for (int i = 0; i < 10; i++) {
+      data.add(ImmutableList.of(Bytes.fromHexString("0x68656C6C6F2C20776F726C64")));
+    }
+    return new DefaultRows(metadata, data);
+  }
+
+  public static DefaultDriverContext mockNodesInMetadataWithVersions(
+      DefaultDriverContext mockContext, boolean treatNullAsMissing, Version... dseVersions) {
+
+    // mock bits of the context
+    MetadataManager metadataManager = mock(MetadataManager.class);
+    Metadata metadata = mock(Metadata.class);
+    Map<UUID, Node> nodeMap = new HashMap<>((dseVersions != null) ?
dseVersions.length : 1); + if (dseVersions == null) { + Node node = mock(Node.class); + Map nodeExtras = new HashMap<>(1); + if (!treatNullAsMissing) { + // put an explicit null in for DSE_VERSION + nodeExtras.put(DseNodeProperties.DSE_VERSION, null); + } + nodeMap.put(UUID.randomUUID(), node); + when(node.getExtras()).thenReturn(nodeExtras); + } else { + for (Version dseVersion : dseVersions) { + // create a node with DSE version in its extra data + Node node = mock(Node.class); + Map nodeExtras = new HashMap<>(1); + if (dseVersion != null || !treatNullAsMissing) { + nodeExtras.put(DseNodeProperties.DSE_VERSION, dseVersion); + } + nodeMap.put(UUID.randomUUID(), node); + when(node.getExtras()).thenReturn(nodeExtras); + } + } + // return mocked data when requested + when(metadata.getNodes()).thenReturn(nodeMap); + when(metadataManager.getMetadata()).thenReturn(metadata); + when(mockContext.getMetadataManager()).thenReturn(metadataManager); + return mockContext; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java b/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java new file mode 100644 index 00000000000..278e5a65070 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/TinkerpopBufferAssert.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.protocol.internal.util.Bytes; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.assertj.core.api.AbstractAssert; + +public class TinkerpopBufferAssert extends AbstractAssert { + public TinkerpopBufferAssert(Buffer actual) { + super(actual, TinkerpopBufferAssert.class); + } + + public TinkerpopBufferAssert containsExactly(String hexString) { + + byte[] expectedBytes = Bytes.fromHexString(hexString).array(); + byte[] actualBytes = new byte[expectedBytes.length]; + actual.readBytes(actualBytes); + assertThat(actualBytes).containsExactly(expectedBytes); + assertThat(actual.readableBytes()).isEqualTo(0); + return this; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java new file mode 100644 index 00000000000..4cf8d43b748 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangePrecisionTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.time; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.ZonedDateTime; +import org.junit.Test; + +public class DateRangePrecisionTest { + + @Test + public void should_round_up() { + ZonedDateTime timestamp = ZonedDateTime.parse("2011-02-03T04:05:16.789Z"); + assertThat(DateRangePrecision.MILLISECOND.roundUp(timestamp)) + .isEqualTo("2011-02-03T04:05:16.789Z"); + assertThat(DateRangePrecision.SECOND.roundUp(timestamp)).isEqualTo("2011-02-03T04:05:16.999Z"); + assertThat(DateRangePrecision.MINUTE.roundUp(timestamp)).isEqualTo("2011-02-03T04:05:59.999Z"); + assertThat(DateRangePrecision.HOUR.roundUp(timestamp)).isEqualTo("2011-02-03T04:59:59.999Z"); + assertThat(DateRangePrecision.DAY.roundUp(timestamp)).isEqualTo("2011-02-03T23:59:59.999Z"); + assertThat(DateRangePrecision.MONTH.roundUp(timestamp)).isEqualTo("2011-02-28T23:59:59.999Z"); + assertThat(DateRangePrecision.YEAR.roundUp(timestamp)).isEqualTo("2011-12-31T23:59:59.999Z"); + } + + @Test + public void should_round_down() { + ZonedDateTime timestamp = ZonedDateTime.parse("2011-02-03T04:05:16.789Z"); + assertThat(DateRangePrecision.MILLISECOND.roundDown(timestamp)) + .isEqualTo("2011-02-03T04:05:16.789Z"); + assertThat(DateRangePrecision.SECOND.roundDown(timestamp)) + .isEqualTo("2011-02-03T04:05:16.000Z"); + assertThat(DateRangePrecision.MINUTE.roundDown(timestamp)) + .isEqualTo("2011-02-03T04:05:00.000Z"); + assertThat(DateRangePrecision.HOUR.roundDown(timestamp)).isEqualTo("2011-02-03T04:00:00.000Z"); + assertThat(DateRangePrecision.DAY.roundDown(timestamp)).isEqualTo("2011-02-03T00:00:00.000Z"); + assertThat(DateRangePrecision.MONTH.roundDown(timestamp)).isEqualTo("2011-02-01T00:00:00.000Z"); + assertThat(DateRangePrecision.YEAR.roundDown(timestamp)).isEqualTo("2011-01-01T00:00:00.000Z"); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java new file mode 100644 index 00000000000..b067c12cad0 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.time; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.internal.SerializationHelper; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.text.ParseException; +import java.time.temporal.ChronoField; +import java.util.function.Predicate; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class DateRangeTest { + + @Test + @UseDataProvider("rangeStrings") + public void should_parse_and_format(String source) throws Exception { + DateRange parsed = DateRange.parse(source); + assertThat(parsed.toString()).isEqualTo(source); + } + + @DataProvider + public static Object[][] rangeStrings() { + return new Object[][] { + {"[2011-01 TO 2015]"}, + {"[2010-01-02 TO 2015-05-05T13]"}, + {"[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]"}, + // leap year + {"[2010-01-01T15 TO 2016-02]"}, + // pre-epoch + {"[1500 TO 1501]"}, + {"[0001 TO 0001-01-02]"}, + {"[0000 TO 0000-01-02]"}, + {"[-0001 TO -0001-01-02]"}, + // unbounded + {"[* TO 2014-12-01]"}, + {"[1999 TO *]"}, + {"[* TO *]"}, + // single bound ranges + // AD/BC era boundary + {"0001-01-01"}, + {"-0001-01-01"}, + {"-0009"}, + {"2000-11"}, + {"*"} + }; + } + + @Test + public void should_use_proleptic_parser() throws Exception { + DateRange parsed = DateRange.parse("[0000 TO 0000-01-02]"); + assertThat(parsed.getLowerBound().getTimestamp().get(ChronoField.YEAR)).isEqualTo(0); + } + + @Test + public void should_fail_to_parse_invalid_strings() { + assertThatThrownBy(() -> DateRange.parse("foo")).matches(hasOffset(0)); + assertThatThrownBy(() -> DateRange.parse("[foo TO *]")).matches(hasOffset(1)); + assertThatThrownBy(() -> DateRange.parse("[* TO foo]")).matches(hasOffset(6)); + } + + private static Predicate hasOffset(int offset) { + return e -> ((ParseException) e).getErrorOffset() == offset; + } + + @Test + public void should_fail_to_parse_inverted_range() { + assertThatThrownBy(() -> DateRange.parse("[2001-01 TO 2000]")) + .hasMessage( + "Lower bound of a date range should be before upper bound, got: [2001-01 TO 2000]"); + } + + @Test + public void should_not_equate_single_date_open_to_both_open_range() throws Exception { + assertThat(DateRange.parse("*")).isNotEqualTo(DateRange.parse("[* TO *]")); + } + + @Test + public void should_not_equate_same_ranges_with_different_precisions() throws ParseException { + assertThat(DateRange.parse("[2001 TO 2002]")) + .isNotEqualTo(DateRange.parse("[2001-01 TO 2002-12]")); + } + + @Test + public void should_give_same_hashcode_to_equal_objects() throws ParseException { + assertThat(DateRange.parse("[2001 TO 2002]").hashCode()) + .isEqualTo(DateRange.parse("[2001 TO 2002]").hashCode()); + } + + @Test + public void should_serialize_and_deserialize() throws Exception { + DateRange initial = DateRange.parse("[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]"); + DateRange deserialized = SerializationHelper.serializeAndDeserialize(initial); + assertThat(deserialized).isEqualTo(initial); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java 
 b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java
new file mode 100644
index 00000000000..a890720a3ef
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/CqlCollectionTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph.predicates;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.datastax.oss.driver.shaded.guava.common.collect.Lists;
+import com.datastax.oss.driver.shaded.guava.common.collect.Sets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import org.apache.tinkerpop.gremlin.process.traversal.P;
+import org.junit.Test;
+
+public class CqlCollectionTest {
+
+  @Test
+  public void should_evaluate_contains() {
+    P<Collection<String>> contains = CqlCollection.contains("foo");
+    assertThat(contains.test(new HashSet<>())).isFalse();
+    assertThat(contains.test(new ArrayList<>())).isFalse();
+    assertThat(contains.test(Sets.newHashSet("foo"))).isTrue();
+    assertThat(contains.test(Lists.newArrayList("foo"))).isTrue();
+    assertThat(contains.test(Sets.newHashSet("bar"))).isFalse();
+    assertThat(contains.test(Lists.newArrayList("bar"))).isFalse();
+    assertThatThrownBy(() -> contains.test(null)).isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> CqlCollection.contains(null).test(Sets.newHashSet("foo")))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  public void should_evaluate_containsKey() {
+    P<Map<String, ?>> containsKey = CqlCollection.containsKey("foo");
+    assertThat(containsKey.test(new HashMap<>())).isFalse();
+    assertThat(containsKey.test(new LinkedHashMap<>())).isFalse();
+    assertThat(containsKey.test(ImmutableMap.of("foo", "bar"))).isTrue();
+    assertThat(containsKey.test(ImmutableMap.of("bar", "foo"))).isFalse();
+    assertThatThrownBy(() -> containsKey.test(null)).isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> CqlCollection.containsKey(null).test(ImmutableMap.of("foo", "bar")))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  public void should_evaluate_containsValue() {
+    P<Map<?, String>> containsValue = CqlCollection.containsValue("foo");
+    assertThat(containsValue.test(new HashMap<>())).isFalse();
+    assertThat(containsValue.test(new LinkedHashMap<>())).isFalse();
+    assertThat(containsValue.test(ImmutableMap.of("bar", "foo"))).isTrue();
+    assertThat(containsValue.test(ImmutableMap.of("foo", "bar"))).isFalse();
+    assertThatThrownBy(() -> containsValue.test(null)).isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> CqlCollection.containsValue(null).test(ImmutableMap.of("foo", "bar")))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  public void should_evaluate_entryEq() {
+    P<Map<String, String>> entryEq = CqlCollection.entryEq("foo", "bar");
+    assertThat(entryEq.test(new HashMap<>())).isFalse();
+    assertThat(entryEq.test(new LinkedHashMap<>())).isFalse();
+    assertThat(entryEq.test(ImmutableMap.of("foo", "bar"))).isTrue();
+    assertThat(entryEq.test(ImmutableMap.of("bar", "foo"))).isFalse();
+    assertThatThrownBy(() -> entryEq.test(null)).isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> CqlCollection.entryEq(null, "foo").test(ImmutableMap.of("foo", "bar")))
+        .isInstanceOf(IllegalArgumentException.class);
+    assertThatThrownBy(() -> CqlCollection.entryEq("foo", null).test(ImmutableMap.of("foo", "bar")))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java
new file mode 100644
index 00000000000..143aec97b78
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/GeoTest.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.api.core.graph.predicates; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.junit.Test; + +public class GeoTest { + + @Test + public void should_convert_units_to_degrees() { + assertThat(Geo.Unit.DEGREES.toDegrees(100.0)).isEqualTo(100.0); + assertThat(Geo.Unit.MILES.toDegrees(68.9722)).isEqualTo(0.9982455747535043); + assertThat(Geo.Unit.KILOMETERS.toDegrees(111.0)).isEqualTo(0.9982456082154465); + assertThat(Geo.Unit.METERS.toDegrees(111000.0)).isEqualTo(0.9982456082154464); + } + + @Test + public void should_test_if_point_is_inside_circle_with_cartesian_coordinates() { + P inside = Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951); + assertThat(inside.test(Point.fromCoordinates(40, 40))).isTrue(); + assertThat(inside.test(Point.fromCoordinates(40.1, 40))).isFalse(); + } + + @Test + public void should_test_if_point_is_inside_circle_with_geo_coordinates() { + P inside = + Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES); + assertThat(inside.test(Point.fromCoordinates(40, 40))).isTrue(); + assertThat(inside.test(Point.fromCoordinates(40.1, 40))).isFalse(); + } + + @Test + public void should_test_if_point_is_inside_polygon() { + P inside = + Geo.inside( + Polygon.builder() + .addRing( + Point.fromCoordinates(30, 30), + Point.fromCoordinates(40, 40), + Point.fromCoordinates(40, 30)) + .build()); + assertThat(inside.test(Point.fromCoordinates(35, 32))).isTrue(); + assertThat(inside.test(Point.fromCoordinates(33, 37))).isFalse(); + } + + @Test + public void should_build_line_string_from_coordinates() { + LineString lineString = Geo.lineString(1, 2, 3, 4, 5, 6); + assertThat(lineString.getPoints()) + .hasSize(3) + .contains(Point.fromCoordinates(1, 2)) + .contains(Point.fromCoordinates(3, 4)) + .contains(Point.fromCoordinates(5, 6)); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_build_line_string_if_not_enough_coordinates() { + Geo.lineString(1, 2); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_build_line_string_if_uneven_number_of_coordinates() { + Geo.lineString(1, 2, 3, 4, 5); + } + + @Test + public void should_build_polygon_from_coordinates() { + Polygon polygon = Geo.polygon(1, 2, 3, 4, 5, 6, 7, 8); + assertThat(polygon.getExteriorRing()) + .hasSize(4) + .contains(Point.fromCoordinates(1, 2)) + .contains(Point.fromCoordinates(3, 4)) + .contains(Point.fromCoordinates(5, 6)) + .contains(Point.fromCoordinates(7, 8)); + assertThat(polygon.getInteriorRings()).isEmpty(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_build_polygon_if_not_enough_coordinates() { + Geo.polygon(1, 2, 3, 4); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_build_polygon_if_uneven_number_of_coordinates() { + Geo.polygon(1, 2, 3, 4, 5, 6, 7); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java new file mode 100644 index 00000000000..591269e31ad --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/api/core/graph/predicates/SearchTest.java @@ -0,0 +1,116 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.predicates; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.apache.tinkerpop.gremlin.process.traversal.P; +import org.junit.Test; + +public class SearchTest { + + @Test + public void testToken() { + P p = Search.token("needle"); + assertThat(p.test("needle")).isTrue(); + assertThat(p.test("This is a needle in a haystack")).isTrue(); + assertThat(p.test("This is just the haystack")).isFalse(); + } + + @Test + public void testPrefix() { + P p = Search.prefix("abcd"); + assertThat(p.test("abcd")).isTrue(); + assertThat(p.test("abcdefg hijkl")).isTrue(); + assertThat(p.test("zabcd")).isFalse(); + } + + @Test + public void testTokenPrefix() { + P p = Search.tokenPrefix("abcd"); + assertThat(p.test("abcd")).isTrue(); + assertThat(p.test("abcdefg hijkl")).isTrue(); + assertThat(p.test("z abcd")).isTrue(); + assertThat(p.test("ab cd")).isFalse(); + } + + @Test + public void testRegex() { + P p = Search.regex("(foo|bar)"); + assertThat(p.test("foo")).isTrue(); + assertThat(p.test("bar")).isTrue(); + assertThat(p.test("foo bar")).isFalse(); + } + + @Test + public void testTokenRegex() { + P p = Search.tokenRegex("(foo|bar)"); + assertThat(p.test("foo")).isTrue(); + assertThat(p.test("bar")).isTrue(); + assertThat(p.test("foo bar")).isTrue(); + assertThat(p.test("foo bar qix")).isTrue(); + assertThat(p.test("qix")).isFalse(); + } + + @Test + public void testPhrase() { + P p = Search.phrase("Hello world", 2); + assertThat(p.test("Hello World")).isTrue(); + assertThat(p.test("Hello Big World")).isTrue(); + assertThat(p.test("Hello Big Wild World")).isTrue(); + assertThat(p.test("Hello The Big Wild World")).isFalse(); + assertThat(p.test("Goodbye world")).isFalse(); + } + + @Test + public void testPhraseFragment() { + // Tests JAVA-1744 + P p = Search.phrase("a b", 0); + assertThat(p.test("a b")).isTrue(); + assertThat(p.test("a")).isFalse(); + assertThat(p.test("b")).isFalse(); + } + + @Test + public void testFuzzy() { + P p = Search.fuzzy("abc", 1); + assertThat(p.test("abcd")).isTrue(); + assertThat(p.test("ab")).isTrue(); + assertThat(p.test("abce")).isTrue(); + assertThat(p.test("abdc")).isTrue(); + assertThat(p.test("badc")).isFalse(); + + // Make sure we do NOT calculate the Damerau–Levenshtein distance (2), but the optimal string + // alignment distance (3): + assertThat(Search.tokenFuzzy("ca", 2).test("abc")).isFalse(); + } + + @Test + public void testTokenFuzzy() { + P p = Search.tokenFuzzy("abc", 1); + assertThat(p.test("foo abcd")).isTrue(); + assertThat(p.test("foo ab")).isTrue(); + assertThat(p.test("foo abce")).isTrue(); + assertThat(p.test("foo abdc")).isTrue(); + assertThat(p.test("foo badc")).isFalse(); + + // Make sure 
we do NOT calculate the Damerau–Levenshtein distance (2), but the optimal string + // alignment distance (3): + assertThat(Search.tokenFuzzy("ca", 2).test("abc 123")).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java new file mode 100644 index 00000000000..d001f791e82 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTest.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal; + +import java.nio.file.Path; +import java.nio.file.Paths; + +public class DependencyCheckTest extends DependencyCheckTestBase { + + @Override + protected Path getDepsTxtPath() { + return Paths.get( + getBaseResourcePathString(), + "target", + "classes", + "com", + "datastax", + "dse", + "driver", + "internal", + "deps.txt"); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java new file mode 100644 index 00000000000..f2ce5513d65 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/DependencyCheckTestBase.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Properties; +import org.junit.Test; + +public abstract class DependencyCheckTestBase { + + private String baseResourcePath; + + protected DependencyCheckTestBase() { + Properties projectProperties = new Properties(); + try (InputStream is = this.getClass().getResourceAsStream("/project.properties")) { + projectProperties.load(is); + baseResourcePath = projectProperties.getProperty("project.basedir"); + } catch (IOException ioe) { + throw new AssertionError( + "Error retrieving \"project.basedir\" value from \"/project.properties\". Please check test resources in this project.", + ioe); + } + assert baseResourcePath != null; + } + + @Test + public void should_generate_deps_txt() { + assertThat(getDepsTxtPath()).exists(); + } + + protected final String getBaseResourcePathString() { + return baseResourcePath; + } + + protected abstract Path getDepsTxtPath(); +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java new file mode 100644 index 00000000000..9e4556e528d --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/context/DseStartupOptionsBuilderTest.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.context; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; +import com.datastax.oss.protocol.internal.request.Startup; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; + +@RunWith(DataProviderRunner.class) +public class DseStartupOptionsBuilderTest { + + private DefaultDriverContext driverContext; + + // Mocks for instantiating the DSE driver context + @Mock private DriverConfigLoader configLoader; + @Mock private DriverConfig driverConfig; + @Mock private DriverExecutionProfile defaultProfile; + + @Before + public void before() { + initMocks(this); + when(configLoader.getInitialConfig()).thenReturn(driverConfig); + when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); + when(defaultProfile.isDefined(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE)).thenReturn(true); + } + + private void buildContext(UUID clientId, String applicationName, String applicationVersion) { + this.driverContext = + new DefaultDriverContext( + configLoader, + ProgrammaticArguments.builder() + .withStartupClientId(clientId) + .withStartupApplicationName(applicationName) + .withStartupApplicationVersion(applicationVersion) + .build()); + } + + private void assertDefaultStartupOptions(Startup startup) { + assertThat(startup.options).containsEntry(Startup.CQL_VERSION_KEY, "3.0.0"); + assertThat(startup.options) + .containsEntry( + StartupOptionsBuilder.DRIVER_NAME_KEY, Session.OSS_DRIVER_COORDINATES.getName()); + assertThat(startup.options).containsKey(StartupOptionsBuilder.DRIVER_VERSION_KEY); + Version version = Version.parse(startup.options.get(StartupOptionsBuilder.DRIVER_VERSION_KEY)); + assertThat(version).isEqualTo(Session.OSS_DRIVER_COORDINATES.getVersion()); + assertThat(startup.options).containsKey(StartupOptionsBuilder.CLIENT_ID_KEY); + } + + @Test + public void should_build_startup_options_with_no_compression_if_undefined() { + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("none"); + buildContext(null, null, null); + Startup startup = new Startup(driverContext.getStartupOptions()); + assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); + assertDefaultStartupOptions(startup); + } + + @Test + @DataProvider({"lz4", "snappy"}) + public void should_build_startup_options_with_compression(String compression) { + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, 
"none")) + .thenReturn(compression); + buildContext(null, null, null); + Startup startup = new Startup(driverContext.getStartupOptions()); + // assert the compression option is present + assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, compression); + assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_NAME_KEY); + assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_VERSION_KEY); + assertDefaultStartupOptions(startup); + } + + @Test + public void should_fail_to_build_startup_options_with_invalid_compression() { + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("foobar"); + buildContext(null, null, null); + assertThatIllegalArgumentException() + .isThrownBy(() -> new Startup(driverContext.getStartupOptions())); + } + + @Test + public void should_build_startup_options_with_client_id() { + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("none"); + UUID customClientId = Uuids.random(); + buildContext(customClientId, null, null); + Startup startup = new Startup(driverContext.getStartupOptions()); + // assert the client id is present + assertThat(startup.options) + .containsEntry(StartupOptionsBuilder.CLIENT_ID_KEY, customClientId.toString()); + assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); + assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_NAME_KEY); + assertThat(startup.options).doesNotContainKey(StartupOptionsBuilder.APPLICATION_VERSION_KEY); + assertDefaultStartupOptions(startup); + } + + @Test + public void should_build_startup_options_with_application_version_and_name() { + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("none"); + buildContext(null, "Custom_App_Name", "Custom_App_Version"); + Startup startup = new Startup(driverContext.getStartupOptions()); + // assert the app name and version are present + assertThat(startup.options) + .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name"); + assertThat(startup.options) + .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); + assertThat(startup.options).doesNotContainKey(Startup.COMPRESSION_KEY); + assertDefaultStartupOptions(startup); + } + + @Test + public void should_build_startup_options_with_all_options() { + // mock config to specify "snappy" compression + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("snappy"); + + UUID customClientId = Uuids.random(); + + buildContext(customClientId, "Custom_App_Name", "Custom_App_Version"); + Startup startup = new Startup(driverContext.getStartupOptions()); + assertThat(startup.options) + .containsEntry(StartupOptionsBuilder.CLIENT_ID_KEY, customClientId.toString()) + .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name") + .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); + assertThat(startup.options).containsEntry(Startup.COMPRESSION_KEY, "snappy"); + assertDefaultStartupOptions(startup); + } + + @Test + public void should_use_configuration_when_no_programmatic_values_provided() { + when(defaultProfile.getString(DseDriverOption.APPLICATION_NAME, null)) + .thenReturn("Config_App_Name"); + when(defaultProfile.getString(DseDriverOption.APPLICATION_VERSION, null)) + .thenReturn("Config_App_Version"); + 
when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("none"); + + buildContext(null, null, null); + Startup startup = new Startup(driverContext.getStartupOptions()); + + assertThat(startup.options) + .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Config_App_Name") + .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Config_App_Version"); + } + + @Test + public void should_ignore_configuration_when_programmatic_values_provided() { + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn("none"); + + buildContext(null, "Custom_App_Name", "Custom_App_Version"); + Startup startup = new Startup(driverContext.getStartupOptions()); + + assertThat(startup.options) + .containsEntry(StartupOptionsBuilder.APPLICATION_NAME_KEY, "Custom_App_Name") + .containsEntry(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "Custom_App_Version"); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java new file mode 100644 index 00000000000..1edb7c183bf --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerNodeTargetingTest.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.dse.driver.internal.core.cql.continuous;
+
+import static com.datastax.oss.driver.Assertions.assertThatStage;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.never;
+
+import com.datastax.dse.driver.DseTestDataProviders;
+import com.datastax.dse.driver.DseTestFixtures;
+import com.datastax.dse.driver.api.core.DseProtocolVersion;
+import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet;
+import com.datastax.oss.driver.api.core.AllNodesFailedException;
+import com.datastax.oss.driver.api.core.NodeUnavailableException;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.session.Request;
+import com.datastax.oss.driver.api.core.session.Session;
+import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness;
+import com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletionStage;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.Mockito;
+
+public class ContinuousCqlRequestHandlerNodeTargetingTest
+    extends ContinuousCqlRequestHandlerTestBase {
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_fail_if_targeted_node_not_available(DseProtocolVersion version) {
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .withEmptyPool(node3)
+            .withProtocolVersion(version)
+            .build()) {
+
+      LoadBalancingPolicyWrapper loadBalancingPolicy =
+          harness.getContext().getLoadBalancingPolicyWrapper();
+      InOrder invocations = Mockito.inOrder(loadBalancingPolicy);
+
+      // target node3, which should be unavailable
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture =
+          new ContinuousCqlRequestHandler(
+                  UNDEFINED_IDEMPOTENCE_STATEMENT.setNode(node3),
+                  harness.getSession(),
+                  harness.getContext(),
+                  "target node 3, unavailable")
+              .handle();
+
+      assertThatStage(resultSetFuture)
+          .isFailed(
+              error -> {
+                assertThat(error).isInstanceOf(AllNodesFailedException.class);
+                Map<Node, List<Throwable>> errors =
+                    ((AllNodesFailedException) error).getAllErrors();
+                assertThat(errors).hasSize(1);
+                List<Throwable> nodeErrors = errors.values().iterator().next();
+                assertThat(nodeErrors).singleElement().isInstanceOf(NodeUnavailableException.class);
+                invocations
+                    .verify(loadBalancingPolicy, never())
+                    .newQueryPlan(any(Request.class), anyString(), any(Session.class));
+              });
+
+      resultSetFuture =
+          new ContinuousCqlRequestHandler(
+                  UNDEFINED_IDEMPOTENCE_STATEMENT,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "no node targeting, should use node 1")
+              .handle();
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node1);
+                invocations
+                    .verify(loadBalancingPolicy)
+                    .newQueryPlan(
+                        UNDEFINED_IDEMPOTENCE_STATEMENT,
+                        DriverExecutionProfile.DEFAULT_NAME,
+                        harness.getSession());
+              });
+
+      resultSetFuture =
+          new ContinuousCqlRequestHandler(
+                  UNDEFINED_IDEMPOTENCE_STATEMENT,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "no node targeting, should use node 2")
+              .handle();
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node2);
+                invocations
+                    .verify(loadBalancingPolicy)
+                    .newQueryPlan(
+                        UNDEFINED_IDEMPOTENCE_STATEMENT,
+                        DriverExecutionProfile.DEFAULT_NAME,
+                        harness.getSession());
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_target_node(DseProtocolVersion version) {
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .withResponse(node3, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .withProtocolVersion(version)
+            .build()) {
+
+      LoadBalancingPolicyWrapper loadBalancingPolicy =
+          harness.getContext().getLoadBalancingPolicyWrapper();
+      InOrder invocations = Mockito.inOrder(loadBalancingPolicy);
+
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture =
+          new ContinuousCqlRequestHandler(
+                  UNDEFINED_IDEMPOTENCE_STATEMENT.setNode(node3),
+                  harness.getSession(),
+                  harness.getContext(),
+                  "target node 3")
+              .handle();
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node3);
+                invocations
+                    .verify(loadBalancingPolicy, never())
+                    .newQueryPlan(any(Request.class), anyString(), any(Session.class));
+              });
+
+      resultSetFuture =
+          new ContinuousCqlRequestHandler(
+                  UNDEFINED_IDEMPOTENCE_STATEMENT,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "no node targeting")
+              .handle();
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                assertThat(resultSet.getExecutionInfo().getCoordinator()).isEqualTo(node1);
+                invocations
+                    .verify(loadBalancingPolicy)
+                    .newQueryPlan(
+                        UNDEFINED_IDEMPOTENCE_STATEMENT,
+                        DriverExecutionProfile.DEFAULT_NAME,
+                        harness.getSession());
+              });
+    }
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java
new file mode 100644
index 00000000000..fd8d0ea1f98
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerReprepareTest.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.cql.continuous;
+
+import static com.datastax.oss.driver.Assertions.assertThat;
+import static com.datastax.oss.driver.Assertions.assertThatStage;
+import static com.datastax.oss.protocol.internal.Frame.NO_PAYLOAD;
+import static org.assertj.core.api.Assertions.catchThrowable;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyMap;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.DseTestDataProviders;
+import com.datastax.dse.driver.DseTestFixtures;
+import com.datastax.dse.driver.api.core.DseProtocolVersion;
+import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet;
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.servererrors.SyntaxError;
+import com.datastax.oss.driver.internal.core.adminrequest.AdminRequestHandler;
+import com.datastax.oss.driver.internal.core.adminrequest.UnexpectedResponseException;
+import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness;
+import com.datastax.oss.driver.internal.core.session.RepreparePayload;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.datastax.oss.protocol.internal.ProtocolConstants;
+import com.datastax.oss.protocol.internal.ProtocolConstants.ErrorCode;
+import com.datastax.oss.protocol.internal.request.Prepare;
+import com.datastax.oss.protocol.internal.request.Query;
+import com.datastax.oss.protocol.internal.response.Error;
+import com.datastax.oss.protocol.internal.response.error.Unprepared;
+import com.datastax.oss.protocol.internal.response.result.Prepared;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import io.netty.util.concurrent.Future;
+import java.nio.ByteBuffer;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import org.junit.Test;
+import org.mockito.Mock;
+
+public class ContinuousCqlRequestHandlerReprepareTest extends ContinuousCqlRequestHandlerTestBase {
+
+  private final byte[] preparedId = {1, 2, 3};
+  private final ByteBuffer preparedIdBuf = ByteBuffer.wrap(preparedId);
+
+  private final RepreparePayload repreparePayload =
+      new RepreparePayload(preparedIdBuf, "irrelevant", CqlIdentifier.fromCql("ks"), NO_PAYLOAD);
+
+  private final ConcurrentMap<ByteBuffer, RepreparePayload> repreparePayloads =
+      new ConcurrentHashMap<>(ImmutableMap.of(preparedIdBuf, repreparePayload));
+
+  private final Unprepared unprepared = new Unprepared("test", preparedId);
+  private final Prepared prepared = new Prepared(preparedId, null, null, null);
+  private final Error unrecoverable =
+      new Error(ProtocolConstants.ErrorCode.SYNTAX_ERROR, "bad query");
+  private final Error recoverable = new Error(ErrorCode.SERVER_ERROR, "sorry");
+
+  @Mock private Future<Void> future;
+
+  @Override
+  public void setup() {
+    super.setup();
+    when(future.isSuccess()).thenReturn(true);
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_prepare_and_retry_on_same_node(DseProtocolVersion version) {
+
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withResponse(node1, defaultFrameOf(unprepared))
+            .withProtocolVersion(version)
+            .build()) {
+
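+      // Stub the session's reprepare cache and answer the node's PREPARE with a successful
+      // response, so that the handler can re-prepare and then retry the query transparently.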
+      when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads);
+      when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any()))
+          .then(
+              invocation -> {
+                AdminRequestHandler admin = invocation.getArgument(3);
+                admin.onResponse(defaultFrameOf(prepared));
+                return future;
+              });
+
+      new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test")
+          .handle();
+
+      verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any());
+      // should have attempted to execute the query twice on the same node
+      verify(harness.getChannel(node1), times(2))
+          .write(any(Query.class), anyBoolean(), anyMap(), any());
+    }
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_abort_when_prepare_fails_with_unrecoverable_error(DseProtocolVersion version) {
+
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withResponse(node1, defaultFrameOf(unprepared))
+            .withProtocolVersion(version)
+            .build()) {
+
+      when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads);
+      when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any()))
+          .then(
+              invocation -> {
+                AdminRequestHandler admin = invocation.getArgument(3);
+                admin.onResponse(defaultFrameOf(unrecoverable));
+                return future;
+              });
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> page1Future = handler.handle();
+
+      verify(harness.getChannel(node1)).write(any(Query.class), anyBoolean(), anyMap(), any());
+      verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any());
+
+      assertThat(handler.getState()).isEqualTo(-2);
+      assertThat(page1Future).isCompletedExceptionally();
+      Throwable t = catchThrowable(() -> page1Future.toCompletableFuture().get());
+      assertThat(t).hasRootCauseInstanceOf(SyntaxError.class).hasMessageContaining("bad query");
+    }
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_try_next_node_when_prepare_fails_with_recoverable_error(
+      DseProtocolVersion version) {
+
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withResponse(node1, defaultFrameOf(unprepared))
+            .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .withProtocolVersion(version)
+            .build()) {
+
+      when(harness.getSession().getRepreparePayloads()).thenReturn(repreparePayloads);
+      when(harness.getChannel(node1).write(any(Prepare.class), anyBoolean(), anyMap(), any()))
+          .then(
+              invocation -> {
+                AdminRequestHandler admin = invocation.getArgument(3);
+                admin.onResponse(defaultFrameOf(recoverable));
+                return future;
+              });
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> page1Future = handler.handle();
+
+      verify(harness.getChannel(node1)).write(any(Query.class), anyBoolean(), anyMap(), any());
+      verify(harness.getChannel(node1)).write(any(Prepare.class), anyBoolean(), anyMap(), any());
+      // should have tried the next host
+      verify(harness.getChannel(node2)).write(any(Query.class), anyBoolean(), anyMap(), any());
+
+      assertThat(handler.getState()).isEqualTo(-1);
+      assertThatStage(page1Future)
+          .isSuccess(
+              rs
-> { + assertThat(rs.currentPage()).hasSize(1); + assertThat(rs.hasMorePages()).isFalse(); + assertThat(rs.getExecutionInfo().getCoordinator()).isEqualTo(node2); + assertThat(rs.getExecutionInfo().getErrors()) + .hasSize(1) + .allSatisfy( + entry -> { + assertThat(entry.getKey()).isEqualTo(node1); + assertThat(entry.getValue()) + .isInstanceOf(UnexpectedResponseException.class) + .hasMessageContaining(recoverable.toString()); + }); + }); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java new file mode 100644 index 00000000000..97fe82985de --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerRetryTest.java @@ -0,0 +1,600 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import static com.datastax.dse.driver.DseTestDataProviders.allDseProtocolVersions; +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.datastax.oss.driver.Assertions.assertThatStage; +import static com.datastax.oss.driver.TestDataProviders.combine; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atMost; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.DseTestFixtures; +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.connection.HeartbeatException; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; +import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; +import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; +import 
com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException;
+import com.datastax.oss.driver.api.core.servererrors.ServerError;
+import com.datastax.oss.driver.api.core.servererrors.UnavailableException;
+import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException;
+import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness;
+import com.datastax.oss.protocol.internal.ProtocolConstants;
+import com.datastax.oss.protocol.internal.response.Error;
+import com.datastax.oss.protocol.internal.response.error.ReadTimeout;
+import com.datastax.oss.protocol.internal.response.error.Unavailable;
+import com.datastax.oss.protocol.internal.response.error.WriteTimeout;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.TimeUnit;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class ContinuousCqlRequestHandlerRetryTest extends ContinuousCqlRequestHandlerTestBase {
+
+  @Test
+  @UseDataProvider("allIdempotenceConfigs")
+  public void should_always_try_next_node_if_bootstrapping(
+      boolean defaultIdempotence, Statement<?> statement, DseProtocolVersion version) {
+
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence)
+            .withResponse(
+                node1,
+                defaultFrameOf(
+                    new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")))
+            .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .build()) {
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-1);
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                Iterator<Row> rows = resultSet.currentPage().iterator();
+                assertThat(rows.hasNext()).isTrue();
+                assertThat(rows.next().getString("message")).isEqualTo("hello, world");
+
+                ExecutionInfo executionInfo = resultSet.getExecutionInfo();
+                assertThat(executionInfo.getCoordinator()).isEqualTo(node2);
+                assertThat(executionInfo.getErrors()).hasSize(1);
+                assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1);
+                assertThat(executionInfo.getErrors().get(0).getValue())
+                    .isInstanceOf(BootstrappingException.class);
+                assertThat(executionInfo.getIncomingPayload()).isEmpty();
+                assertThat(executionInfo.getPagingState()).isNull();
+                assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0);
+                assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0);
+                assertThat(executionInfo.getWarnings()).isEmpty();
+
+                Mockito.verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString()));
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider("allIdempotenceConfigs")
+  public void should_always_rethrow_query_validation_error(
+      boolean defaultIdempotence, Statement<?> statement, DseProtocolVersion version) {
+
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence)
+            .withResponse(
+                node1,
+                defaultFrameOf(new Error(ProtocolConstants.ErrorCode.INVALID, "mock message")))
+            .build()) {
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-2);
+
+      assertThatStage(resultSetFuture)
+          .isFailed(
+              error -> {
+                assertThat(error)
+                    .isInstanceOf(InvalidQueryException.class)
+                    .hasMessage("mock message");
+                Mockito.verifyNoMoreInteractions(harness.getContext().getRetryPolicy(anyString()));
+
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(DefaultNodeMetric.OTHER_ERRORS), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .updateTimer(
+                        eq(DefaultNodeMetric.CQL_MESSAGES),
+                        anyString(),
+                        anyLong(),
+                        eq(TimeUnit.NANOSECONDS));
+                Mockito.verifyNoMoreInteractions(nodeMetricUpdater1);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider("failureAndIdempotent")
+  public void should_try_next_node_if_idempotent_and_retry_policy_decides_so(
+      FailureScenario failureScenario,
+      boolean defaultIdempotence,
+      Statement<?> statement,
+      DseProtocolVersion version) {
+
+    RequestHandlerTestHarness.Builder harnessBuilder =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence);
+    failureScenario.mockRequestError(harnessBuilder, node1);
+    harnessBuilder.withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow()));
+
+    try (RequestHandlerTestHarness harness = harnessBuilder.build()) {
+      failureScenario.mockRetryPolicyVerdict(
+          harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT);
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-1);
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                Iterator<Row> rows = resultSet.currentPage().iterator();
+                assertThat(rows.hasNext()).isTrue();
+                assertThat(rows.next().getString("message")).isEqualTo("hello, world");
+
+                ExecutionInfo executionInfo = resultSet.getExecutionInfo();
+                assertThat(executionInfo.getCoordinator()).isEqualTo(node2);
+                assertThat(executionInfo.getErrors()).hasSize(1);
+                assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1);
+
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.errorMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(DefaultNodeMetric.RETRIES), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.retryMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1, atMost(1))
+                    .updateTimer(
+                        eq(DefaultNodeMetric.CQL_MESSAGES),
+                        anyString(),
+                        anyLong(),
+                        eq(TimeUnit.NANOSECONDS));
+                Mockito.verifyNoMoreInteractions(nodeMetricUpdater1);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider("failureAndIdempotent")
+  public void should_try_same_node_if_idempotent_and_retry_policy_decides_so(
+      FailureScenario failureScenario,
+      boolean defaultIdempotence,
+      Statement<?> statement,
+      DseProtocolVersion version) {
+
+    RequestHandlerTestHarness.Builder harnessBuilder =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence);
+    failureScenario.mockRequestError(harnessBuilder, node1);
+    harnessBuilder.withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow()));
+
+    try (RequestHandlerTestHarness harness = harnessBuilder.build()) {
+      failureScenario.mockRetryPolicyVerdict(
+          harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_SAME);
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-1);
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                Iterator<Row> rows = resultSet.currentPage().iterator();
+                assertThat(rows.hasNext()).isTrue();
+                assertThat(rows.next().getString("message")).isEqualTo("hello, world");
+
+                ExecutionInfo executionInfo = resultSet.getExecutionInfo();
+                assertThat(executionInfo.getCoordinator()).isEqualTo(node1);
+                assertThat(executionInfo.getErrors()).hasSize(1);
+                assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1);
+
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.errorMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(DefaultNodeMetric.RETRIES), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.retryMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1, atMost(2))
+                    .updateTimer(
+                        eq(DefaultNodeMetric.CQL_MESSAGES),
+                        anyString(),
+                        anyLong(),
+                        eq(TimeUnit.NANOSECONDS));
+                Mockito.verifyNoMoreInteractions(nodeMetricUpdater1);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider("failureAndIdempotent")
+  public void should_ignore_error_if_idempotent_and_retry_policy_decides_so(
+      FailureScenario failureScenario,
+      boolean defaultIdempotence,
+      Statement<?> statement,
+      DseProtocolVersion version) {
+
+    RequestHandlerTestHarness.Builder harnessBuilder =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence);
+    failureScenario.mockRequestError(harnessBuilder, node1);
+
+    try (RequestHandlerTestHarness harness = harnessBuilder.build()) {
+      failureScenario.mockRetryPolicyVerdict(
+          harness.getContext().getRetryPolicy(anyString()), RetryVerdict.IGNORE);
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-1);
+
+      assertThatStage(resultSetFuture)
+          .isSuccess(
+              resultSet -> {
+                Iterator<Row> rows = resultSet.currentPage().iterator();
+                assertThat(rows.hasNext()).isFalse();
+
+                ExecutionInfo executionInfo = resultSet.getExecutionInfo();
+                assertThat(executionInfo.getCoordinator()).isEqualTo(node1);
+                assertThat(executionInfo.getErrors()).hasSize(0);
+
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.errorMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(DefaultNodeMetric.IGNORES), anyString());
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.ignoreMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1, atMost(1))
+                    .updateTimer(
+                        eq(DefaultNodeMetric.CQL_MESSAGES),
+                        anyString(),
+                        anyLong(),
+                        eq(TimeUnit.NANOSECONDS));
+                Mockito.verifyNoMoreInteractions(nodeMetricUpdater1);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider("failureAndIdempotent")
+  public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so(
+      FailureScenario failureScenario,
+      boolean defaultIdempotence,
+      Statement<?> statement,
+      DseProtocolVersion version) {
+
+    RequestHandlerTestHarness.Builder harnessBuilder =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence);
+    failureScenario.mockRequestError(harnessBuilder, node1);
+
+    try (RequestHandlerTestHarness harness = harnessBuilder.build()) {
+
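+      // the policy rethrows, so the error should surface to the caller without further retries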
+      failureScenario.mockRetryPolicyVerdict(
+          harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW);
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-2);
+
+      assertThatStage(resultSetFuture)
+          .isFailed(
+              error -> {
+                assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass);
+
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.errorMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1, atMost(1))
+                    .updateTimer(
+                        eq(DefaultNodeMetric.CQL_MESSAGES),
+                        anyString(),
+                        anyLong(),
+                        eq(TimeUnit.NANOSECONDS));
+                Mockito.verifyNoMoreInteractions(nodeMetricUpdater1);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider("failureAndNotIdempotent")
+  public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_rethrows(
+      FailureScenario failureScenario,
+      boolean defaultIdempotence,
+      Statement<?> statement,
+      DseProtocolVersion version) {
+
+    // For two of the possible exceptions, the retry policy is called even if the statement is not
+    // idempotent
+    boolean shouldCallRetryPolicy =
+        (failureScenario.expectedExceptionClass.equals(UnavailableException.class)
+            || failureScenario.expectedExceptionClass.equals(ReadTimeoutException.class));
+
+    RequestHandlerTestHarness.Builder harnessBuilder =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withDefaultIdempotence(defaultIdempotence);
+    failureScenario.mockRequestError(harnessBuilder, node1);
+    harnessBuilder.withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow()));
+
+    try (RequestHandlerTestHarness harness = harnessBuilder.build()) {
+
+      if (shouldCallRetryPolicy) {
+        failureScenario.mockRetryPolicyVerdict(
+            harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW);
+      }
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              statement, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture = handler.handle();
+
+      assertThat(handler.getState()).isEqualTo(-2);
+
+      assertThatStage(resultSetFuture)
+          .isFailed(
+              error -> {
+                assertThat(error).isInstanceOf(failureScenario.expectedExceptionClass);
+                // When non idempotent, the policy is bypassed completely:
+                if (!shouldCallRetryPolicy) {
+                  Mockito.verifyNoMoreInteractions(
+                      harness.getContext().getRetryPolicy(anyString()));
+                }
+
+                Mockito.verify(nodeMetricUpdater1)
+                    .incrementCounter(eq(failureScenario.errorMetric), anyString());
+                Mockito.verify(nodeMetricUpdater1, atMost(1))
+                    .updateTimer(
+                        eq(DefaultNodeMetric.CQL_MESSAGES),
+                        anyString(),
+                        anyLong(),
+                        eq(TimeUnit.NANOSECONDS));
+                Mockito.verifyNoMoreInteractions(nodeMetricUpdater1);
+              });
+    }
+  }
+
+  /**
+   * Sets up the mocks to simulate an error from a node, and make the retry policy return a given
+   * decision for that error.
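+   * Each scenario also carries the node metrics (error, retry, ignore) that the tests verify.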
+   */
+  private abstract static class FailureScenario {
+    private final Class<? extends Throwable> expectedExceptionClass;
+    final DefaultNodeMetric errorMetric;
+    final DefaultNodeMetric retryMetric;
+    final DefaultNodeMetric ignoreMetric;
+
+    FailureScenario(
+        Class<? extends Throwable> expectedExceptionClass,
+        DefaultNodeMetric errorMetric,
+        DefaultNodeMetric retryMetric,
+        DefaultNodeMetric ignoreMetric) {
+      this.expectedExceptionClass = expectedExceptionClass;
+      this.errorMetric = errorMetric;
+      this.retryMetric = retryMetric;
+      this.ignoreMetric = ignoreMetric;
+    }
+
+    abstract void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node);
+
+    abstract void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict);
+  }
+
+  @DataProvider
+  public static Object[][] failure() {
+    return TestDataProviders.fromList(
+        new FailureScenario(
+            ReadTimeoutException.class,
+            DefaultNodeMetric.READ_TIMEOUTS,
+            DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT,
+            DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT) {
+          @Override
+          public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) {
+            builder.withResponse(
+                node,
+                defaultFrameOf(
+                    new ReadTimeout(
+                        "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 1, 2, true)));
+          }
+
+          @Override
+          public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) {
+            when(policy.onReadTimeoutVerdict(
+                    any(SimpleStatement.class),
+                    eq(DefaultConsistencyLevel.LOCAL_ONE),
+                    eq(2),
+                    eq(1),
+                    eq(true),
+                    eq(0)))
+                .thenReturn(verdict);
+          }
+        },
+        new FailureScenario(
+            WriteTimeoutException.class,
+            DefaultNodeMetric.WRITE_TIMEOUTS,
+            DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT,
+            DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT) {
+          @Override
+          public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) {
+            builder.withResponse(
+                node,
+                defaultFrameOf(
+                    new WriteTimeout(
+                        "mock message",
+                        ProtocolConstants.ConsistencyLevel.LOCAL_ONE,
+                        1,
+                        2,
+                        ProtocolConstants.WriteType.SIMPLE)));
+          }
+
+          @Override
+          public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) {
+            when(policy.onWriteTimeoutVerdict(
+                    any(SimpleStatement.class),
+                    eq(DefaultConsistencyLevel.LOCAL_ONE),
+                    eq(DefaultWriteType.SIMPLE),
+                    eq(2),
+                    eq(1),
+                    eq(0)))
+                .thenReturn(verdict);
+          }
+        },
+        new FailureScenario(
+            UnavailableException.class,
+            DefaultNodeMetric.UNAVAILABLES,
+            DefaultNodeMetric.RETRIES_ON_UNAVAILABLE,
+            DefaultNodeMetric.IGNORES_ON_UNAVAILABLE) {
+          @Override
+          public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) {
+            builder.withResponse(
+                node,
+                defaultFrameOf(
+                    new Unavailable(
+                        "mock message", ProtocolConstants.ConsistencyLevel.LOCAL_ONE, 2, 1)));
+          }
+
+          @Override
+          public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) {
+            when(policy.onUnavailableVerdict(
+                    any(SimpleStatement.class),
+                    eq(DefaultConsistencyLevel.LOCAL_ONE),
+                    eq(2),
+                    eq(1),
+                    eq(0)))
+                .thenReturn(verdict);
+          }
+        },
+        new FailureScenario(
+            ServerError.class,
+            DefaultNodeMetric.OTHER_ERRORS,
+            DefaultNodeMetric.RETRIES_ON_OTHER_ERROR,
+            DefaultNodeMetric.IGNORES_ON_OTHER_ERROR) {
+          @Override
+          public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) {
+            builder.withResponse(
+                node,
+                defaultFrameOf(
+                    new Error(ProtocolConstants.ErrorCode.SERVER_ERROR, "mock server error")));
+          }
+
+          @Override
+          public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) {
+            when(policy.onErrorResponseVerdict(
+                    any(SimpleStatement.class), any(ServerError.class), eq(0)))
+                .thenReturn(verdict);
+          }
+        },
+        new FailureScenario(
+            HeartbeatException.class,
+            DefaultNodeMetric.ABORTED_REQUESTS,
+            DefaultNodeMetric.RETRIES_ON_ABORTED,
+            DefaultNodeMetric.IGNORES_ON_ABORTED) {
+          @Override
+          public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node) {
+            builder.withResponseFailure(node, Mockito.mock(HeartbeatException.class));
+          }
+
+          @Override
+          public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) {
+            when(policy.onRequestAbortedVerdict(
+                    any(SimpleStatement.class), any(HeartbeatException.class), eq(0)))
+                .thenReturn(verdict);
+          }
+        });
+  }
+
+  @DataProvider
+  public static Object[][] failureAndIdempotent() {
+    return combine(failure(), excludeBatchStatements(idempotentConfig()), allDseProtocolVersions());
+  }
+
+  @DataProvider
+  public static Object[][] failureAndNotIdempotent() {
+    return combine(
+        failure(), excludeBatchStatements(nonIdempotentConfig()), allDseProtocolVersions());
+  }
+
+  @DataProvider
+  public static Object[][] allIdempotenceConfigs() {
+    return combine(
+        excludeBatchStatements(ContinuousCqlRequestHandlerTestBase.allIdempotenceConfigs()),
+        allDseProtocolVersions());
+  }
+
+  private static Object[][] excludeBatchStatements(Object[][] configs) {
+    List<Object[]> result = new ArrayList<>();
+    for (Object[] config : configs) {
+      if (!(config[1] instanceof BatchStatement)) {
+        result.add(config);
+      }
+    }
+    return result.toArray(new Object[][] {});
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java
new file mode 100644
index 00000000000..a816183e9ee
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTest.java
@@ -0,0 +1,529 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.cql.continuous;
+
+import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1;
+import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2;
+import static com.datastax.dse.protocol.internal.DseProtocolConstants.RevisionType.CANCEL_CONTINUOUS_PAGING;
+import static com.datastax.dse.protocol.internal.DseProtocolConstants.RevisionType.MORE_CONTINUOUS_PAGES;
+import static com.datastax.oss.driver.Assertions.assertThat;
+import static com.datastax.oss.driver.Assertions.assertThatStage;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyMap;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.matches;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.DseTestDataProviders;
+import com.datastax.dse.driver.DseTestFixtures;
+import com.datastax.dse.driver.api.core.DseProtocolVersion;
+import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet;
+import com.datastax.dse.protocol.internal.request.Revise;
+import com.datastax.oss.driver.api.core.DefaultProtocolVersion;
+import com.datastax.oss.driver.api.core.DriverTimeoutException;
+import com.datastax.oss.driver.api.core.NoNodeAvailableException;
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.api.core.cql.Row;
+import com.datastax.oss.driver.api.core.servererrors.BootstrappingException;
+import com.datastax.oss.driver.api.core.tracker.RequestTracker;
+import com.datastax.oss.driver.internal.core.ProtocolFeature;
+import com.datastax.oss.driver.internal.core.cql.PoolBehavior;
+import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness;
+import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout;
+import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures;
+import com.datastax.oss.protocol.internal.Message;
+import com.datastax.oss.protocol.internal.ProtocolConstants;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.Iterator;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class ContinuousCqlRequestHandlerTest extends ContinuousCqlRequestHandlerTestBase {
+
+  private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("test\\|\\d*\\|\\d");
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_complete_single_page_result(DseProtocolVersion version) {
+    try (RequestHandlerTestHarness harness =
+        continuousHarnessBuilder()
+            .withProtocolVersion(version)
+            .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow()))
+            .build()) {
+
+      CompletionStage<ContinuousAsyncResultSet> resultSetFuture =
+          new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT,
+
harness.getSession(), + harness.getContext(), + "test") + .handle(); + + assertThatStage(resultSetFuture) + .isSuccess( + resultSet -> { + Iterator rows = resultSet.currentPage().iterator(); + assertThat(rows.hasNext()).isTrue(); + assertThat(rows.next().getString("message")).isEqualTo("hello, world"); + ExecutionInfo executionInfo = resultSet.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node1); + assertThat(executionInfo.getErrors()).isEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getPagingState()).isNull(); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + }); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void should_complete_multi_page_result(DseProtocolVersion version) { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(version); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + ContinuousCqlRequestHandler handler = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); + CompletionStage page1Future = handler.handle(); + + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); + + assertThatStage(page1Future) + .isSuccess( + page1 -> { + assertThat(page1.hasMorePages()).isTrue(); + assertThat(page1.pageNumber()).isEqualTo(1); + Iterator rows = page1.currentPage().iterator(); + assertThat(rows.hasNext()).isTrue(); + assertThat(rows).toIterable().hasSize(10); + ExecutionInfo executionInfo = page1.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node1); + assertThat(executionInfo.getErrors()).isEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getPagingState()).isNotNull(); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + }); + + ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); + assertThat(handler.getPendingResult()).isNull(); + CompletionStage page2Future = page1.fetchNextPage(); + assertThat(handler.getPendingResult()).isNotNull(); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, true))); + + assertThatStage(page2Future) + .isSuccess( + page2 -> { + assertThat(page2.hasMorePages()).isFalse(); + assertThat(page2.pageNumber()).isEqualTo(2); + Iterator rows = page2.currentPage().iterator(); + assertThat(rows.hasNext()).isTrue(); + assertThat(rows).toIterable().hasSize(10); + ExecutionInfo executionInfo = page2.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node1); + assertThat(executionInfo.getErrors()).isEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getPagingState()).isNull(); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + }); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void 
should_fail_if_no_node_available(DseProtocolVersion version) { + try (RequestHandlerTestHarness harness = + continuousHarnessBuilder() + .withProtocolVersion(version) + // Mock no responses => this will produce an empty query plan + .build()) { + + CompletionStage resultSetFuture = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + + assertThatStage(resultSetFuture) + .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class)); + } + } + + @Test + @UseDataProvider(value = "allOssProtocolVersions", location = DseTestDataProviders.class) + public void should_throw_if_protocol_version_does_not_support_continuous_paging( + ProtocolVersion version) { + try (RequestHandlerTestHarness harness = + continuousHarnessBuilder().withProtocolVersion(version).build()) { + Mockito.when( + harness + .getContext() + .getProtocolVersionRegistry() + .supports(any(DefaultProtocolVersion.class), any(ProtocolFeature.class))) + .thenReturn(false); + assertThatThrownBy( + () -> + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle()) + .isInstanceOf(IllegalStateException.class) + .hasMessage("Cannot execute continuous paging requests with protocol version " + version); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void should_time_out_if_first_page_takes_too_long(DseProtocolVersion version) + throws Exception { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(version); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + CompletionStage resultSetFuture = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + + // mark the initial request as successful, which should schedule a timeout for the first page + node1Behavior.setWriteSuccess(); + CapturedTimeout page1Timeout = harness.nextScheduledTimeout(); + assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)) + .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos()); + + page1Timeout.task().run(page1Timeout); + + assertThatStage(resultSetFuture) + .isFailed( + t -> + assertThat(t) + .isInstanceOf(DriverTimeoutException.class) + .hasMessageContaining("Timed out waiting for page 1")); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void should_time_out_if_other_page_takes_too_long(DseProtocolVersion version) + throws Exception { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(version); + PoolBehavior node1Behavior = builder.customBehavior(node1); + + try (RequestHandlerTestHarness harness = builder.build()) { + + CompletionStage page1Future = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + + // mark the initial request as successful, which should schedule a timeout for the first page + node1Behavior.setWriteSuccess(); + CapturedTimeout page1Timeout = harness.nextScheduledTimeout(); + assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)) + .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos()); + + // the server replies with page 1, the corresponding timeout should be cancelled + 
node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false)));
+      assertThat(page1Timeout.isCancelled()).isTrue();
+
+      // request page 2; the queue is empty, so this should request more pages and schedule another
+      // timeout
+      ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future);
+      CompletionStage<ContinuousAsyncResultSet> page2Future = page1.fetchNextPage();
+      CapturedTimeout page2Timeout = harness.nextScheduledTimeout();
+      assertThat(page2Timeout.getDelay(TimeUnit.NANOSECONDS))
+          .isEqualTo(TIMEOUT_OTHER_PAGES.toNanos());
+
+      page2Timeout.task().run(page2Timeout);
+
+      assertThatStage(page2Future)
+          .isFailed(
+              t ->
+                  assertThat(t)
+                      .isInstanceOf(DriverTimeoutException.class)
+                      .hasMessageContaining("Timed out waiting for page 2"));
+    }
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_cancel_future_if_session_cancelled(DseProtocolVersion version) {
+    RequestHandlerTestHarness.Builder builder =
+        continuousHarnessBuilder().withProtocolVersion(version);
+    PoolBehavior node1Behavior = builder.customBehavior(node1);
+    try (RequestHandlerTestHarness harness = builder.build()) {
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> page1Future = handler.handle();
+
+      node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false)));
+      // will be discarded
+      node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false)));
+
+      ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future);
+      page1.cancel();
+
+      assertThat(handler.getState()).isEqualTo(-2);
+      assertThat(page1.fetchNextPage()).isCancelled();
+    }
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_cancel_session_if_future_cancelled(DseProtocolVersion version) {
+    RequestHandlerTestHarness.Builder builder =
+        continuousHarnessBuilder().withProtocolVersion(version);
+    PoolBehavior node1Behavior = builder.customBehavior(node1);
+    try (RequestHandlerTestHarness harness = builder.build()) {
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> page1Future = handler.handle();
+
+      page1Future.toCompletableFuture().cancel(true);
+      // this should be ignored
+      node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false)));
+      assertThat(handler.getState()).isEqualTo(-2);
+    }
+  }
+
+  @Test
+  @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
+  public void should_not_cancel_session_if_future_cancelled_but_already_done(
+      DseProtocolVersion version) {
+    RequestHandlerTestHarness.Builder builder =
+        continuousHarnessBuilder().withProtocolVersion(version);
+    PoolBehavior node1Behavior = builder.customBehavior(node1);
+    try (RequestHandlerTestHarness harness = builder.build()) {
+
+      ContinuousCqlRequestHandler handler =
+          new ContinuousCqlRequestHandler(
+              UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test");
+      CompletionStage<ContinuousAsyncResultSet> page1Future = handler.handle();
+
+      // this will complete the page 1 future
+      node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, true)));
+
+      // too late
+      page1Future.toCompletableFuture().cancel(true);
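+
+      // the cancellation is a no-op: the handler already terminated normally, so its state is the
+      // "done" marker (-1) rather than the "cancelled" marker (-2)
+      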
assertThat(handler.getState()).isEqualTo(-1); + } + } + + @Test + public void should_send_cancel_request_if_dse_v2() { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(DSE_V2); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + ContinuousCqlRequestHandler handler = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, harness.getSession(), harness.getContext(), "test"); + CompletionStage page1Future = handler.handle(); + + page1Future.toCompletableFuture().cancel(true); + assertThat(handler.getState()).isEqualTo(-2); + verify(node1Behavior.getChannel()) + .write(argThat(this::isCancelRequest), anyBoolean(), anyMap(), any()); + } + } + + @Test + public void should_toggle_channel_autoread_if_dse_v1() { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(DSE_V1); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + CompletionStage page1Future = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + + // simulate the arrival of 5 pages, the first one will complete page1 future above, + // the following 4 will be enqueued and should trigger autoread off + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(3, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(4, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(5, false))); + + verify(node1Behavior.getChannel().config()).setAutoRead(false); + + // simulate the retrieval of 2 pages, this should dequeue page 2 + // and trigger autoread on + ContinuousAsyncResultSet page1 = CompletableFutures.getCompleted(page1Future); + CompletableFutures.getCompleted(page1.fetchNextPage()); + + verify(node1Behavior.getChannel().config()).setAutoRead(true); + + // in DSE_V1, the backpressure request should not have been sent + verify(node1Behavior.getChannel(), never()) + .write(any(Revise.class), anyBoolean(), anyMap(), any()); + } + } + + @Test + public void should_send_backpressure_request_if_dse_v2() { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(DSE_V2); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + CompletionStage page1Future = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + + // simulate the arrival of 4 pages, the first one will complete page1 future above, + // the following 3 will be enqueued + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(3, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(4, false))); + + // simulate the retrieval of 2 pages, this should dequeue page 2 + // and trigger a backpressure request as the queue is now half empty (2/4) + ContinuousAsyncResultSet page1 
= CompletableFutures.getCompleted(page1Future); + CompletableFutures.getCompleted(page1.fetchNextPage()); + + verify(node1Behavior.getChannel()) + .write(argThat(this::isBackpressureRequest), anyBoolean(), anyMap(), any()); + // should not mess with autoread in dse v2 + verify(node1Behavior.getChannel().config(), never()).setAutoRead(anyBoolean()); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void should_invoke_request_tracker(DseProtocolVersion version) { + try (RequestHandlerTestHarness harness = + continuousHarnessBuilder() + .withProtocolVersion(version) + .withResponse( + node1, + defaultFrameOf( + new com.datastax.oss.protocol.internal.response.Error( + ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))) + .withResponse(node2, defaultFrameOf(DseTestFixtures.singleDseRow())) + .build()) { + + RequestTracker requestTracker = mock(RequestTracker.class); + when(harness.getContext().getRequestTracker()).thenReturn(requestTracker); + + CompletionStage resultSetFuture = + new ContinuousCqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + + assertThatStage(resultSetFuture) + .isSuccess( + resultSet -> { + Iterator rows = resultSet.currentPage().iterator(); + assertThat(rows.hasNext()).isTrue(); + assertThat(rows.next().getString("message")).isEqualTo("hello, world"); + ExecutionInfo executionInfo = resultSet.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node2); + assertThat(executionInfo.getErrors()).isNotEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getPagingState()).isNull(); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + + verify(requestTracker) + .onNodeError( + eq(UNDEFINED_IDEMPOTENCE_STATEMENT), + any(BootstrappingException.class), + anyLong(), + any(DriverExecutionProfile.class), + eq(node1), + matches(LOG_PREFIX_PER_REQUEST)); + verify(requestTracker) + .onNodeSuccess( + eq(UNDEFINED_IDEMPOTENCE_STATEMENT), + anyLong(), + any(DriverExecutionProfile.class), + eq(node2), + matches(LOG_PREFIX_PER_REQUEST)); + verify(requestTracker) + .onSuccess( + eq(UNDEFINED_IDEMPOTENCE_STATEMENT), + anyLong(), + any(DriverExecutionProfile.class), + eq(node2), + matches(LOG_PREFIX_PER_REQUEST)); + verifyNoMoreInteractions(requestTracker); + }); + } + } + + private boolean isBackpressureRequest(Message argument) { + return argument instanceof Revise && ((Revise) argument).revisionType == MORE_CONTINUOUS_PAGES; + } + + private boolean isCancelRequest(Message argument) { + return argument instanceof Revise + && ((Revise) argument).revisionType == CANCEL_CONTINUOUS_PAGING; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java new file mode 100644 index 00000000000..04195f5faf0 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/ContinuousCqlRequestHandlerTestBase.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES; +import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE; +import static com.datastax.dse.driver.api.core.config.DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase; +import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; +import java.time.Duration; + +public abstract class ContinuousCqlRequestHandlerTestBase extends CqlRequestHandlerTestBase { + + static final Duration TIMEOUT_FIRST_PAGE = Duration.ofSeconds(2); + static final Duration TIMEOUT_OTHER_PAGES = Duration.ofSeconds(1); + + protected RequestHandlerTestHarness.Builder continuousHarnessBuilder() { + return new RequestHandlerTestHarness.Builder() { + @Override + public RequestHandlerTestHarness build() { + RequestHandlerTestHarness harness = super.build(); + DriverExecutionProfile config = harness.getContext().getConfig().getDefaultProfile(); + when(config.getDuration(CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE)) + .thenReturn(TIMEOUT_FIRST_PAGE); + when(config.getDuration(CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES)) + .thenReturn(TIMEOUT_OTHER_PAGES); + when(config.getInt(CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES)).thenReturn(4); + return harness; + } + }; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java new file mode 100644 index 00000000000..1e59559013f --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousAsyncResultSetTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.verify; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import org.assertj.core.api.ThrowableAssert.ThrowingCallable; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class DefaultContinuousAsyncResultSetTest { + + @Mock private ColumnDefinitions columnDefinitions; + @Mock private ExecutionInfo executionInfo; + @Mock private ContinuousCqlRequestHandler handler; + @Mock private CountingIterator rows; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + } + + @Test + public void should_fail_to_fetch_next_page_if_last() { + // Given + given(executionInfo.getPagingState()).willReturn(null); + DefaultContinuousAsyncResultSet resultSet = + new DefaultContinuousAsyncResultSet( + rows, columnDefinitions, 1, false, executionInfo, handler); + + // When + boolean hasMorePages = resultSet.hasMorePages(); + ThrowingCallable nextPage = resultSet::fetchNextPage; + + // Then + assertThat(hasMorePages).isFalse(); + assertThatThrownBy(nextPage) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("Can't call fetchNextPage() on the last page"); + } + + @Test + public void should_invoke_handler_to_fetch_next_page() { + // Given + CompletableFuture mockResultFuture = new CompletableFuture<>(); + given(handler.fetchNextPage()).willReturn(mockResultFuture); + DefaultContinuousAsyncResultSet resultSet = + new DefaultContinuousAsyncResultSet( + rows, columnDefinitions, 1, true, executionInfo, handler); + + // When + boolean hasMorePages = resultSet.hasMorePages(); + CompletionStage nextPageFuture = resultSet.fetchNextPage(); + + // Then + assertThat(hasMorePages).isTrue(); + verify(handler).fetchNextPage(); + assertThat(nextPageFuture).isEqualTo(mockResultFuture); + } + + @Test + public void should_invoke_handler_to_cancel() { + // Given + DefaultContinuousAsyncResultSet resultSet = + new DefaultContinuousAsyncResultSet( + rows, columnDefinitions, 1, true, executionInfo, handler); + // When + resultSet.cancel(); + + // Then + verify(handler).cancel(); + } + + @Test + public void should_report_remaining_rows() { + // Given + given(rows.remaining()).willReturn(42); + DefaultContinuousAsyncResultSet resultSet = + new DefaultContinuousAsyncResultSet( + rows, columnDefinitions, 1, true, executionInfo, handler); + + // When + int remaining = resultSet.remaining(); + Iterable currentPage = resultSet.currentPage(); + + // Then + assertThat(remaining).isEqualTo(42); + assertThat(currentPage.iterator()).isSameAs(rows); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java new file mode 100644 index 00000000000..2bfb4768e49 --- /dev/null +++ 
b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/DefaultContinuousResultSetTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.continuous; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import java.util.Arrays; +import java.util.Iterator; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import org.junit.Test; +import org.mockito.Mockito; + +public class DefaultContinuousResultSetTest { + + @Test + public void should_create_result_set_from_single_page() { + // Given + ContinuousAsyncResultSet page1 = mockPage(false, 0, 1, 2); + + // When + ResultSet resultSet = new DefaultContinuousResultSet(page1); + + // Then + assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); + assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); + assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); + + Iterator iterator = resultSet.iterator(); + + assertNextRow(iterator, 0); + assertNextRow(iterator, 1); + assertNextRow(iterator, 2); + + assertThat(iterator.hasNext()).isFalse(); + } + + @Test + public void should_create_result_set_from_multiple_pages() { + // Given + ContinuousAsyncResultSet page1 = mockPage(true, 0, 1, 2); + ContinuousAsyncResultSet page2 = mockPage(true, 3, 4, 5); + ContinuousAsyncResultSet page3 = mockPage(false, 6, 7, 8); + + complete(page1.fetchNextPage(), page2); + complete(page2.fetchNextPage(), page3); + + // When + ResultSet resultSet = new DefaultContinuousResultSet(page1); + + // Then + assertThat(resultSet.iterator().hasNext()).isTrue(); + + assertThat(resultSet.getColumnDefinitions()).isSameAs(page1.getColumnDefinitions()); + assertThat(resultSet.getExecutionInfo()).isSameAs(page1.getExecutionInfo()); + assertThat(resultSet.getExecutionInfos()).containsExactly(page1.getExecutionInfo()); + + Iterator iterator = resultSet.iterator(); + + assertNextRow(iterator, 0); + assertNextRow(iterator, 1); + assertNextRow(iterator, 2); + + assertThat(iterator.hasNext()).isTrue(); + // This should have triggered the fetch of page2 + assertThat(resultSet.getExecutionInfo()).isEqualTo(page2.getExecutionInfo()); + assertThat(resultSet.getExecutionInfos()) + .containsExactly(page1.getExecutionInfo(), page2.getExecutionInfo()); + + 
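// rows 3 to 5 were mocked as the contents of page 2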
+      assertNextRow(iterator, 3);
+      assertNextRow(iterator, 4);
+      assertNextRow(iterator, 5);
+
+      assertThat(iterator.hasNext()).isTrue();
+      // This should have triggered the fetch of page3
+      assertThat(resultSet.getExecutionInfo()).isEqualTo(page3.getExecutionInfo());
+      assertThat(resultSet.getExecutionInfos())
+          .containsExactly(
+              page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo());
+
+      assertNextRow(iterator, 6);
+      assertNextRow(iterator, 7);
+      assertNextRow(iterator, 8);
+  }
+
+  private static ContinuousAsyncResultSet mockPage(boolean nextPage, Integer... data) {
+    ContinuousAsyncResultSet page = Mockito.mock(ContinuousAsyncResultSet.class);
+
+    ColumnDefinitions columnDefinitions = Mockito.mock(ColumnDefinitions.class);
+    Mockito.when(page.getColumnDefinitions()).thenReturn(columnDefinitions);
+
+    ExecutionInfo executionInfo = Mockito.mock(ExecutionInfo.class);
+    Mockito.when(page.getExecutionInfo()).thenReturn(executionInfo);
+
+    if (nextPage) {
+      Mockito.when(page.hasMorePages()).thenReturn(true);
+      Mockito.when(page.fetchNextPage()).thenReturn(Mockito.spy(new CompletableFuture<>()));
+    } else {
+      Mockito.when(page.hasMorePages()).thenReturn(false);
+      Mockito.when(page.fetchNextPage()).thenThrow(new IllegalStateException());
+    }
+
+    Iterator<Integer> rows = Arrays.asList(data).iterator();
+    CountingIterator<Row> iterator =
+        new CountingIterator<Row>(data.length) {
+          @Override
+          protected Row computeNext() {
+            return rows.hasNext() ? mockRow(rows.next()) : endOfData();
+          }
+        };
+    Mockito.when(page.currentPage()).thenReturn(() -> iterator);
+    Mockito.when(page.remaining()).thenAnswer(invocation -> iterator.remaining());
+
+    return page;
+  }
+
+  private static Row mockRow(int index) {
+    Row row = Mockito.mock(Row.class);
+    Mockito.when(row.getInt(0)).thenReturn(index);
+    return row;
+  }
+
+  private static void complete(
+      CompletionStage<ContinuousAsyncResultSet> stage, ContinuousAsyncResultSet result) {
+    stage.toCompletableFuture().complete(result);
+  }
+
+  private static void assertNextRow(Iterator<Row> iterator, int expectedValue) {
+    assertThat(iterator.hasNext()).isTrue();
+    Row row0 = iterator.next();
+    assertThat(row0.getInt(0)).isEqualTo(expectedValue);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java
new file mode 100644
index 00000000000..0bfb00695d3
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/continuous/reactive/ContinuousCqlRequestReactiveProcessorTest.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.cql.continuous.reactive; + +import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.DseTestDataProviders; +import com.datastax.dse.driver.DseTestFixtures; +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.cql.continuous.reactive.ContinuousReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestAsyncProcessor; +import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestHandlerTestBase; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.PoolBehavior; +import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import io.reactivex.Flowable; +import java.util.List; +import org.junit.Test; + +public class ContinuousCqlRequestReactiveProcessorTest extends ContinuousCqlRequestHandlerTestBase { + + @Test + public void should_be_able_to_process_reactive_result_set() { + ContinuousCqlRequestReactiveProcessor processor = + new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); + assertThat( + processor.canProcess( + UNDEFINED_IDEMPOTENCE_STATEMENT, + ContinuousCqlRequestReactiveProcessor.CONTINUOUS_REACTIVE_RESULT_SET)) + .isTrue(); + } + + @Test + public void should_create_request_handler() { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(DSE_V1); + try (RequestHandlerTestHarness harness = builder.build()) { + ContinuousCqlRequestReactiveProcessor processor = + new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()); + assertThat( + processor.process( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test")) + .isInstanceOf(DefaultContinuousReactiveResultSet.class); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void should_complete_single_page_result(DseProtocolVersion version) { + try (RequestHandlerTestHarness harness = + continuousHarnessBuilder() + .withProtocolVersion(version) + .withResponse(node1, defaultFrameOf(DseTestFixtures.singleDseRow())) + .build()) { + + DefaultSession session = harness.getSession(); + InternalDriverContext context = harness.getContext(); + + ContinuousReactiveResultSet publisher = + new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()) + .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); + + List rows = Flowable.fromPublisher(publisher).toList().blockingGet(); + + assertThat(rows).hasSize(1); + ReactiveRow row = rows.get(0); + assertThat(row.getString("message")).isEqualTo("hello, world"); + ExecutionInfo executionInfo = row.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node1); + assertThat(executionInfo.getErrors()).isEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getPagingState()).isNull(); + 
assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + + Flowable execInfosFlowable = + Flowable.fromPublisher(publisher.getExecutionInfos()); + assertThat(execInfosFlowable.toList().blockingGet()).containsExactly(executionInfo); + + Flowable colDefsFlowable = + Flowable.fromPublisher(publisher.getColumnDefinitions()); + assertThat(colDefsFlowable.toList().blockingGet()) + .containsExactly(row.getColumnDefinitions()); + + Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); + assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(row.wasApplied()); + } + } + + @Test + @UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class) + public void should_complete_multi_page_result(DseProtocolVersion version) { + RequestHandlerTestHarness.Builder builder = + continuousHarnessBuilder().withProtocolVersion(version); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + DefaultSession session = harness.getSession(); + InternalDriverContext context = harness.getContext(); + + ContinuousReactiveResultSet publisher = + new ContinuousCqlRequestReactiveProcessor(new ContinuousCqlRequestAsyncProcessor()) + .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); + + Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); + rowsPublisher.subscribe(); + + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(2, true))); + + List rows = rowsPublisher.toList().blockingGet(); + assertThat(rows).hasSize(20); + + ReactiveRow first = rows.get(0); + ExecutionInfo firstExecutionInfo = first.getExecutionInfo(); + assertThat(firstExecutionInfo.getCoordinator()).isEqualTo(node1); + assertThat(firstExecutionInfo.getErrors()).isEmpty(); + assertThat(firstExecutionInfo.getIncomingPayload()).isEmpty(); + assertThat(firstExecutionInfo.getPagingState()).isNotNull(); + assertThat(firstExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(firstExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(firstExecutionInfo.getWarnings()).isEmpty(); + + ReactiveRow inSecondPage = rows.get(10); + ExecutionInfo secondExecutionInfo = inSecondPage.getExecutionInfo(); + assertThat(secondExecutionInfo.getCoordinator()).isEqualTo(node1); + assertThat(secondExecutionInfo.getErrors()).isEmpty(); + assertThat(secondExecutionInfo.getIncomingPayload()).isEmpty(); + assertThat(secondExecutionInfo.getPagingState()).isNull(); + assertThat(secondExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(secondExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(secondExecutionInfo.getWarnings()).isEmpty(); + + Flowable execInfosFlowable = + Flowable.fromPublisher(publisher.getExecutionInfos()); + assertThat(execInfosFlowable.toList().blockingGet()) + .containsExactly(firstExecutionInfo, secondExecutionInfo); + + Flowable colDefsFlowable = + Flowable.fromPublisher(publisher.getColumnDefinitions()); + assertThat(colDefsFlowable.toList().blockingGet()) + .containsExactly(first.getColumnDefinitions()); + + Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); + 
assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(first.wasApplied()); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java new file mode 100644 index 00000000000..a7a6bced9e8 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/CqlRequestReactiveProcessorTest.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import static com.datastax.dse.driver.DseTestFixtures.singleDseRow; +import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.DseTestDataProviders; +import com.datastax.dse.driver.DseTestFixtures; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; +import com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase; +import com.datastax.oss.driver.internal.core.cql.PoolBehavior; +import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import io.reactivex.Flowable; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.junit.Test; + +public class CqlRequestReactiveProcessorTest extends CqlRequestHandlerTestBase { + + @Test + public void should_be_able_to_process_reactive_result_set() { + CqlRequestReactiveProcessor processor = + new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); + assertThat( + processor.canProcess( + UNDEFINED_IDEMPOTENCE_STATEMENT, CqlRequestReactiveProcessor.REACTIVE_RESULT_SET)) + .isTrue(); + } + + @Test + public void should_create_request_handler() { + RequestHandlerTestHarness.Builder builder = + RequestHandlerTestHarness.builder().withProtocolVersion(DSE_V1); 
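+    // the protocol version is not the focus of this test; the data-provider-driven tests below
+    // exercise all DSE and OSS versions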
+ try (RequestHandlerTestHarness harness = builder.build()) { + CqlRequestReactiveProcessor processor = + new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()); + assertThat( + processor.process( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test")) + .isInstanceOf(DefaultReactiveResultSet.class); + } + } + + @Test + @UseDataProvider(value = "allDseAndOssProtocolVersions", location = DseTestDataProviders.class) + public void should_complete_single_page_result(ProtocolVersion version) { + try (RequestHandlerTestHarness harness = + RequestHandlerTestHarness.builder() + .withProtocolVersion(version) + .withResponse(node1, defaultFrameOf(singleDseRow())) + .build()) { + + DefaultSession session = harness.getSession(); + InternalDriverContext context = harness.getContext(); + + ReactiveResultSet publisher = + new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()) + .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); + + List rows = Flowable.fromPublisher(publisher).toList().blockingGet(); + + assertThat(rows).hasSize(1); + ReactiveRow row = rows.get(0); + assertThat(row.getString("message")).isEqualTo("hello, world"); + ExecutionInfo executionInfo = row.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node1); + assertThat(executionInfo.getErrors()).isEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getPagingState()).isNull(); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + + Flowable execInfosFlowable = + Flowable.fromPublisher(publisher.getExecutionInfos()); + assertThat(execInfosFlowable.toList().blockingGet()).containsExactly(executionInfo); + + Flowable colDefsFlowable = + Flowable.fromPublisher(publisher.getColumnDefinitions()); + assertThat(colDefsFlowable.toList().blockingGet()) + .containsExactly(row.getColumnDefinitions()); + + Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); + assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(row.wasApplied()); + } + } + + @Test + @UseDataProvider(value = "allDseAndOssProtocolVersions", location = DseTestDataProviders.class) + public void should_complete_multi_page_result(ProtocolVersion version) { + RequestHandlerTestHarness.Builder builder = + RequestHandlerTestHarness.builder().withProtocolVersion(version); + PoolBehavior node1Behavior = builder.customBehavior(node1); + try (RequestHandlerTestHarness harness = builder.build()) { + + DefaultSession session = harness.getSession(); + InternalDriverContext context = harness.getContext(); + + // The 2nd page is obtained by an "external" call to session.executeAsync(), + // so we need to mock that. 
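+      // (the first page, by contrast, arrives through node1's simulated pool behavior below)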
+ CompletableFuture page2Future = new CompletableFuture<>(); + when(session.executeAsync(any(Statement.class))).thenAnswer(invocation -> page2Future); + ExecutionInfo mockInfo = mock(ExecutionInfo.class); + + ReactiveResultSet publisher = + new CqlRequestReactiveProcessor(new CqlRequestAsyncProcessor()) + .process(UNDEFINED_IDEMPOTENCE_STATEMENT, session, context, "test"); + + Flowable rowsPublisher = Flowable.fromPublisher(publisher).cache(); + rowsPublisher.subscribe(); + + // emulate arrival of page 1 + node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false))); + + // emulate arrival of page 2 following the call to session.executeAsync() + page2Future.complete( + Conversions.toResultSet( + DseTestFixtures.tenDseRows(2, true), + mockInfo, + harness.getSession(), + harness.getContext())); + + List rows = rowsPublisher.toList().blockingGet(); + assertThat(rows).hasSize(20); + + ReactiveRow first = rows.get(0); + ExecutionInfo firstExecutionInfo = first.getExecutionInfo(); + assertThat(firstExecutionInfo.getCoordinator()).isEqualTo(node1); + assertThat(firstExecutionInfo.getErrors()).isEmpty(); + assertThat(firstExecutionInfo.getIncomingPayload()).isEmpty(); + assertThat(firstExecutionInfo.getPagingState()).isNotNull(); + assertThat(firstExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(firstExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(firstExecutionInfo.getWarnings()).isEmpty(); + + ReactiveRow inSecondPage = rows.get(10); + ExecutionInfo secondExecutionInfo = inSecondPage.getExecutionInfo(); + assertThat(secondExecutionInfo).isSameAs(mockInfo); + + Flowable execInfosFlowable = + Flowable.fromPublisher(publisher.getExecutionInfos()); + assertThat(execInfosFlowable.toList().blockingGet()) + .containsExactly(firstExecutionInfo, secondExecutionInfo); + + Flowable colDefsFlowable = + Flowable.fromPublisher(publisher.getColumnDefinitions()); + assertThat(colDefsFlowable.toList().blockingGet()) + .containsExactly(first.getColumnDefinitions()); + + Flowable wasAppliedFlowable = Flowable.fromPublisher(publisher.wasApplied()); + assertThat(wasAppliedFlowable.toList().blockingGet()).containsExactly(first.wasApplied()); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java new file mode 100644 index 00000000000..a9ff5222460 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/DefaultReactiveResultSetTckTest.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.dse.driver.internal.core.cql.reactive;
+
+import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow;
+import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
+import io.reactivex.Flowable;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+public class DefaultReactiveResultSetTckTest extends PublisherVerification<ReactiveRow> {
+
+  public DefaultReactiveResultSetTckTest() {
+    super(new TestEnvironment());
+  }
+
+  @Override
+  public Publisher<ReactiveRow> createPublisher(long elements) {
+    // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE.
+    // Past 3 elements it never checks how many elements have been effectively produced,
+    // so we can safely cap at, say, 20.
+    int effective = (int) Math.min(elements, 20L);
+    return new DefaultReactiveResultSet(() -> createResults(effective));
+  }
+
+  @Override
+  public Publisher<ReactiveRow> createFailedPublisher() {
+    DefaultReactiveResultSet publisher = new DefaultReactiveResultSet(() -> createResults(1));
+    // Since our publisher does not support multiple
+    // subscriptions, we use that to create a failed publisher.
+    publisher.subscribe(new TestSubscriber<>());
+    return publisher;
+  }
+
+  private static CompletableFuture<AsyncResultSet> createResults(int elements) {
+    CompletableFuture<AsyncResultSet> previous = null;
+    if (elements > 0) {
+      // create pages of 5 elements each to exercise pagination
+      List<Integer> pages =
+          Flowable.range(0, elements).buffer(5).map(List::size).toList().blockingGet();
+      Collections.reverse(pages);
+      for (Integer size : pages) {
+        CompletableFuture<AsyncResultSet> future = new CompletableFuture<>();
+        future.complete(new MockAsyncResultSet(size, previous));
+        previous = future;
+      }
+    } else {
+      previous = new CompletableFuture<>();
+      previous.complete(new MockAsyncResultSet(0, null));
+    }
+    return previous;
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java
new file mode 100644
index 00000000000..3783a2c6922
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockAsyncResultSet.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import static org.mockito.Mockito.mock; + +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.Row; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CompletionStage; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.assertj.core.util.Lists; + +public class MockAsyncResultSet implements AsyncResultSet { + + private final List rows; + private final Iterator iterator; + private final CompletionStage nextPage; + private final ExecutionInfo executionInfo = mock(ExecutionInfo.class); + private final ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class); + private int remaining; + + public MockAsyncResultSet(int size, CompletionStage nextPage) { + this(IntStream.range(0, size).boxed().map(MockRow::new).collect(Collectors.toList()), nextPage); + } + + public MockAsyncResultSet(List rows, CompletionStage nextPage) { + this.rows = rows; + iterator = rows.iterator(); + remaining = rows.size(); + this.nextPage = nextPage; + } + + @Override + public Row one() { + Row next = iterator.next(); + remaining--; + return next; + } + + @Override + public int remaining() { + return remaining; + } + + @NonNull + @Override + public List currentPage() { + return Lists.newArrayList(rows); + } + + @Override + public boolean hasMorePages() { + return nextPage != null; + } + + @NonNull + @Override + public CompletionStage fetchNextPage() throws IllegalStateException { + return nextPage; + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + return columnDefinitions; + } + + @NonNull + @Override + public ExecutionInfo getExecutionInfo() { + return executionInfo; + } + + @Override + public boolean wasApplied() { + return true; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java new file mode 100644 index 00000000000..792bfb432f6 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/MockRow.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import static org.mockito.Mockito.mock; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.detach.AttachmentPoint; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; + +class MockRow implements Row { + + private int index; + + MockRow(int index) { + this.index = index; + } + + @Override + public int size() { + return 0; + } + + @NonNull + @Override + public CodecRegistry codecRegistry() { + return mock(CodecRegistry.class); + } + + @NonNull + @Override + public ProtocolVersion protocolVersion() { + return DefaultProtocolVersion.V4; + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + return EmptyColumnDefinitions.INSTANCE; + } + + @NonNull + @Override + public List allIndicesOf(@NonNull String name) { + return Collections.singletonList(0); + } + + @Override + public int firstIndexOf(@NonNull String name) { + return 0; + } + + @NonNull + @Override + public List allIndicesOf(@NonNull CqlIdentifier id) { + return Collections.singletonList(0); + } + + @Override + public int firstIndexOf(@NonNull CqlIdentifier id) { + return 0; + } + + @NonNull + @Override + public DataType getType(int i) { + return DataTypes.INT; + } + + @NonNull + @Override + public DataType getType(@NonNull String name) { + return DataTypes.INT; + } + + @NonNull + @Override + public DataType getType(@NonNull CqlIdentifier id) { + return DataTypes.INT; + } + + @Override + public ByteBuffer getBytesUnsafe(int i) { + return null; + } + + @Override + public boolean isDetached() { + return false; + } + + @Override + public void attach(@NonNull AttachmentPoint attachmentPoint) {} + + // equals and hashCode required for TCK tests that check that two subscribers + // receive the exact same set of items. + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof MockRow)) { + return false; + } + MockRow mockRow = (MockRow) o; + return index == mockRow.index; + } + + @Override + public int hashCode() { + return index; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java new file mode 100644 index 00000000000..6a1a5d644e3 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/ReactiveResultSetSubscriptionTest.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.cql.reactive;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow;
+import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
+import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.api.core.cql.Row;
+import com.datastax.oss.driver.api.core.servererrors.UnavailableException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import org.junit.Test;
+
+public class ReactiveResultSetSubscriptionTest {
+
+  @Test
+  public void should_retrieve_entire_result_set() {
+    CompletableFuture<AsyncResultSet> future1 = new CompletableFuture<>();
+    CompletableFuture<AsyncResultSet> future2 = new CompletableFuture<>();
+    CompletableFuture<AsyncResultSet> future3 = new CompletableFuture<>();
+    MockAsyncResultSet page1 = new MockAsyncResultSet(3, future2);
+    MockAsyncResultSet page2 = new MockAsyncResultSet(3, future3);
+    MockAsyncResultSet page3 = new MockAsyncResultSet(3, null);
+    TestSubscriber<ReactiveRow> mainSubscriber = new TestSubscriber<>();
+    TestSubscriber<ColumnDefinitions> colDefsSubscriber = new TestSubscriber<>();
+    TestSubscriber<ExecutionInfo> execInfosSubscriber = new TestSubscriber<>();
+    TestSubscriber<Boolean> wasAppliedSubscriber = new TestSubscriber<>();
+    ReactiveResultSetSubscription<ReactiveRow> subscription =
+        new ReactiveResultSetSubscription<>(
+            mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber);
+    mainSubscriber.onSubscribe(subscription);
+    subscription.start(() -> future1);
+    future1.complete(page1);
+    future2.complete(page2);
+    future3.complete(page3);
+    mainSubscriber.awaitTermination();
+    List<Row> expected = new ArrayList<>(page1.currentPage());
+    expected.addAll(page2.currentPage());
+    expected.addAll(page3.currentPage());
+    assertThat(mainSubscriber.getElements()).extracting("row").isEqualTo(expected);
+    assertThat(colDefsSubscriber.getElements())
+        .hasSize(1)
+        .containsExactly(page1.getColumnDefinitions());
+    assertThat(execInfosSubscriber.getElements())
+        .hasSize(3)
+        .containsExactly(
+            page1.getExecutionInfo(), page2.getExecutionInfo(), page3.getExecutionInfo());
+    assertThat(wasAppliedSubscriber.getElements()).hasSize(1).containsExactly(true);
+  }
+
+  @Test
+  public void should_report_error_on_first_page() {
+    CompletableFuture<AsyncResultSet> future1 = new CompletableFuture<>();
+    TestSubscriber<ReactiveRow> mainSubscriber = new TestSubscriber<>();
+    TestSubscriber<ColumnDefinitions> colDefsSubscriber = new TestSubscriber<>();
+    TestSubscriber<ExecutionInfo> execInfosSubscriber = new TestSubscriber<>();
+    TestSubscriber<Boolean> wasAppliedSubscriber = new TestSubscriber<>();
+    ReactiveResultSetSubscription<ReactiveRow> subscription =
+        new ReactiveResultSetSubscription<>(
+            mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber);
+    mainSubscriber.onSubscribe(subscription);
+    subscription.start(() -> future1);
+    future1.completeExceptionally(new UnavailableException(null, null, 0, 0));
+    mainSubscriber.awaitTermination();
+    assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class);
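+    // the failure should also propagate to the three derived publishers: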
+    assertThat(colDefsSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class);
+    assertThat(execInfosSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class);
+    assertThat(wasAppliedSubscriber.getError())
+        .isNotNull()
+        .isInstanceOf(UnavailableException.class);
+  }
+
+  @Test
+  public void should_report_synchronous_failure_on_first_page() {
+    TestSubscriber<ReactiveRow> mainSubscriber = new TestSubscriber<>();
+    TestSubscriber<ColumnDefinitions> colDefsSubscriber = new TestSubscriber<>();
+    TestSubscriber<ExecutionInfo> execInfosSubscriber = new TestSubscriber<>();
+    TestSubscriber<Boolean> wasAppliedSubscriber = new TestSubscriber<>();
+    ReactiveResultSetSubscription<ReactiveRow> subscription =
+        new ReactiveResultSetSubscription<>(
+            mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber);
+    mainSubscriber.onSubscribe(subscription);
+    subscription.start(
+        () -> {
+          throw new IllegalStateException();
+        });
+    mainSubscriber.awaitTermination();
+    assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(IllegalStateException.class);
+    assertThat(colDefsSubscriber.getError()).isNotNull().isInstanceOf(IllegalStateException.class);
+    assertThat(execInfosSubscriber.getError())
+        .isNotNull()
+        .isInstanceOf(IllegalStateException.class);
+    assertThat(wasAppliedSubscriber.getError())
+        .isNotNull()
+        .isInstanceOf(IllegalStateException.class);
+  }
+
+  @Test
+  public void should_report_error_on_intermediary_page() {
+    CompletableFuture<AsyncResultSet> future1 = new CompletableFuture<>();
+    CompletableFuture<AsyncResultSet> future2 = new CompletableFuture<>();
+    MockAsyncResultSet page1 = new MockAsyncResultSet(3, future2);
+    TestSubscriber<ReactiveRow> mainSubscriber = new TestSubscriber<>();
+    TestSubscriber<ColumnDefinitions> colDefsSubscriber = new TestSubscriber<>();
+    TestSubscriber<ExecutionInfo> execInfosSubscriber = new TestSubscriber<>();
+    TestSubscriber<Boolean> wasAppliedSubscriber = new TestSubscriber<>();
+    ReactiveResultSetSubscription<ReactiveRow> subscription =
+        new ReactiveResultSetSubscription<>(
+            mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber);
+    mainSubscriber.onSubscribe(subscription);
+    subscription.start(() -> future1);
+    future1.complete(page1);
+    future2.completeExceptionally(new UnavailableException(null, null, 0, 0));
+    mainSubscriber.awaitTermination();
+    assertThat(mainSubscriber.getElements()).extracting("row").isEqualTo(page1.currentPage());
+    assertThat(mainSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class);
+    // colDefsSubscriber completed normally when page1 arrived
+    assertThat(colDefsSubscriber.getError()).isNull();
+    assertThat(colDefsSubscriber.getElements())
+        .hasSize(1)
+        .containsExactly(page1.getColumnDefinitions());
+    // execInfosSubscriber completed with error, but should have emitted 1 item for page1
+    assertThat(execInfosSubscriber.getElements())
+        .hasSize(1)
+        .containsExactly(page1.getExecutionInfo());
+    assertThat(execInfosSubscriber.getError()).isNotNull().isInstanceOf(UnavailableException.class);
+    // wasAppliedSubscriber completed normally when page1 arrived
+    assertThat(wasAppliedSubscriber.getElements()).hasSize(1).containsExactly(true);
+    assertThat(wasAppliedSubscriber.getError()).isNull();
+  }
+
+  @Test
+  public void should_handle_empty_non_final_pages() {
+    CompletableFuture<AsyncResultSet> future1 = new CompletableFuture<>();
+    CompletableFuture<AsyncResultSet> future2 = new CompletableFuture<>();
+    CompletableFuture<AsyncResultSet> future3 = new CompletableFuture<>();
+    MockAsyncResultSet page1 = new MockAsyncResultSet(10, future2);
+    MockAsyncResultSet page2 = new MockAsyncResultSet(0, future3);
+    MockAsyncResultSet page3 = new MockAsyncResultSet(10, null);
+    TestSubscriber<ReactiveRow> mainSubscriber = new TestSubscriber<>(1);
+    TestSubscriber<ColumnDefinitions> colDefsSubscriber = new TestSubscriber<>();
+    TestSubscriber<ExecutionInfo> execInfosSubscriber = new TestSubscriber<>();
+    TestSubscriber<Boolean> wasAppliedSubscriber = new TestSubscriber<>();
+    ReactiveResultSetSubscription<ReactiveRow> subscription =
+        new ReactiveResultSetSubscription<>(
+            mainSubscriber, colDefsSubscriber, execInfosSubscriber, wasAppliedSubscriber);
+    mainSubscriber.onSubscribe(subscription);
+    subscription.start(() -> future1);
+    future1.complete(page1);
+    future2.complete(page2);
+    // emulate backpressure
+    subscription.request(1);
+    future3.complete(page3);
+    subscription.request(Long.MAX_VALUE);
+    mainSubscriber.awaitTermination();
+    assertThat(mainSubscriber.getError()).isNull();
+    List<Row> expected = new ArrayList<>(page1.currentPage());
+    expected.addAll(page3.currentPage());
+    assertThat(mainSubscriber.getElements()).hasSize(20).extracting("row").isEqualTo(expected);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java
new file mode 100644
index 00000000000..3bdd138beef
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTckTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.cql.reactive;
+
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+public class SimpleUnicastProcessorTckTest extends PublisherVerification<Integer> {
+
+  public SimpleUnicastProcessorTckTest() {
+    super(new TestEnvironment());
+  }
+
+  @Override
+  public Publisher<Integer> createPublisher(long elements) {
+    // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE.
+    // Past 3 elements it never checks how many elements have been effectively produced,
+    // so we can safely cap at, say, 20.
+    int effective = (int) Math.min(elements, 20L);
+    SimpleUnicastProcessor<Integer> processor = new SimpleUnicastProcessor<>();
+    for (int i = 0; i < effective; i++) {
+      processor.onNext(i);
+    }
+    processor.onComplete();
+    return processor;
+  }
+
+  @Override
+  public Publisher<Integer> createFailedPublisher() {
+    SimpleUnicastProcessor<Integer> processor = new SimpleUnicastProcessor<>();
+    // Since our publisher does not support multiple
+    // subscriptions, we use that to create a failed publisher.
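+    // (the TCK then subscribes a second time and expects that subscription to fail)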
+ processor.subscribe(new TestSubscriber<>()); + return processor; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java new file mode 100644 index 00000000000..3ad2173946b --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/SimpleUnicastProcessorTest.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Test; + +public class SimpleUnicastProcessorTest { + + /** Test for JAVA-2387. */ + @Test + public void should_propagate_upstream_signals_when_downstream_already_subscribed() { + // given + SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); + TestSubscriber subscriber = new TestSubscriber<>(); + // when + processor.subscribe(subscriber); // subscription happens before signals arrive + processor.onNext(1); + processor.onComplete(); + subscriber.awaitTermination(); + // then + assertThat(subscriber.getElements()).hasSize(1).containsExactly(1); + assertThat(subscriber.getError()).isNull(); + } + + @Test + public void should_delay_upstream_signals_until_downstream_is_subscribed() { + // given + SimpleUnicastProcessor processor = new SimpleUnicastProcessor<>(); + TestSubscriber subscriber = new TestSubscriber<>(); + // when + processor.onNext(1); + processor.onComplete(); + processor.subscribe(subscriber); // subscription happens after signals arrive + subscriber.awaitTermination(); + // then + assertThat(subscriber.getElements()).hasSize(1).containsExactly(1); + assertThat(subscriber.getError()).isNull(); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java new file mode 100644 index 00000000000..652155e5309 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/cql/reactive/TestSubscriber.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.cql.reactive; + +import static org.assertj.core.api.Fail.fail; + +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +public class TestSubscriber implements Subscriber { + + private final List elements = new ArrayList<>(); + private final CountDownLatch latch = new CountDownLatch(1); + private final long demand; + private Subscription subscription; + private Throwable error; + + public TestSubscriber() { + this.demand = Long.MAX_VALUE; + } + + public TestSubscriber(long demand) { + this.demand = demand; + } + + @Override + public void onSubscribe(Subscription s) { + if (subscription != null) { + fail("already subscribed"); + } + subscription = s; + subscription.request(demand); + } + + @Override + public void onNext(T t) { + elements.add(t); + } + + @Override + public void onError(Throwable t) { + error = t; + latch.countDown(); + } + + @Override + public void onComplete() { + latch.countDown(); + } + + @Nullable + public Throwable getError() { + return error; + } + + @NonNull + public List getElements() { + return elements; + } + + public void awaitTermination() { + if (!Uninterruptibles.awaitUninterruptibly(latch, 1, TimeUnit.MINUTES)) { + fail("subscriber not terminated"); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java new file mode 100644 index 00000000000..38dc84549c4 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultLineStringTest.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.esri.core.geometry.ogc.OGCLineString; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import org.junit.Test; + +public class DefaultLineStringTest { + private final LineString lineString = + LineString.fromPoints( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 30), + Point.fromCoordinates(40, 40)); + + private final String wkt = "LINESTRING (30 10, 10 30, 40 40)"; + + private final String json = + "{\"type\":\"LineString\",\"coordinates\":[[30.0,10.0],[10.0,30.0],[40.0,40.0]]}"; + + @Test + public void should_parse_valid_well_known_text() { + assertThat(LineString.fromWellKnownText(wkt)).isEqualTo(lineString); + } + + @Test + public void should_fail_to_parse_invalid_well_known_text() { + assertInvalidWkt("linestring()"); + assertInvalidWkt("linestring(30 10 20, 10 30 20)"); // 3d + assertInvalidWkt("linestring(0 0, 1 1, 0 1, 1 0)"); // crossing itself + assertInvalidWkt("superlinestring(30 10, 10 30, 40 40)"); + } + + @Test + public void should_convert_to_well_known_text() { + assertThat(lineString.toString()).isEqualTo(wkt); + } + + @Test + public void should_convert_to_well_known_binary() { + ByteBuffer actual = lineString.asWellKnownBinary(); + + ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); + expected.position(0); + expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness + expected.putInt(2); // type + expected.putInt(3); // num lineStrings + expected.putDouble(30); // x1 + expected.putDouble(10); // y1 + expected.putDouble(10); // x2 + expected.putDouble(30); // y2 + expected.putDouble(40); // x3 + expected.putDouble(40); // y3 + expected.flip(); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void should_load_from_well_known_binary() { + ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); + bb.position(0); + bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness
+    bb.putInt(2); // type
+    bb.putInt(3); // num lineStrings
+    bb.putDouble(30); // x1
+    bb.putDouble(10); // y1
+    bb.putDouble(10); // x2
+    bb.putDouble(30); // y2
+    bb.putDouble(40); // x3
+    bb.putDouble(40); // y3
+    bb.flip();
+
+    assertThat(LineString.fromWellKnownBinary(bb)).isEqualTo(lineString);
+  }
+
+  @Test
+  public void should_parse_valid_geo_json() {
+    assertThat(LineString.fromGeoJson(json)).isEqualTo(lineString);
+  }
+
+  @Test
+  public void should_convert_to_geo_json() {
+    assertThat(lineString.asGeoJson()).isEqualTo(json);
+  }
+
+  @Test
+  public void should_convert_to_ogc_line_string() {
+    assertThat(((DefaultLineString) lineString).getOgcGeometry()).isInstanceOf(OGCLineString.class);
+  }
+
+  @Test
+  public void should_produce_same_hashCode_for_equal_objects() {
+    LineString line1 =
+        LineString.fromPoints(
+            Point.fromCoordinates(30, 10),
+            Point.fromCoordinates(10, 30),
+            Point.fromCoordinates(40, 40));
+    LineString line2 = LineString.fromWellKnownText(wkt);
+    assertThat(line1).isEqualTo(line2);
+    assertThat(line1.hashCode()).isEqualTo(line2.hashCode());
+  }
+
+  @Test
+  public void should_expose_points() {
+    assertThat(lineString.getPoints())
+        .containsOnly(
+            Point.fromCoordinates(30, 10),
+            Point.fromCoordinates(10, 30),
+            Point.fromCoordinates(40, 40));
+    assertThat(LineString.fromWellKnownText(wkt).getPoints())
+        .containsOnly(
+            Point.fromCoordinates(30, 10),
+            Point.fromCoordinates(10, 30),
+            Point.fromCoordinates(40, 40));
+  }
+
+  @Test
+  public void should_encode_and_decode() throws Exception {
+    assertThat(SerializationUtils.serializeAndDeserialize(lineString)).isEqualTo(lineString);
+  }
+
+  @Test
+  public void should_contain_self() {
+    assertThat(lineString.contains(lineString)).isTrue();
+  }
+
+  @Test
+  public void should_contain_all_intersected_points_except_start_and_end() {
+    LineString s =
+        LineString.fromPoints(
+            Point.fromCoordinates(0, 0),
+            Point.fromCoordinates(0, 30),
+            Point.fromCoordinates(30, 30));
+    assertThat(s.contains(Point.fromCoordinates(0, 0))).isFalse();
+    assertThat(s.contains(Point.fromCoordinates(0, 15))).isTrue();
+    assertThat(s.contains(Point.fromCoordinates(0, 30))).isTrue();
+    assertThat(s.contains(Point.fromCoordinates(15, 30))).isTrue();
+    assertThat(s.contains(Point.fromCoordinates(30, 30))).isFalse();
+  }
+
+  @Test
+  public void should_contain_substring() {
+    assertThat(
+            lineString.contains(
+                LineString.fromPoints(
+                    Point.fromCoordinates(30, 10), Point.fromCoordinates(10, 30))))
+        .isTrue();
+  }
+
+  @Test
+  public void should_not_contain_unrelated_string() {
+    assertThat(
+            lineString.contains(
+                LineString.fromPoints(
+                    Point.fromCoordinates(10, 10), Point.fromCoordinates(30, 30))))
+        .isFalse();
+  }
+
+  @Test
+  public void should_not_contain_polygon() {
+    LineString s =
+        LineString.fromPoints(
+            Point.fromCoordinates(0, 0),
+            Point.fromCoordinates(0, 30),
+            Point.fromCoordinates(30, 30),
+            Point.fromCoordinates(30, 0));
+    LineString p =
+        LineString.fromPoints(
+            Point.fromCoordinates(10, 10),
+            Point.fromCoordinates(10, 20),
+            Point.fromCoordinates(20, 20),
+            Point.fromCoordinates(20, 10));
+    assertThat(s.contains(p)).isFalse();
+  }
+
+  @Test
+  public void should_accept_empty_shape() throws Exception {
+    DefaultLineString s = ((DefaultLineString) LineString.fromWellKnownText("LINESTRING EMPTY"));
+    assertThat(s.getOgcGeometry().isEmpty()).isTrue();
+  }
+
+  private void assertInvalidWkt(String s) {
+    try {
+      LineString.fromWellKnownText(s);
+      fail("Should have thrown IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java
new file mode 100644
index 00000000000..1e3a7366741
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPointTest.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.data.geometry;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Fail.fail;
+
+import com.datastax.dse.driver.api.core.data.geometry.Point;
+import com.esri.core.geometry.ogc.OGCPoint;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import org.junit.Test;
+
+public class DefaultPointTest {
+
+  private DefaultPoint point = new DefaultPoint(1.1, 2.2);
+
+  private final String wkt = "POINT (1.1 2.2)";
+
+  private final String json = "{\"type\":\"Point\",\"coordinates\":[1.1,2.2]}";
+
+  @Test
+  public void should_parse_valid_well_known_text() {
+    assertThat(Point.fromWellKnownText(wkt)).isEqualTo(point);
+  }
+
+  @Test
+  public void should_fail_to_parse_invalid_well_known_text() {
+    assertInvalidWkt("superpoint(1.1 2.2 3.3)");
+  }
+
+  @Test
+  public void should_convert_to_well_known_text() {
+    assertThat(point.toString()).isEqualTo(wkt);
+  }
+
+  @Test
+  public void should_convert_to_well_known_binary() {
+    ByteBuffer actual = point.asWellKnownBinary();
+
+    ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder());
+    expected.position(0);
+    expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness
+    expected.putInt(1); // type
+    expected.putDouble(1.1); // x
+    expected.putDouble(2.2); // y
+    expected.flip();
+
+    assertThat(actual).isEqualTo(expected);
+  }
+
+  @Test
+  public void should_load_from_well_known_binary() {
+    ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder());
+    bb.position(0);
+    bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness
+    bb.putInt(1); // type
+    bb.putDouble(1.1); // x
+    bb.putDouble(2.2); // y
+    bb.flip();
+
+    assertThat(Point.fromWellKnownBinary(bb)).isEqualTo(point);
+  }
+
+  @Test
+  public void should_parse_valid_geo_json() {
+    assertThat(Point.fromGeoJson(json)).isEqualTo(point);
+  }
+
+  @Test
+  public void should_convert_to_geo_json() {
+    assertThat(point.asGeoJson()).isEqualTo(json);
+  }
+
+  @Test
+  public void should_convert_to_ogc_point() {
+    assertThat(point.getOgcGeometry()).isInstanceOf(OGCPoint.class);
+  }
+
+  @Test
+  public void should_produce_same_hashCode_for_equal_objects() {
+    Point point1 = new DefaultPoint(10, 20);
+    Point point2 = Point.fromWellKnownText("POINT (10 20)");
+    assertThat(point1).isEqualTo(point2);
+    assertThat(point1.hashCode()).isEqualTo(point2.hashCode());
+  }
+
+  @Test
+  public void should_encode_and_decode() throws Exception {
+    assertThat(SerializationUtils.serializeAndDeserialize(point)).isEqualTo(point);
+  }
+
+  @Test
+  public void should_contain_self() {
+    assertThat(point.contains(point)).isTrue();
+  }
+
+  @Test
+  public void should_not_contain_any_other_shape_than_self() {
+    DefaultPoint point2 = new DefaultPoint(1, 2);
+    DefaultPoint point3 = new DefaultPoint(1, 3);
+    assertThat(point.contains(point2)).isFalse();
+    assertThat(point.contains(new DefaultLineString(point, point2))).isFalse();
+    assertThat(point.contains(new DefaultPolygon(point, point2, point3))).isFalse();
+  }
+
+  @Test
+  public void should_accept_empty_shape() throws Exception {
+    DefaultPoint point = ((DefaultPoint) Point.fromWellKnownText("POINT EMPTY"));
+    assertThat(point.getOgcGeometry().isEmpty()).isTrue();
+  }
+
+  private void assertInvalidWkt(String s) {
+    try {
+      Point.fromWellKnownText(s);
+      fail("Should have thrown IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java
new file mode 100644
index 00000000000..d86e9cdc269
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DefaultPolygonTest.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.esri.core.geometry.ogc.OGCPolygon; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import org.junit.Test; + +public class DefaultPolygonTest { + + private Polygon polygon = + Polygon.fromPoints( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 20), + Point.fromCoordinates(20, 40), + Point.fromCoordinates(40, 40)); + + private String wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"; + + private String json = + "{\"type\":\"Polygon\",\"coordinates\":[[[30.0,10.0],[10.0,20.0],[20.0,40.0],[40.0,40.0],[30.0,10.0]]]}"; + + @Test + public void should_parse_valid_well_known_text() { + assertThat(Polygon.fromWellKnownText(wkt)).isEqualTo(polygon); + } + + @Test + public void should_fail_to_parse_invalid_well_known_text() { + assertInvalidWkt("polygon(())"); // malformed + assertInvalidWkt("polygon((30 10 1, 40 40 1, 20 40 1, 10 20 1, 30 10 1))"); // 3d + assertInvalidWkt("polygon((0 0, 1 1, 0 1, 1 0, 0 0))"); // crosses itself + assertInvalidWkt("polygon123((30 10, 40 40, 20 40, 10 20, 30 10))"); // malformed + } + + @Test + public void should_convert_to_well_known_binary() { + ByteBuffer actual = polygon.asWellKnownBinary(); + + ByteBuffer expected = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); + expected.position(0); + expected.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 1 : 0)); // endianness + expected.putInt(3); // type + expected.putInt(1); // num rings + expected.putInt(5); // num polygons (ring 1/1) + expected.putDouble(30); // x1 + expected.putDouble(10); // y1 + expected.putDouble(40); // x2 + expected.putDouble(40); // y2 + expected.putDouble(20); // x3 + expected.putDouble(40); // y3 + expected.putDouble(10); // x4 + expected.putDouble(20); // y4 + expected.putDouble(30); // x5 + expected.putDouble(10); // y5 + expected.flip(); + + assertThat(actual).isEqualTo(expected); + } + + @Test + public void should_load_from_well_known_binary() { + ByteBuffer bb = ByteBuffer.allocate(1024).order(ByteOrder.nativeOrder()); + bb.position(0); + bb.put((byte) (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? 
1 : 0)); // endianness + bb.putInt(3); // type + bb.putInt(1); // num rings + bb.putInt(5); // num polygons (ring 1/1) + bb.putDouble(30); // x1 + bb.putDouble(10); // y1 + bb.putDouble(40); // x2 + bb.putDouble(40); // y2 + bb.putDouble(20); // x3 + bb.putDouble(40); // y3 + bb.putDouble(10); // x4 + bb.putDouble(20); // y4 + bb.putDouble(30); // x5 + bb.putDouble(10); // y5 + bb.flip(); + + assertThat(Polygon.fromWellKnownBinary(bb)).isEqualTo(polygon); + } + + @Test + public void should_parse_valid_geo_json() { + assertThat(Polygon.fromGeoJson(json)).isEqualTo(polygon); + } + + @Test + public void should_convert_to_geo_json() { + assertThat(polygon.asGeoJson()).isEqualTo(json); + } + + @Test + public void should_convert_to_ogc_polygon() { + assertThat(((DefaultPolygon) polygon).getOgcGeometry()).isInstanceOf(OGCPolygon.class); + } + + @Test + public void should_produce_same_hashCode_for_equal_objects() { + Polygon polygon1 = + Polygon.fromPoints( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 20), + Point.fromCoordinates(20, 40), + Point.fromCoordinates(40, 40)); + Polygon polygon2 = Polygon.fromWellKnownText(wkt); + assertThat(polygon1).isEqualTo(polygon2); + assertThat(polygon1.hashCode()).isEqualTo(polygon2.hashCode()); + } + + @Test + public void should_build_with_constructor_without_checking_orientation() { + // By default, OGC requires outer rings to be clockwise and inner rings to be counterclockwise. + // We disable that in our constructors. + // This polygon has a single outer ring that is counterclockwise. + Polygon polygon = + Polygon.fromPoints( + Point.fromCoordinates(5, 0), + Point.fromCoordinates(5, 3), + Point.fromCoordinates(0, 3), + Point.fromCoordinates(0, 0)); + assertThat(polygon.asWellKnownText()).isEqualTo("POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0))"); + } + + @Test + public void should_build_complex_polygon_with_builder() { + Polygon polygon = + Polygon.builder() + .addRing( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(0, 3), + Point.fromCoordinates(5, 3), + Point.fromCoordinates(5, 0)) + .addRing( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(2, 1)) + .addRing( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(3, 2), + Point.fromCoordinates(4, 2), + Point.fromCoordinates(4, 1)) + .build(); + assertThat(polygon.asWellKnownText()) + .isEqualTo( + "POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1), (3 1, 3 2, 4 2, 4 1, 3 1))"); + } + + @Test + public void should_expose_rings() { + assertThat(polygon.getExteriorRing()) + .containsOnly( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 20), + Point.fromCoordinates(20, 40), + Point.fromCoordinates(40, 40)); + assertThat(polygon.getInteriorRings().isEmpty()).isTrue(); + + Polygon fromWkt = Polygon.fromWellKnownText(wkt); + assertThat(fromWkt.getExteriorRing()) + .containsOnly( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 20), + Point.fromCoordinates(20, 40), + Point.fromCoordinates(40, 40)); + assertThat(fromWkt.getInteriorRings().isEmpty()).isTrue(); + + Polygon complex = + Polygon.builder() + .addRing( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(0, 3), + Point.fromCoordinates(5, 3), + Point.fromCoordinates(5, 0)) + .addRing( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(2, 1)) + .addRing( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(3, 2), + Point.fromCoordinates(4, 2), + Point.fromCoordinates(4, 
1)) + .build(); + assertThat(complex.getExteriorRing()) + .containsOnly( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(0, 3), + Point.fromCoordinates(5, 3), + Point.fromCoordinates(5, 0)); + assertThat(complex.getInteriorRings()).hasSize(2); + assertThat(complex.getInteriorRings().get(0)) + .containsOnly( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(2, 1)); + assertThat(complex.getInteriorRings().get(1)) + .containsOnly( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(3, 2), + Point.fromCoordinates(4, 2), + Point.fromCoordinates(4, 1)); + + Polygon complexFromWkt = + Polygon.fromWellKnownText( + "POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1), (3 1, 3 2, 4 2, 4 1, 3 1))"); + assertThat(complexFromWkt.getExteriorRing()) + .containsOnly( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(0, 3), + Point.fromCoordinates(5, 3), + Point.fromCoordinates(5, 0)); + assertThat(complexFromWkt.getInteriorRings()).hasSize(2); + assertThat(complexFromWkt.getInteriorRings().get(0)) + .containsOnly( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(2, 1)); + assertThat(complexFromWkt.getInteriorRings().get(1)) + .containsOnly( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(3, 2), + Point.fromCoordinates(4, 2), + Point.fromCoordinates(4, 1)); + } + + @Test + public void should_encode_and_decode() throws Exception { + assertThat(SerializationUtils.serializeAndDeserialize(polygon)).isEqualTo(polygon); + } + + @Test + public void should_contain_self() { + assertThat(polygon.contains(polygon)).isTrue(); + } + + @Test + public void should_not_contain_point_or_linestring_on_exterior_ring() { + assertThat(polygon.contains(Point.fromCoordinates(30, 10))).isFalse(); + assertThat(polygon.contains(Point.fromCoordinates(30, 40))).isFalse(); + assertThat( + polygon.contains( + LineString.fromPoints( + Point.fromCoordinates(35, 40), Point.fromCoordinates(25, 40)))) + .isFalse(); + } + + @Test + public void should_contain_interior_shape() { + assertThat(polygon.contains(Point.fromCoordinates(20, 20))).isTrue(); + assertThat( + polygon.contains( + LineString.fromPoints( + Point.fromCoordinates(20, 20), Point.fromCoordinates(30, 20)))) + .isTrue(); + assertThat( + polygon.contains( + Polygon.fromPoints( + Point.fromCoordinates(20, 20), + Point.fromCoordinates(30, 20), + Point.fromCoordinates(20, 30)))) + .isTrue(); + } + + @Test + public void should_not_contain_exterior_shape() { + assertThat(polygon.contains(Point.fromCoordinates(10, 10))).isFalse(); + assertThat( + polygon.contains( + LineString.fromPoints( + Point.fromCoordinates(10, 10), Point.fromCoordinates(20, 20)))) + .isFalse(); + assertThat( + polygon.contains( + Polygon.fromPoints( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(0, 10), + Point.fromCoordinates(10, 10)))) + .isFalse(); + } + + @Test + public void should_not_contain_shapes_in_interior_hole() { + Polygon complex = + Polygon.builder() + .addRing( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(30, 0), + Point.fromCoordinates(30, 30), + Point.fromCoordinates(0, 30)) + .addRing( + Point.fromCoordinates(10, 10), + Point.fromCoordinates(20, 10), + Point.fromCoordinates(20, 20), + Point.fromCoordinates(10, 20)) + .build(); + assertThat(complex.contains(Point.fromCoordinates(15, 15))).isFalse(); + } + + @Test + public void should_accept_empty_shape() throws Exception { + Polygon polygon = 
Polygon.fromWellKnownText("POLYGON EMPTY");
+    assertThat(polygon.getExteriorRing()).isEmpty();
+    assertThat(((DefaultPolygon) polygon).getOgcGeometry().isEmpty()).isTrue();
+  }
+
+  private void assertInvalidWkt(String s) {
+    try {
+      Polygon.fromWellKnownText(s);
+      fail("Should have thrown IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java
new file mode 100644
index 00000000000..ba158288891
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/DistanceTest.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.data.geometry;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.dse.driver.api.core.data.geometry.LineString;
+import com.datastax.dse.driver.api.core.data.geometry.Point;
+import com.datastax.dse.driver.api.core.data.geometry.Polygon;
+import org.junit.Test;
+
+public class DistanceTest {
+
+  private final Point point = Point.fromCoordinates(1.1, 2.2);
+  private final Distance distance = new Distance(point, 7.0);
+  private final String wkt = "DISTANCE((1.1 2.2) 7.0)";
+
+  @Test
+  public void should_parse_valid_well_known_text() {
+    Distance fromWkt = Distance.fromWellKnownText(wkt);
+    assertThat(fromWkt.getRadius()).isEqualTo(7.0);
+    assertThat(fromWkt.getCenter()).isEqualTo(point);
+    assertThat(Distance.fromWellKnownText(wkt)).isEqualTo(distance);
+    // whitespace between the DISTANCE keyword and the spec doesn't matter.
+    assertThat(Distance.fromWellKnownText("DISTANCE ((1.1 2.2) 7.0)")).isEqualTo(distance);
+    // case doesn't matter.
+ assertThat(Distance.fromWellKnownText("distance((1.1 2.2) 7.0)")).isEqualTo(distance); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_invalid_well_known_text() { + Distance.fromWellKnownText("dist((1.1 2.2) 3.3)"); + } + + @Test + public void should_convert_to_well_known_text() { + assertThat(distance.asWellKnownText()).isEqualTo(wkt); + } + + @Test + public void should_contain_point() { + assertThat(distance.contains(Point.fromCoordinates(2.0, 3.0))).isTrue(); + } + + @Test + public void should_not_contain_point() { + // y axis falls outside of distance + assertThat(distance.contains(Point.fromCoordinates(2.0, 9.3))).isFalse(); + } + + @Test + public void should_contain_linestring() { + assertThat( + distance.contains( + LineString.fromPoints( + Point.fromCoordinates(2.0, 3.0), + Point.fromCoordinates(3.1, 6.2), + Point.fromCoordinates(-1.0, -2.0)))) + .isTrue(); + } + + @Test + public void should_not_contain_linestring() { + // second point falls outside of distance at y axis. + assertThat( + distance.contains( + LineString.fromPoints( + Point.fromCoordinates(2.0, 3.0), + Point.fromCoordinates(3.1, 9.2), + Point.fromCoordinates(-1.0, -2.0)))) + .isFalse(); + } + + @Test + public void should_contain_polygon() { + Polygon polygon = + Polygon.fromPoints( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 4), + Point.fromCoordinates(4, 4)); + assertThat(distance.contains(polygon)).isTrue(); + } + + @Test + public void should_not_contain_polygon() { + Polygon polygon = + Polygon.fromPoints( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 4), + Point.fromCoordinates(10, 4)); + // final point falls outside of distance at x axis. + assertThat(distance.contains(polygon)).isFalse(); + } + + @Test(expected = UnsupportedOperationException.class) + public void should_fail_to_convert_to_ogc() { + distance.getOgcGeometry(); + } + + @Test(expected = UnsupportedOperationException.class) + public void should_fail_to_convert_to_wkb() { + distance.asWellKnownBinary(); + } + + @Test(expected = UnsupportedOperationException.class) + public void should_fail_to_convert_to_geo_json() { + distance.asGeoJson(); + } + + @Test + public void should_serialize_and_deserialize() throws Exception { + assertThat(SerializationUtils.serializeAndDeserialize(distance)).isEqualTo(distance); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java new file mode 100644 index 00000000000..84bd1dab343 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/data/geometry/SerializationUtils.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.data.geometry; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.data.geometry.Geometry; +import com.datastax.oss.protocol.internal.util.Bytes; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +public class SerializationUtils { + + public static Object serializeAndDeserialize(Geometry geometry) + throws IOException, ClassNotFoundException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream out = new ObjectOutputStream(baos); + + out.writeObject(geometry); + + byte[] bytes = baos.toByteArray(); + if (!(geometry instanceof Distance)) { + byte[] wkb = Bytes.getArray(geometry.asWellKnownBinary()); + assertThat(bytes).containsSequence(wkb); + } + ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes)); + return in.readObject(); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java new file mode 100644 index 00000000000..c67be162181 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerSpeculativeExecutionTest.java @@ -0,0 +1,530 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.singleGraphRow; +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.DseTestDataProviders; +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.NoNodeAvailableException; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; +import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; +import com.datastax.oss.driver.internal.core.cql.PoolBehavior; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; +import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.response.Error; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +/** + * These tests are almost exact copies of {@link + * com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerSpeculativeExecutionTest}. 
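+ * The main difference is that the request under test is a graph statement, handled by
+ * {@link ContinuousGraphRequestHandler}.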
+ */ +@RunWith(DataProviderRunner.class) +public class ContinuousGraphRequestHandlerSpeculativeExecutionTest { + + @Mock DefaultNode node1; + @Mock DefaultNode node2; + @Mock DefaultNode node3; + + @Mock NodeMetricUpdater nodeMetricUpdater1; + @Mock NodeMetricUpdater nodeMetricUpdater2; + @Mock NodeMetricUpdater nodeMetricUpdater3; + + @Mock GraphSupportChecker graphSupportChecker; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + when(node1.getMetricUpdater()).thenReturn(nodeMetricUpdater1); + when(node2.getMetricUpdater()).thenReturn(nodeMetricUpdater2); + when(node3.getMetricUpdater()).thenReturn(nodeMetricUpdater3); + when(nodeMetricUpdater1.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); + when(nodeMetricUpdater2.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); + when(nodeMetricUpdater3.isEnabled(any(NodeMetric.class), anyString())).thenReturn(true); + when(graphSupportChecker.inferGraphProtocol(any(), any(), any())) + .thenReturn(GraphProtocol.GRAPH_BINARY_1_0); + } + + @Test + @UseDataProvider(location = DseTestDataProviders.class, value = "nonIdempotentGraphConfig") + public void should_not_schedule_speculative_executions_if_not_idempotent( + boolean defaultIdempotence, GraphStatement statement) { + GraphRequestHandlerTestHarness.Builder harnessBuilder = + GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); + PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); + + try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { + SpeculativeExecutionPolicy speculativeExecutionPolicy = + harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); + + GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); + new ContinuousGraphRequestHandler( + statement, + harness.getSession(), + harness.getContext(), + "test", + module, + graphSupportChecker) + .handle(); + + node1Behavior.verifyWrite(); + node1Behavior.setWriteSuccess(); + + // should not schedule any timeout + assertThat(harness.nextScheduledTimeout()).isNull(); + + verifyNoMoreInteractions(speculativeExecutionPolicy); + verifyNoMoreInteractions(nodeMetricUpdater1); + } + } + + @Test + @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig") + public void should_schedule_speculative_executions( + boolean defaultIdempotence, GraphStatement statement) throws Exception { + GraphRequestHandlerTestHarness.Builder harnessBuilder = + GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); + PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); + PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2); + PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3); + + try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) { + SpeculativeExecutionPolicy speculativeExecutionPolicy = + harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME); + long firstExecutionDelay = 100L; + long secondExecutionDelay = 200L; + when(speculativeExecutionPolicy.nextExecution( + any(Node.class), eq(null), eq(statement), eq(1))) + .thenReturn(firstExecutionDelay); + when(speculativeExecutionPolicy.nextExecution( + any(Node.class), eq(null), eq(statement), eq(2))) + .thenReturn(secondExecutionDelay); + when(speculativeExecutionPolicy.nextExecution( + any(Node.class), eq(null), eq(statement), eq(3))) + .thenReturn(-1L); + + GraphBinaryModule module = 
+      new ContinuousGraphRequestHandler(
+              statement,
+              harness.getSession(),
+              harness.getContext(),
+              "test",
+              module,
+              graphSupportChecker)
+          .handle();
+
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+
+      CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
+      assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
+          .isEqualTo(firstExecutionDelay);
+      verifyNoMoreInteractions(nodeMetricUpdater1);
+      speculativeExecution1.task().run(speculativeExecution1);
+      verify(nodeMetricUpdater1)
+          .incrementCounter(
+              DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME);
+      node2Behavior.verifyWrite();
+      node2Behavior.setWriteSuccess();
+
+      CapturedTimeout speculativeExecution2 = harness.nextScheduledTimeout();
+      assertThat(speculativeExecution2.getDelay(TimeUnit.MILLISECONDS))
+          .isEqualTo(secondExecutionDelay);
+      verifyNoMoreInteractions(nodeMetricUpdater2);
+      speculativeExecution2.task().run(speculativeExecution2);
+      verify(nodeMetricUpdater2)
+          .incrementCounter(
+              DefaultNodeMetric.SPECULATIVE_EXECUTIONS, DriverExecutionProfile.DEFAULT_NAME);
+      node3Behavior.verifyWrite();
+      node3Behavior.setWriteSuccess();
+
+      // No more scheduled tasks since the policy returns a negative value on the third call.
+      assertThat(harness.nextScheduledTimeout()).isNull();
+
+      // Note that we don't need to complete any response, the test is just about checking that
+      // executions are started.
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
+  public void should_not_start_execution_if_result_complete(
+      boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
+    GraphRequestHandlerTestHarness.Builder harnessBuilder =
+        GraphRequestHandlerTestHarness.builder()
+            .withGraphTimeout(Duration.ofSeconds(10))
+            .withDefaultIdempotence(defaultIdempotence);
+    PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
+    PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2);
+
+    try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
+      SpeculativeExecutionPolicy speculativeExecutionPolicy =
+          harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
+      long firstExecutionDelay = 100L;
+      when(speculativeExecutionPolicy.nextExecution(
+              any(Node.class), eq(null), eq(statement), eq(1)))
+          .thenReturn(firstExecutionDelay);
+
+      GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+      ContinuousGraphRequestHandler requestHandler =
+          new ContinuousGraphRequestHandler(
+              statement,
+              harness.getSession(),
+              harness.getContext(),
+              "test",
+              module,
+              graphSupportChecker);
+      CompletionStage<AsyncGraphResultSet> resultSetFuture = requestHandler.handle();
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+
+      // The first timeout scheduled should be the global timeout
+      CapturedTimeout globalTimeout = harness.nextScheduledTimeout();
+      assertThat(globalTimeout.getDelay(TimeUnit.SECONDS)).isEqualTo(10);
+
+      // Check that the first execution was scheduled but don't run it yet
+      CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
+      assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
+          .isEqualTo(firstExecutionDelay);
+
+      // Complete the request from the initial execution
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module)));
+      assertThatStage(resultSetFuture).isSuccess();
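+
+      // (The harness uses a capturing timer: scheduled tasks are not executed on a real event
+      // loop but are handed back in scheduling order through nextScheduledTimeout(), the global
+      // timeout first and then each speculative execution, so the test can fire them
+      // deterministically with task().run(...).)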
+
+      // Pending speculative executions should have been cancelled. However we don't check
+      // firstExecutionTask directly because the request handler's onResponse can sometimes be
+      // invoked before operationComplete (this is very unlikely in practice, but happens in our
+      // Travis CI build). When that happens, the speculative execution is not recorded yet when
+      // cancelScheduledTasks runs.
+
+      // The fact that we missed the speculative execution is not a problem; even if it starts, it
+      // will eventually find out that the result is already complete and cancel itself:
+      speculativeExecution1.task().run(speculativeExecution1);
+      node2Behavior.verifyNoWrite();
+
+      verify(nodeMetricUpdater1)
+          .updateTimer(
+              eq(DseNodeMetric.GRAPH_MESSAGES),
+              eq(DriverExecutionProfile.DEFAULT_NAME),
+              anyLong(),
+              eq(TimeUnit.NANOSECONDS));
+      verifyNoMoreInteractions(nodeMetricUpdater1);
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
+  public void should_fail_if_no_nodes(boolean defaultIdempotence, GraphStatement<?> statement) {
+    GraphRequestHandlerTestHarness.Builder harnessBuilder =
+        GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
+    // No configured behaviors => will yield an empty query plan
+
+    try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
+      SpeculativeExecutionPolicy speculativeExecutionPolicy =
+          harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
+      long firstExecutionDelay = 100L;
+      when(speculativeExecutionPolicy.nextExecution(
+              any(Node.class), eq(null), eq(statement), eq(1)))
+          .thenReturn(firstExecutionDelay);
+
+      GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+      CompletionStage<AsyncGraphResultSet> resultSetFuture =
+          new ContinuousGraphRequestHandler(
+                  statement,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "test",
+                  module,
+                  graphSupportChecker)
+              .handle();
+
+      assertThatStage(resultSetFuture)
+          .isFailed(error -> assertThat(error).isInstanceOf(NoNodeAvailableException.class));
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
+  public void should_fail_if_no_more_nodes_and_initial_execution_is_last(
+      boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
+    GraphRequestHandlerTestHarness.Builder harnessBuilder =
+        GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
+    PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
+    harnessBuilder.withResponse(
+        node2,
+        defaultDseFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));
+
+    try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
+      SpeculativeExecutionPolicy speculativeExecutionPolicy =
+          harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
+      long firstExecutionDelay = 100L;
+      when(speculativeExecutionPolicy.nextExecution(
+              any(Node.class), eq(null), eq(statement), eq(1)))
+          .thenReturn(firstExecutionDelay);
+
+      GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+      CompletionStage<AsyncGraphResultSet> resultSetFuture =
+          new ContinuousGraphRequestHandler(
+                  statement,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "test",
+                  module,
+                  graphSupportChecker)
+              .handle();
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+      // do not simulate a response from node1 yet
+
+      // Run the next scheduled task to start the speculative execution. node2 will reply with a
+      // BOOTSTRAPPING error, causing a RETRY_NEXT; but the query plan is now empty so the
+      // speculative execution stops.
+      // next scheduled timeout should be the first speculative execution. Get it and run it.
+      CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
+      assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
+          .isEqualTo(firstExecutionDelay);
+      speculativeExecution1.task().run(speculativeExecution1);
+
+      // node1 now replies with the same response, that triggers a RETRY_NEXT
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(
+              new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));
+
+      // But again the query plan is empty so that should fail the request
+      assertThatStage(resultSetFuture)
+          .isFailed(
+              error -> {
+                assertThat(error).isInstanceOf(AllNodesFailedException.class);
+                Map<Node, List<Throwable>> nodeErrors =
+                    ((AllNodesFailedException) error).getAllErrors();
+                assertThat(nodeErrors).containsOnlyKeys(node1, node2);
+                assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class);
+                assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
+  public void should_fail_if_no_more_nodes_and_speculative_execution_is_last(
+      boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
+    GraphRequestHandlerTestHarness.Builder harnessBuilder =
+        GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
+    PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
+    PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2);
+
+    try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
+      SpeculativeExecutionPolicy speculativeExecutionPolicy =
+          harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
+      long firstExecutionDelay = 100L;
+      when(speculativeExecutionPolicy.nextExecution(
+              any(Node.class), eq(null), eq(statement), eq(1)))
+          .thenReturn(firstExecutionDelay);
+
+      GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+      CompletionStage<AsyncGraphResultSet> resultSetFuture =
+          new ContinuousGraphRequestHandler(
+                  statement,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "test",
+                  module,
+                  graphSupportChecker)
+              .handle();
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+      // do not simulate a response from node1 yet
+
+      // next scheduled timeout should be the first speculative execution. Get it and run it.
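+      // Each failed attempt is recorded against its node; once the query plan is exhausted the
+      // request fails with AllNodesFailedException, whose getAllErrors() map (asserted below)
+      // carries the BootstrappingException recorded for node1 and node2.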
+      CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
+      assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
+          .isEqualTo(firstExecutionDelay);
+      speculativeExecution1.task().run(speculativeExecution1);
+
+      // node1 now replies with a BOOTSTRAPPING error that triggers a RETRY_NEXT
+      // but the query plan is empty so the initial execution stops
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(
+              new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));
+
+      // Same thing with node2, so the speculative execution should reach the end of the query plan
+      // and fail the request
+      node2Behavior.setResponseSuccess(
+          defaultDseFrameOf(
+              new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));
+
+      assertThatStage(resultSetFuture)
+          .isFailed(
+              error -> {
+                assertThat(error).isInstanceOf(AllNodesFailedException.class);
+                Map<Node, List<Throwable>> nodeErrors =
+                    ((AllNodesFailedException) error).getAllErrors();
+                assertThat(nodeErrors).containsOnlyKeys(node1, node2);
+                assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class);
+                assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class);
+              });
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
+  public void should_retry_in_speculative_executions(
+      boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
+    GraphRequestHandlerTestHarness.Builder harnessBuilder =
+        GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
+    PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
+    PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2);
+    PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3);
+
+    try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
+      SpeculativeExecutionPolicy speculativeExecutionPolicy =
+          harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
+      long firstExecutionDelay = 100L;
+      when(speculativeExecutionPolicy.nextExecution(
+              any(Node.class), eq(null), eq(statement), eq(1)))
+          .thenReturn(firstExecutionDelay);
+
+      GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+      CompletionStage<AsyncGraphResultSet> resultSetFuture =
+          new ContinuousGraphRequestHandler(
+                  statement,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "test",
+                  module,
+                  graphSupportChecker)
+              .handle();
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+      // do not simulate a response from node1. The request will stay hanging for the rest of this
+      // test
+
+      // next scheduled timeout should be the first speculative execution. Get it and run it.
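+      // A speculative execution races with the initial one: both requests are in flight at the
+      // same time, the first execution to complete wins, and the loser's in-flight request is
+      // cancelled (see node1Behavior.verifyCancellation() below).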
+      CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
+      assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
+          .isEqualTo(firstExecutionDelay);
+      speculativeExecution1.task().run(speculativeExecution1);
+
+      node2Behavior.verifyWrite();
+      node2Behavior.setWriteSuccess();
+
+      // node2 replies with a response that triggers a RETRY_NEXT
+      node2Behavior.setResponseSuccess(
+          defaultDseFrameOf(
+              new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));
+
+      node3Behavior.setResponseSuccess(
+          defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module)));
+
+      // The second execution should move to node3 and complete the request
+      assertThatStage(resultSetFuture).isSuccess();
+
+      // The request to node1 was still in flight, it should have been cancelled
+      node1Behavior.verifyCancellation();
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
+  public void should_stop_retrying_other_executions_if_result_complete(
+      boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
+    GraphRequestHandlerTestHarness.Builder harnessBuilder =
+        GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
+    PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
+    PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2);
+    PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3);
+
+    try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
+      SpeculativeExecutionPolicy speculativeExecutionPolicy =
+          harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
+      long firstExecutionDelay = 100L;
+      when(speculativeExecutionPolicy.nextExecution(
+              any(Node.class), eq(null), eq(statement), eq(1)))
+          .thenReturn(firstExecutionDelay);
+
+      GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+      CompletionStage<AsyncGraphResultSet> resultSetFuture =
+          new ContinuousGraphRequestHandler(
+                  statement,
+                  harness.getSession(),
+                  harness.getContext(),
+                  "test",
+                  module,
+                  graphSupportChecker)
+              .handle();
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+
+      // next scheduled timeout should be the first speculative execution. Get it and run it.
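+      // Once the initial execution has completed the result, the handler stops the remaining
+      // executions: node2's late BOOTSTRAPPING error must not trigger another retry, so nothing
+      // is ever written to node3 (verifyNoWrite() below).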
+ CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout(); + assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)) + .isEqualTo(firstExecutionDelay); + speculativeExecution1.task().run(speculativeExecution1); + + node2Behavior.verifyWrite(); + node2Behavior.setWriteSuccess(); + + // Complete the request from the initial execution + node1Behavior.setResponseSuccess( + defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module))); + assertThatStage(resultSetFuture).isSuccess(); + + // node2 replies with a response that would trigger a RETRY_NEXT if the request was still + // running + node2Behavior.setResponseSuccess( + defaultDseFrameOf( + new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message"))); + + // The speculative execution should not move to node3 because it is stopped + node3Behavior.verifyNoWrite(); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java new file mode 100644 index 00000000000..b374539f12e --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/ContinuousGraphRequestHandlerTest.java @@ -0,0 +1,260 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.tenGraphRows; +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.DseTestDataProviders; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.cql.PoolBehavior; +import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class ContinuousGraphRequestHandlerTest { + + @Mock DefaultDriverContext mockContext; + @Mock DefaultNode node; + @Mock NodeMetricUpdater nodeMetricUpdater1; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + when(node.getMetricUpdater()).thenReturn(nodeMetricUpdater1); + } + + @Test + @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") + public void should_return_paged_results(GraphProtocol graphProtocol) throws IOException { + String profileName = "test-graph"; + when(nodeMetricUpdater1.isEnabled(DseNodeMetric.GRAPH_MESSAGES, profileName)).thenReturn(true); + + GraphBinaryModule module = createGraphBinaryModule(mockContext); + + GraphRequestHandlerTestHarness.Builder builder = + GraphRequestHandlerTestHarness.builder().withGraphProtocolForTestConfig(graphProtocol); + PoolBehavior node1Behavior = builder.customBehavior(node); + + try (RequestHandlerTestHarness harness = 
builder.build()) {
+
+      GraphStatement<?> graphStatement =
+          ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName(profileName);
+
+      ContinuousGraphRequestHandler handler =
+          new ContinuousGraphRequestHandler(
+              graphStatement,
+              harness.getSession(),
+              harness.getContext(),
+              "test",
+              module,
+              new GraphSupportChecker());
+
+      // send the initial request
+      CompletionStage<AsyncGraphResultSet> page1Future = handler.handle();
+
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(tenGraphRows(graphProtocol, module, 1, false)));
+
+      assertThatStage(page1Future)
+          .isSuccess(
+              page1 -> {
+                assertThat(page1.hasMorePages()).isTrue();
+                assertThat(page1.currentPage()).hasSize(10).allMatch(GraphNode::isVertex);
+                ExecutionInfo executionInfo = page1.getRequestExecutionInfo();
+                assertThat(executionInfo.getCoordinator()).isEqualTo(node);
+                assertThat(executionInfo.getErrors()).isEmpty();
+                assertThat(executionInfo.getIncomingPayload()).isEmpty();
+                assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0);
+                assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0);
+                assertThat(executionInfo.getWarnings()).isEmpty();
+              });
+
+      AsyncGraphResultSet page1 = CompletableFutures.getCompleted(page1Future);
+      CompletionStage<AsyncGraphResultSet> page2Future = page1.fetchNextPage();
+
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(tenGraphRows(graphProtocol, module, 2, true)));
+
+      assertThatStage(page2Future)
+          .isSuccess(
+              page2 -> {
+                assertThat(page2.hasMorePages()).isFalse();
+                assertThat(page2.currentPage()).hasSize(10).allMatch(GraphNode::isVertex);
+                ExecutionInfo executionInfo = page2.getRequestExecutionInfo();
+                assertThat(executionInfo.getCoordinator()).isEqualTo(node);
+                assertThat(executionInfo.getErrors()).isEmpty();
+                assertThat(executionInfo.getIncomingPayload()).isEmpty();
+                assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0);
+                assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0);
+                assertThat(executionInfo.getWarnings()).isEmpty();
+              });
+
+      validateMetrics(profileName, harness);
+    }
+  }
+
+  @Test
+  public void should_honor_default_timeout() throws Exception {
+    // given
+    GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext);
+    Duration defaultTimeout = Duration.ofSeconds(1);
+
+    RequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout);
+    PoolBehavior node1Behavior = builder.customBehavior(node);
+
+    try (RequestHandlerTestHarness harness = builder.build()) {
+
+      DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile();
+      when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true);
+      when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL))
+          .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode());
+
+      GraphStatement<?> graphStatement = ScriptGraphStatement.newInstance("mockQuery");
+
+      // when
+      ContinuousGraphRequestHandler handler =
+          new ContinuousGraphRequestHandler(
+              graphStatement,
+              harness.getSession(),
+              harness.getContext(),
+              "test",
+              binaryModule,
+              new GraphSupportChecker());
+
+      // send the initial request
+      CompletionStage<AsyncGraphResultSet> page1Future = handler.handle();
+
+      // acknowledge the write, will set the global timeout
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+
+      CapturedTimeout globalTimeout = harness.nextScheduledTimeout();
+      assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(defaultTimeout.toNanos());
+
+      // will trigger the global timeout and complete it exceptionally
+      globalTimeout.task().run(globalTimeout);
+      assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally();
+
+      assertThatThrownBy(() -> page1Future.toCompletableFuture().get())
+          .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class)
+          .hasMessageContaining("Query timed out after " + defaultTimeout);
+    }
+  }
+
+  @Test
+  public void should_honor_statement_timeout() throws Exception {
+    // given
+    GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext);
+    Duration defaultTimeout = Duration.ofSeconds(1);
+    Duration statementTimeout = Duration.ofSeconds(2);
+
+    RequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout);
+    PoolBehavior node1Behavior = builder.customBehavior(node);
+
+    try (RequestHandlerTestHarness harness = builder.build()) {
+
+      DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile();
+      when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true);
+      when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL))
+          .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode());
+
+      GraphStatement<?> graphStatement =
+          ScriptGraphStatement.newInstance("mockQuery").setTimeout(statementTimeout);
+
+      // when
+      ContinuousGraphRequestHandler handler =
+          new ContinuousGraphRequestHandler(
+              graphStatement,
+              harness.getSession(),
+              harness.getContext(),
+              "test",
+              binaryModule,
+              new GraphSupportChecker());
+
+      // send the initial request
+      CompletionStage<AsyncGraphResultSet> page1Future = handler.handle();
+
+      // acknowledge the write, will set the global timeout
+      node1Behavior.verifyWrite();
+      node1Behavior.setWriteSuccess();
+
+      CapturedTimeout globalTimeout = harness.nextScheduledTimeout();
+      assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS))
+          .isEqualTo(statementTimeout.toNanos());
+
+      // will trigger the global timeout and complete it exceptionally
+      globalTimeout.task().run(globalTimeout);
+      assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally();
+
+      assertThatThrownBy(() -> page1Future.toCompletableFuture().get())
+          .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class)
+          .hasMessageContaining("Query timed out after " + statementTimeout);
+    }
+  }
+
+  private void validateMetrics(String profileName, RequestHandlerTestHarness harness) {
+    // GRAPH_MESSAGES metrics update is invoked only for the first page
+    verify(nodeMetricUpdater1, times(1))
+        .updateTimer(
+            eq(DseNodeMetric.GRAPH_MESSAGES), eq(profileName), anyLong(), eq(TimeUnit.NANOSECONDS));
+    verifyNoMoreInteractions(nodeMetricUpdater1);
+
+    verify(harness.getSession().getMetricUpdater())
+        .updateTimer(
+            eq(DseSessionMetric.GRAPH_REQUESTS), eq(null), anyLong(), eq(TimeUnit.NANOSECONDS));
+    verifyNoMoreInteractions(harness.getSession().getMetricUpdater());
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java
new file mode 100644
index 00000000000..1814b12aa4e
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphExecutionInfoConverterTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.api.core.graph.GraphStatement;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.servererrors.ServerError;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.datastax.oss.protocol.internal.util.Bytes;
+import java.nio.ByteBuffer;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.Strict.class)
+@SuppressWarnings("deprecation")
+public class GraphExecutionInfoConverterTest {
+
+  @Mock GraphStatement<?> request;
+  @Mock Node node;
+
+  private List<Entry<Node, Throwable>> errors;
+  private List<String> warnings;
+  private ImmutableMap<String, ByteBuffer> payload;
+
+  @Before
+  public void setUp() {
+    errors =
+        Collections.singletonList(
+            new SimpleEntry<>(node, new ServerError(node, "this is a server error")));
+    warnings = Collections.singletonList("this is a warning");
+    payload = ImmutableMap.of("key", Bytes.fromHexString("0xcafebabe"));
+  }
+
+  @Test
+  public void should_convert_to_graph_execution_info() {
+
+    // given
+    ExecutionInfo executionInfo = mock(ExecutionInfo.class);
+    when(executionInfo.getRequest()).thenReturn(request);
+    when(executionInfo.getCoordinator()).thenReturn(node);
+    when(executionInfo.getSpeculativeExecutionCount()).thenReturn(42);
+    when(executionInfo.getSuccessfulExecutionIndex()).thenReturn(10);
+    when(executionInfo.getErrors()).thenReturn(errors);
+    when(executionInfo.getWarnings()).thenReturn(warnings);
+    when(executionInfo.getIncomingPayload()).thenReturn(payload);
+
+    // when
+    com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo =
+        GraphExecutionInfoConverter.convert(executionInfo);
+
+    // then
+    assertThat(graphExecutionInfo.getStatement()).isSameAs(request);
+    assertThat(graphExecutionInfo.getCoordinator()).isSameAs(node);
+    assertThat(graphExecutionInfo.getSpeculativeExecutionCount()).isEqualTo(42);
+    assertThat(graphExecutionInfo.getSuccessfulExecutionIndex()).isEqualTo(10);
+    assertThat(graphExecutionInfo.getErrors()).isEqualTo(errors);
+    assertThat(graphExecutionInfo.getWarnings()).isEqualTo(warnings);
+    assertThat(graphExecutionInfo.getIncomingPayload()).isEqualTo(payload);
+  }
+
+  @Test
+  public void should_convert_from_graph_execution_info() {
+
+    // given
+    com.datastax.dse.driver.api.core.graph.GraphExecutionInfo graphExecutionInfo =
mock(com.datastax.dse.driver.api.core.graph.GraphExecutionInfo.class); + when(graphExecutionInfo.getStatement()).thenAnswer(args -> request); + when(graphExecutionInfo.getCoordinator()).thenReturn(node); + when(graphExecutionInfo.getSpeculativeExecutionCount()).thenReturn(42); + when(graphExecutionInfo.getSuccessfulExecutionIndex()).thenReturn(10); + when(graphExecutionInfo.getErrors()).thenReturn(errors); + when(graphExecutionInfo.getWarnings()).thenReturn(warnings); + when(graphExecutionInfo.getIncomingPayload()).thenReturn(payload); + + // when + ExecutionInfo executionInfo = GraphExecutionInfoConverter.convert(graphExecutionInfo); + + // then + assertThat(executionInfo.getRequest()).isSameAs(request); + assertThatThrownBy(executionInfo::getStatement).isInstanceOf(ClassCastException.class); + assertThat(executionInfo.getCoordinator()).isSameAs(node); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(42); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(10); + assertThat(executionInfo.getErrors()).isEqualTo(errors); + assertThat(executionInfo.getWarnings()).isEqualTo(warnings); + assertThat(executionInfo.getIncomingPayload()).isEqualTo(payload); + assertThat(executionInfo.getPagingState()).isNull(); + assertThat(executionInfo.isSchemaInAgreement()).isTrue(); + assertThat(executionInfo.getQueryTraceAsync()).isCompletedExceptionally(); + assertThatThrownBy(executionInfo::getQueryTrace) + .isInstanceOf(IllegalStateException.class) + .hasMessage("Tracing was disabled for this request"); + assertThat(executionInfo.getResponseSizeInBytes()).isEqualTo(-1L); + assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1L); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java new file mode 100644 index 00000000000..d7ded441e70 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphNodeTest.java @@ -0,0 +1,299 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; +import org.apache.tinkerpop.gremlin.process.traversal.Traverser; +import org.apache.tinkerpop.gremlin.process.traversal.step.util.EmptyPath; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; +import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedEdge; +import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedProperty; +import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex; +import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertexProperty; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class GraphNodeTest { + + private GraphBinaryModule graphBinaryModule; + + @Before + public void setup() { + DefaultDriverContext dseDriverContext = mock(DefaultDriverContext.class); + when(dseDriverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); + when(dseDriverContext.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); + + TypeSerializerRegistry registry = + GraphBinaryModule.createDseTypeSerializerRegistry(dseDriverContext); + graphBinaryModule = + new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); + } + + @Test + public void should_not_support_set_for_graphson_2_0() throws IOException { + // when + GraphNode graphNode = serdeAndCreateGraphNode(ImmutableSet.of("value"), GRAPHSON_2_0); + + // then + assertThat(graphNode.isSet()).isFalse(); + } + + @Test + public void should_throw_for_set_for_graphson_1_0() throws IOException { + // when + GraphNode graphNode = serdeAndCreateGraphNode(ImmutableSet.of("value"), GRAPHSON_1_0); + + // then + assertThat(graphNode.isSet()).isFalse(); + assertThatThrownBy(graphNode::asSet).isExactlyInstanceOf(UnsupportedOperationException.class); + } + + 
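+  // The tests in this class follow the same round-trip pattern: the input value is serialized
+  // with the protocol under test and read back as a GraphNode (see serdeAndCreateGraphNode at
+  // the bottom of this class), so each assertion exercises the full encode/decode path rather
+  // than a hand-built node.
+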
+  @Test
+  @UseDataProvider(value = "allGraphProtocols")
+  public void should_create_graph_node_for_list(GraphProtocol graphVersion) throws IOException {
+    // when
+    GraphNode graphNode = serdeAndCreateGraphNode(ImmutableList.of("value"), graphVersion);
+
+    // then
+    assertThat(graphNode.isList()).isTrue();
+    List<Object> result = graphNode.asList();
+    assertThat(result).isEqualTo(ImmutableList.of("value"));
+  }
+
+  @Test
+  @UseDataProvider("allGraphProtocols")
+  public void should_create_graph_node_for_map(GraphProtocol graphProtocol) throws IOException {
+    // when
+    GraphNode graphNode = serdeAndCreateGraphNode(ImmutableMap.of("value", 1234), graphProtocol);
+
+    // then
+    assertThat(graphNode.isMap()).isTrue();
+    Map<Object, Object> result = graphNode.asMap();
+    assertThat(result).isEqualTo(ImmutableMap.of("value", 1234));
+  }
+
+  @Test
+  @UseDataProvider("graphson1_0and2_0")
+  public void should_create_graph_node_for_map_for_non_string_key(GraphProtocol graphProtocol)
+      throws IOException {
+    // when
+    GraphNode graphNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol);
+
+    // then
+    assertThat(graphNode.isMap()).isTrue();
+    Map<Object, Object> result = graphNode.asMap();
+    assertThat(result).isEqualTo(ImmutableMap.of("12", 1234));
+  }
+
+  @Test
+  @UseDataProvider(value = "allGraphProtocols")
+  public void should_calculate_size_of_collection_types(GraphProtocol graphProtocol)
+      throws IOException {
+    // when
+    GraphNode mapNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol);
+    GraphNode setNode = serdeAndCreateGraphNode(ImmutableSet.of(12, 1234), graphProtocol);
+    GraphNode listNode = serdeAndCreateGraphNode(ImmutableList.of(12, 1234, 99999), graphProtocol);
+
+    // then
+    assertThat(mapNode.size()).isEqualTo(1);
+    assertThat(setNode.size()).isEqualTo(2);
+    assertThat(listNode.size()).isEqualTo(3);
+  }
+
+  @Test
+  @UseDataProvider(value = "allGraphProtocols")
+  public void should_return_is_value_only_for_scalar_value(GraphProtocol graphProtocol)
+      throws IOException {
+    // when
+    GraphNode mapNode = serdeAndCreateGraphNode(ImmutableMap.of(12, 1234), graphProtocol);
+    GraphNode setNode = serdeAndCreateGraphNode(ImmutableSet.of(12, 1234), graphProtocol);
+    GraphNode listNode = serdeAndCreateGraphNode(ImmutableList.of(12, 1234, 99999), graphProtocol);
+    GraphNode vertexNode =
+        serdeAndCreateGraphNode(new DetachedVertex("a", "l", null), graphProtocol);
+    GraphNode edgeNode =
+        serdeAndCreateGraphNode(
+            new DetachedEdge("a", "l", Collections.emptyMap(), "v1", "l1", "v2", "l2"),
+            graphProtocol);
+    GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), graphProtocol);
+    GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), graphProtocol);
+    GraphNode vertexPropertyNode =
+        serdeAndCreateGraphNode(
+            new DetachedVertexProperty<>("id", "l", "v", null, new DetachedVertex("a", "l", null)),
+            graphProtocol);
+    GraphNode scalarValueNode = serdeAndCreateGraphNode(true, graphProtocol);
+
+    // then
+    assertThat(mapNode.isValue()).isFalse();
+    assertThat(setNode.isValue()).isFalse();
+    assertThat(listNode.isValue()).isFalse();
+    assertThat(vertexNode.isValue()).isFalse();
+    assertThat(edgeNode.isValue()).isFalse();
+    assertThat(pathNode.isValue()).isFalse();
+    assertThat(propertyNode.isValue()).isFalse();
+    assertThat(vertexPropertyNode.isValue()).isFalse();
+    assertThat(scalarValueNode.isValue()).isTrue();
+  }
+
+  @Test
+  @UseDataProvider("objectGraphNodeProtocols")
+  public void should_check_if_node_is_property_not_map(GraphProtocol graphProtocol)
+      throws IOException {
+    // when
+    GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), graphProtocol);
+
+    // then
+    assertThat(propertyNode.isProperty()).isTrue();
+    assertThat(propertyNode.isMap()).isFalse();
+    assertThat(propertyNode.asProperty()).isNotNull();
+  }
+
+  @Test
+  public void should_check_if_node_is_property_or_map_for_1_0() throws IOException {
+    // when
+    GraphNode propertyNode = serdeAndCreateGraphNode(new DetachedProperty<>("a", 1), GRAPHSON_1_0);
+
+    // then
+    assertThat(propertyNode.isProperty()).isTrue();
+    assertThat(propertyNode.isMap()).isTrue();
+    assertThat(propertyNode.asProperty()).isNotNull();
+  }
+
+  @Test
+  @UseDataProvider("allGraphProtocols")
+  public void should_check_if_node_is_vertex_property(GraphProtocol graphProtocol)
+      throws IOException {
+    // when
+    GraphNode vertexPropertyNode =
+        serdeAndCreateGraphNode(
+            new DetachedVertexProperty<>("id", "l", "v", null, new DetachedVertex("a", "l", null)),
+            graphProtocol);
+
+    // then
+    assertThat(vertexPropertyNode.isVertexProperty()).isTrue();
+    assertThat(vertexPropertyNode.asVertexProperty()).isNotNull();
+  }
+
+  @Test
+  public void should_check_if_node_is_path_for_graphson_1_0() throws IOException {
+    // when
+    GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), GRAPHSON_1_0);
+
+    // then
+    assertThat(pathNode.isPath()).isFalse();
+    assertThatThrownBy(pathNode::asPath).isExactlyInstanceOf(UnsupportedOperationException.class);
+  }
+
+  @Test
+  @UseDataProvider("objectGraphNodeProtocols")
+  public void should_check_if_node_is_path(GraphProtocol graphProtocol) throws IOException {
+    // when
+    GraphNode pathNode = serdeAndCreateGraphNode(EmptyPath.instance(), graphProtocol);
+
+    // then
+    assertThat(pathNode.isPath()).isTrue();
+    assertThat(pathNode.asPath()).isNotNull();
+  }
+
+  @Test
+  @UseDataProvider("allGraphProtocols")
+  public void should_check_if_node_is_vertex(GraphProtocol graphProtocol) throws IOException {
+    // when
+    GraphNode vertexNode =
+        serdeAndCreateGraphNode(new DetachedVertex("a", "l", null), graphProtocol);
+
+    // then
+    assertThat(vertexNode.isVertex()).isTrue();
+    assertThat(vertexNode.asVertex()).isNotNull();
+  }
+
+  @Test
+  @UseDataProvider("allGraphProtocols")
+  public void should_check_if_node_is_edge(GraphProtocol graphProtocol) throws IOException {
+    // when
+    GraphNode edgeNode =
+        serdeAndCreateGraphNode(
+            new DetachedEdge("a", "l", Collections.emptyMap(), "v1", "l1", "v2", "l2"),
+            graphProtocol);
+
+    // then
+    assertThat(edgeNode.isEdge()).isTrue();
+    assertThat(edgeNode.asEdge()).isNotNull();
+  }
+
+  private GraphNode serdeAndCreateGraphNode(Object inputValue, GraphProtocol graphProtocol)
+      throws IOException {
+    if (graphProtocol.isGraphBinary()) {
+      Buffer tinkerBuf = graphBinaryModule.serialize(new DefaultRemoteTraverser<>(inputValue, 0L));
+      ByteBuffer nioBuffer = tinkerBuf.nioBuffer();
+      tinkerBuf.release();
+      return new ObjectGraphNode(
+          GraphConversions.createGraphBinaryGraphNode(
+                  ImmutableList.of(nioBuffer), graphBinaryModule)
+              .as(Traverser.class)
+              .get());
+    } else {
+      return GraphSONUtils.createGraphNode(
+          ImmutableList.of(GraphSONUtils.serializeToByteBuffer(inputValue, graphProtocol)),
+          graphProtocol);
+    }
+  }
+
+  @DataProvider
+  public static Object[][] allGraphProtocols() {
+    return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}, {GRAPH_BINARY_1_0}};
+  }
+
+  @DataProvider
+  public static Object[][] graphson1_0and2_0() {
+    return new Object[][] {{GRAPHSON_1_0}, {GRAPHSON_2_0}};
+  }
+
+  @DataProvider
+  public static Object[][]
objectGraphNodeProtocols() { + return new Object[][] {{GRAPHSON_2_0}, {GRAPH_BINARY_1_0}}; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java new file mode 100644 index 00000000000..9f325003610 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTest.java @@ -0,0 +1,589 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_1_0; +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPHSON_2_0; +import static com.datastax.dse.driver.internal.core.graph.GraphProtocol.GRAPH_BINARY_1_0; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.serialize; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.singleGraphRow; +import static com.datastax.oss.driver.api.core.type.codec.TypeCodecs.BIGINT; +import static com.datastax.oss.driver.api.core.type.codec.TypeCodecs.TEXT; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.matches; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.DseTestDataProviders; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.dse.protocol.internal.request.RawBytesQuery; +import 
com.datastax.dse.protocol.internal.request.query.DseQueryOptions; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.internal.core.cql.Conversions; +import com.datastax.oss.driver.internal.core.cql.PoolBehavior; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.request.Query; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.io.IOException; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class GraphRequestHandlerTest { + + private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("test-graph\\|\\d+"); + + @Mock DefaultNode node; + + @Mock protected NodeMetricUpdater nodeMetricUpdater1; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + when(node.getMetricUpdater()).thenReturn(nodeMetricUpdater1); + } + + @Test + @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") + public void should_create_query_message_from_script_statement(GraphProtocol graphProtocol) + throws IOException { + // initialization + GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build(); + ScriptGraphStatement graphStatement = + ScriptGraphStatement.newInstance("mockQuery") + .setQueryParam("p1", 1L) + .setQueryParam("p2", Uuids.random()); + + GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); + + // when + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); + + Message m = + GraphConversions.createMessageFromGraphStatement( + graphStatement, graphProtocol, executionProfile, harness.getContext(), module); + + // checks + assertThat(m).isInstanceOf(Query.class); + Query q = ((Query) m); + assertThat(q.query).isEqualTo("mockQuery"); + assertThat(q.options.positionalValues) + .containsExactly(serialize(graphStatement.getQueryParams(), graphProtocol, module)); + assertThat(q.options.namedValues).isEmpty(); + } + + @Test + @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") + public void should_create_query_message_from_fluent_statement(GraphProtocol graphProtocol) + throws IOException { + // 
initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    GraphTraversal<Vertex, Vertex> traversalTest =
+        DseGraph.g.V().has("person", "name", "marko").has("p1", 1L).has("p2", Uuids.random());
+    GraphStatement<?> graphStatement = FluentGraphStatement.newInstance(traversalTest);
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // when
+    DriverExecutionProfile executionProfile =
+        Conversions.resolveExecutionProfile(graphStatement, harness.getContext());
+
+    Message m =
+        GraphConversions.createMessageFromGraphStatement(
+            graphStatement, graphProtocol, executionProfile, harness.getContext(), module);
+
+    Map<String, ByteBuffer> createdCustomPayload =
+        GraphConversions.createCustomPayload(
+            graphStatement, graphProtocol, executionProfile, harness.getContext(), module);
+
+    // checks
+    assertThat(m).isInstanceOf(RawBytesQuery.class);
+    testQueryRequestAndPayloadContents(
+        ((RawBytesQuery) m),
+        createdCustomPayload,
+        GraphConversions.bytecodeToSerialize(graphStatement),
+        graphProtocol,
+        module);
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols")
+  public void should_create_query_message_from_batch_statement(GraphProtocol graphProtocol)
+      throws IOException {
+    // initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    @SuppressWarnings("rawtypes")
+    List<GraphTraversal> traversalsTest =
+        ImmutableList.of(
+            // randomly testing some complex data types. The complete suite of data type tests is
+            // in GraphDataTypesTest
+            DseGraph.g
+                .addV("person")
+                .property("p1", 2.3f)
+                .property("p2", LocalDateTime.now(ZoneOffset.UTC)),
+            DseGraph.g
+                .addV("software")
+                .property("p3", new BigInteger("123456789123456789123456789123456789"))
+                .property("p4", ImmutableList.of(Point.fromCoordinates(30.4, 25.63746284))));
+    GraphStatement<?> graphStatement =
+        BatchGraphStatement.builder().addTraversals(traversalsTest).build();
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // when
+    DriverExecutionProfile executionProfile =
+        Conversions.resolveExecutionProfile(graphStatement, harness.getContext());
+
+    Message m =
+        GraphConversions.createMessageFromGraphStatement(
+            graphStatement, graphProtocol, executionProfile, harness.getContext(), module);
+
+    Map<String, ByteBuffer> createdCustomPayload =
+        GraphConversions.createCustomPayload(
+            graphStatement, graphProtocol, executionProfile, harness.getContext(), module);
+
+    // checks
+    assertThat(m).isInstanceOf(RawBytesQuery.class);
+    testQueryRequestAndPayloadContents(
+        ((RawBytesQuery) m),
+        createdCustomPayload,
+        GraphConversions.bytecodeToSerialize(graphStatement),
+        graphProtocol,
+        module);
+  }
+
+  private void testQueryRequestAndPayloadContents(
+      RawBytesQuery q,
+      Map<String, ByteBuffer> customPayload,
+      Object traversalTest,
+      GraphProtocol graphProtocol,
+      GraphBinaryModule module)
+      throws IOException {
+    if (graphProtocol.isGraphBinary()) {
+      assertThat(q.query).isEqualTo(GraphConversions.EMPTY_STRING_QUERY);
+      assertThat(customPayload).containsKey(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY);
+      ByteBuffer encodedQuery = customPayload.get(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY);
+      assertThat(encodedQuery).isNotNull();
+      assertThat(encodedQuery).isEqualTo(serialize(traversalTest, graphProtocol, module));
+    } else {
+      assertThat(q.query).isEqualTo(serialize(traversalTest, graphProtocol, module).array());
+      assertThat(customPayload).doesNotContainKey(GraphConversions.GRAPH_BINARY_QUERY_OPTION_KEY);
+    }
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols")
+  public void should_set_correct_query_options_from_graph_statement(GraphProtocol subProtocol)
+      throws IOException {
+    // initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    GraphStatement<?> graphStatement =
+        ScriptGraphStatement.newInstance("mockQuery").setQueryParam("name", "value");
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // when
+    DriverExecutionProfile executionProfile =
+        Conversions.resolveExecutionProfile(graphStatement, harness.getContext());
+    Message m =
+        GraphConversions.createMessageFromGraphStatement(
+            graphStatement, subProtocol, executionProfile, harness.getContext(), module);
+
+    // checks
+    Query query = ((Query) m);
+    DseQueryOptions options = ((DseQueryOptions) query.options);
+    assertThat(options.consistency)
+        .isEqualTo(
+            DefaultConsistencyLevel.valueOf(
+                    executionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+                .getProtocolCode());
+    // set by the mock timestamp generator
+    assertThat(options.defaultTimestamp).isEqualTo(-9223372036854775808L);
+    assertThat(options.positionalValues)
+        .isEqualTo(
+            ImmutableList.of(serialize(ImmutableMap.of("name", "value"), subProtocol, module)));
+
+    m =
+        GraphConversions.createMessageFromGraphStatement(
+            graphStatement.setTimestamp(2L),
+            subProtocol,
+            executionProfile,
+            harness.getContext(),
+            module);
+    query = ((Query) m);
+    options = ((DseQueryOptions) query.options);
+    assertThat(options.defaultTimestamp).isEqualTo(2L);
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols")
+  public void should_create_payload_from_config_options(GraphProtocol subProtocol) {
+    // initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    GraphStatement<?> graphStatement =
+        ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName("test-graph");
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // when
+    DriverExecutionProfile executionProfile =
+        Conversions.resolveExecutionProfile(graphStatement, harness.getContext());
+
+    Map<String, ByteBuffer> requestPayload =
+        GraphConversions.createCustomPayload(
+            graphStatement, subProtocol, executionProfile, harness.getContext(), module);
+
+    // checks
+    Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null);
+    Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_NAME, null);
+    Mockito.verify(executionProfile).getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false);
+    Mockito.verify(executionProfile).getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO);
+    Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null);
+    Mockito.verify(executionProfile).getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null);
+
+    assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY))
+        .isEqualTo(TEXT.encode("a", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_RESULTS_OPTION_KEY))
+        .isEqualTo(
+            TEXT.encode(subProtocol.toInternalCode(), harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY))
+        .isEqualTo(TEXT.encode("mockGraph", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_LANG_OPTION_KEY))
+        .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion()));
+        .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_TIMEOUT_OPTION_KEY))
+        .isEqualTo(BIGINT.encode(2L, harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY))
+        .isEqualTo(TEXT.encode("LOCAL_TWO", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY))
+        .isEqualTo(TEXT.encode("LOCAL_THREE", harness.getContext().getProtocolVersion()));
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols")
+  public void should_create_payload_from_statement_options(GraphProtocol subProtocol) {
+    // initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    GraphStatement<?> graphStatement =
+        ScriptGraphStatement.builder("mockQuery")
+            .setGraphName("mockGraph")
+            .setTraversalSource("a")
+            .setTimeout(Duration.ofMillis(2))
+            .setReadConsistencyLevel(DefaultConsistencyLevel.TWO)
+            .setWriteConsistencyLevel(DefaultConsistencyLevel.THREE)
+            .setSystemQuery(false)
+            .build();
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // when
+    DriverExecutionProfile executionProfile =
+        Conversions.resolveExecutionProfile(graphStatement, harness.getContext());
+
+    Map<String, ByteBuffer> requestPayload =
+        GraphConversions.createCustomPayload(
+            graphStatement, subProtocol, executionProfile, harness.getContext(), module);
+
+    // checks
+    Mockito.verify(executionProfile, never())
+        .getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null);
+    Mockito.verify(executionProfile, never()).getString(DseDriverOption.GRAPH_NAME, null);
+    Mockito.verify(executionProfile, never())
+        .getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false);
+    Mockito.verify(executionProfile, never())
+        .getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO);
+    Mockito.verify(executionProfile, never())
+        .getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null);
+    Mockito.verify(executionProfile, never())
+        .getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null);
+
+    assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY))
+        .isEqualTo(TEXT.encode("a", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_RESULTS_OPTION_KEY))
+        .isEqualTo(
+            TEXT.encode(subProtocol.toInternalCode(), harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY))
+        .isEqualTo(TEXT.encode("mockGraph", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_LANG_OPTION_KEY))
+        .isEqualTo(TEXT.encode("gremlin-groovy", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_TIMEOUT_OPTION_KEY))
+        .isEqualTo(BIGINT.encode(2L, harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY))
+        .isEqualTo(TEXT.encode("TWO", harness.getContext().getProtocolVersion()));
+    assertThat(requestPayload.get(GraphConversions.GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY))
+        .isEqualTo(TEXT.encode("THREE", harness.getContext().getProtocolVersion()));
+  }
+
+  @Test
+  @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols")
+  public void should_not_set_graph_name_on_system_queries(GraphProtocol subProtocol) {
+    // initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    GraphStatement<?> graphStatement =
+        ScriptGraphStatement.newInstance("mockQuery").setSystemQuery(true);
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // when
+    DriverExecutionProfile executionProfile =
+        Conversions.resolveExecutionProfile(graphStatement, harness.getContext());
+
+    Map<String, ByteBuffer> requestPayload =
+        GraphConversions.createCustomPayload(
+            graphStatement, subProtocol, executionProfile, harness.getContext(), module);
+
+    // checks
+    assertThat(requestPayload.get(GraphConversions.GRAPH_NAME_OPTION_KEY)).isNull();
+    assertThat(requestPayload.get(GraphConversions.GRAPH_SOURCE_OPTION_KEY)).isNull();
+  }
+
+  @Test
+  @UseDataProvider("supportedGraphProtocolsWithDseVersions")
+  public void should_return_results_for_statements(GraphProtocol graphProtocol, Version dseVersion)
+      throws IOException {
+
+    GraphRequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder()
+            .withGraphProtocolForTestConfig(graphProtocol)
+            .withDseVersionInMetadata(dseVersion);
+    PoolBehavior node1Behavior = builder.customBehavior(node);
+    GraphRequestHandlerTestHarness harness = builder.build();
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    // Ideally we would provide a function here to produce results, instead of a static
+    // predefined response; we would pass it the harness instance or a (mocked) DriverContext.
+    // Since the RequestHandlerTestHarness API does not allow that at the moment, we have to
+    // use another DseDriverContext and GraphBinaryModule here, instead of reusing the ones in
+    // the harness' DriverContext.
+    node1Behavior.setResponseSuccess(defaultDseFrameOf(singleGraphRow(graphProtocol, module)));
+
+    GraphSupportChecker graphSupportChecker = mock(GraphSupportChecker.class);
+    when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false);
+    when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol);
+
+    GraphRequestAsyncProcessor p =
+        Mockito.spy(new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker));
+    when(p.getGraphBinaryModule()).thenReturn(module);
+
+    GraphStatement<?> graphStatement =
+        ScriptGraphStatement.newInstance("mockQuery").setExecutionProfileName("test-graph");
+    GraphResultSet grs =
+        new GraphRequestSyncProcessor(p)
+            .process(graphStatement, harness.getSession(), harness.getContext(), "test-graph");
+
+    List<GraphNode> nodes = grs.all();
+    assertThat(nodes.size()).isEqualTo(1);
+
+    GraphNode graphNode = nodes.get(0);
+    assertThat(graphNode.isVertex()).isTrue();
+
+    Vertex vRead = graphNode.asVertex();
+    assertThat(vRead.label()).isEqualTo("person");
+    assertThat(vRead.id()).isEqualTo(1);
+    if (!graphProtocol.isGraphBinary()) {
+      // GraphBinary does not encode properties, whether they are present in the
+      // parent element or not :/
+      assertThat(vRead.property("name").id()).isEqualTo(11);
+      assertThat(vRead.property("name").value()).isEqualTo("marko");
+    }
+  }
+
+  @DataProvider
+  public static Object[][] supportedGraphProtocolsWithDseVersions() {
+    return new Object[][] {
+      {GRAPHSON_1_0, Version.parse("6.7.0")},
+      {GRAPHSON_1_0, Version.parse("6.8.0")},
+      {GRAPHSON_2_0, Version.parse("6.7.0")},
+      {GRAPHSON_2_0, Version.parse("6.8.0")},
+      {GRAPH_BINARY_1_0, Version.parse("6.7.0")},
+      {GRAPH_BINARY_1_0, Version.parse("6.8.0")},
+    };
+  }
+
+  @Test
+  @UseDataProvider("dseVersionsWithDefaultGraphProtocol")
+  public void should_invoke_request_tracker_and_update_metrics(
+      GraphProtocol graphProtocol, Version dseVersion) throws IOException {
+    when(nodeMetricUpdater1.isEnabled(
+            DseNodeMetric.GRAPH_MESSAGES, DriverExecutionProfile.DEFAULT_NAME))
+        .thenReturn(true);
+
+    GraphRequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder()
+            .withGraphProtocolForTestConfig(graphProtocol)
+            .withDseVersionInMetadata(dseVersion);
+    PoolBehavior node1Behavior = builder.customBehavior(node);
+    GraphRequestHandlerTestHarness harness = builder.build();
+
+    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());
+
+    GraphSupportChecker graphSupportChecker = mock(GraphSupportChecker.class);
+    when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false);
+    when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol);
+
+    GraphRequestAsyncProcessor p =
+        Mockito.spy(new GraphRequestAsyncProcessor(harness.getContext(), graphSupportChecker));
+    when(p.getGraphBinaryModule()).thenReturn(module);
+
+    RequestTracker requestTracker = mock(RequestTracker.class);
+    when(harness.getContext().getRequestTracker()).thenReturn(requestTracker);
+
+    GraphStatement<?> graphStatement = ScriptGraphStatement.newInstance("mockQuery");
+
+    node1Behavior.setResponseSuccess(defaultDseFrameOf(singleGraphRow(graphProtocol, module)));
+
+    // use the spied processor so that the GraphBinaryModule stubbed above is picked up
+    GraphResultSet grs =
+        new GraphRequestSyncProcessor(p)
+            .process(graphStatement, harness.getSession(), harness.getContext(), "test-graph");
+
+    List<GraphNode> nodes = grs.all();
+    assertThat(nodes.size()).isEqualTo(1);
+
+    GraphNode graphNode = nodes.get(0);
+    assertThat(graphNode.isVertex()).isTrue();
+
+    Vertex actual = graphNode.asVertex();
+    assertThat(actual.label()).isEqualTo("person");
+    assertThat(actual.id()).isEqualTo(1);
+    if (!graphProtocol.isGraphBinary()) {
+      // GraphBinary does not encode properties, whether they are present in the
+      // parent element or not :/
+      assertThat(actual.property("name").id()).isEqualTo(11);
+      assertThat(actual.property("name").value()).isEqualTo("marko");
+    }
+
+    verify(requestTracker)
+        .onSuccess(
+            eq(graphStatement),
+            anyLong(),
+            any(DriverExecutionProfile.class),
+            eq(node),
+            matches(LOG_PREFIX_PER_REQUEST));
+    verify(requestTracker)
+        .onNodeSuccess(
+            eq(graphStatement),
+            anyLong(),
+            any(DriverExecutionProfile.class),
+            eq(node),
+            matches(LOG_PREFIX_PER_REQUEST));
+    verifyNoMoreInteractions(requestTracker);
+
+    verify(nodeMetricUpdater1)
+        .isEnabled(DseNodeMetric.GRAPH_MESSAGES, DriverExecutionProfile.DEFAULT_NAME);
+    verify(nodeMetricUpdater1)
+        .updateTimer(
+            eq(DseNodeMetric.GRAPH_MESSAGES),
+            eq(DriverExecutionProfile.DEFAULT_NAME),
+            anyLong(),
+            eq(TimeUnit.NANOSECONDS));
+    verifyNoMoreInteractions(nodeMetricUpdater1);
+
+    verify(harness.getSession().getMetricUpdater())
+        .isEnabled(DseSessionMetric.GRAPH_REQUESTS, DriverExecutionProfile.DEFAULT_NAME);
+    verify(harness.getSession().getMetricUpdater())
+        .updateTimer(
+            eq(DseSessionMetric.GRAPH_REQUESTS),
+            eq(DriverExecutionProfile.DEFAULT_NAME),
+            anyLong(),
+            eq(TimeUnit.NANOSECONDS));
+    verifyNoMoreInteractions(harness.getSession().getMetricUpdater());
+  }
+
+  @Test
+  public void should_honor_statement_consistency_level() {
+    // initialization
+    GraphRequestHandlerTestHarness harness = GraphRequestHandlerTestHarness.builder().build();
+    ScriptGraphStatement
graphStatement = + ScriptGraphStatement.builder("mockScript") + .setConsistencyLevel(DefaultConsistencyLevel.THREE) + .build(); + + GraphBinaryModule module = createGraphBinaryModule(harness.getContext()); + + // when + DriverExecutionProfile executionProfile = + Conversions.resolveExecutionProfile(graphStatement, harness.getContext()); + + Message m = + GraphConversions.createMessageFromGraphStatement( + graphStatement, GRAPH_BINARY_1_0, executionProfile, harness.getContext(), module); + + // checks + assertThat(m).isInstanceOf(Query.class); + Query q = ((Query) m); + assertThat(q.options.consistency).isEqualTo(DefaultConsistencyLevel.THREE.getProtocolCode()); + } + + @DataProvider + public static Object[][] dseVersionsWithDefaultGraphProtocol() { + // Default GraphSON sub protocol version differs based on DSE version, so test with a version + // less than DSE 6.8 as well as DSE 6.8. + return new Object[][] { + {GRAPHSON_2_0, Version.parse("6.7.0")}, + {GRAPH_BINARY_1_0, Version.parse("6.8.0")}, + }; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java new file mode 100644 index 00000000000..7e46b09bd59 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphRequestHandlerTestHarness.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.graph; + +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.DseTestFixtures; +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.cql.RequestHandlerTestHarness; +import com.datastax.oss.driver.internal.core.servererrors.DefaultWriteTypeRegistry; +import com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler; +import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; +import com.datastax.oss.protocol.internal.Frame; +import io.netty.channel.EventLoop; +import java.time.Duration; +import javax.annotation.Nullable; +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; + +/** + * Provides the environment to test a request handler, where a query plan can be defined, and the + * behavior of each successive node simulated. + */ +public class GraphRequestHandlerTestHarness extends RequestHandlerTestHarness { + + @Mock DriverExecutionProfile testProfile; + + @Mock DriverExecutionProfile systemQueryExecutionProfile; + + @Mock DefaultDriverContext dseDriverContext; + + @Mock EventLoop eventLoop; + + protected GraphRequestHandlerTestHarness( + Builder builder, + @Nullable GraphProtocol graphProtocolForTestConfig, + Duration graphTimeout, + @Nullable Version dseVersionForTestMetadata) { + super(builder); + + // not mocked by RequestHandlerTestHarness, will be used when DseDriverOptions.GRAPH_TIMEOUT + // is not zero in the config + when(eventLoopGroup.next()).thenReturn(eventLoop); + + // default graph options as in the reference.conf file + when(defaultProfile.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("g"); + when(defaultProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(Boolean.FALSE); + when(defaultProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)).thenReturn(false); + when(defaultProfile.getString(DseDriverOption.GRAPH_NAME, null)).thenReturn("mockGraph"); + when(defaultProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) + .thenReturn(graphTimeout); + + when(testProfile.getName()).thenReturn("test-graph"); + when(testProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO)) + .thenReturn(Duration.ofMillis(2L)); + when(testProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name()); + when(testProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).thenReturn(5000); + when(testProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) + .thenReturn(DefaultConsistencyLevel.SERIAL.name()); + when(testProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE)).thenReturn(false); + when(testProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES)).thenReturn(true); + 
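+    // graph-specific options for the "test-graph" profile; the payload tests assert these
+    // exact values ("a", "mockGraph", LOCAL_TWO, LOCAL_THREE)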
when(testProfile.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("a");
+    when(testProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL))
+        .thenReturn(graphProtocolForTestConfig != null);
+    // only mock the config if graphProtocolForTestConfig is not null
+    if (graphProtocolForTestConfig != null) {
+      when(testProfile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL))
+          .thenReturn(graphProtocolForTestConfig.toInternalCode());
+    }
+    when(testProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false)).thenReturn(false);
+    when(testProfile.getString(DseDriverOption.GRAPH_NAME, null)).thenReturn("mockGraph");
+    when(testProfile.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null))
+        .thenReturn("LOCAL_TWO");
+    when(testProfile.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null))
+        .thenReturn("LOCAL_THREE");
+    when(config.getProfile("test-graph")).thenReturn(testProfile);
+
+    when(systemQueryExecutionProfile.getName()).thenReturn("graph-system-query");
+    when(systemQueryExecutionProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .thenReturn(DefaultConsistencyLevel.LOCAL_ONE.name());
+    when(systemQueryExecutionProfile.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE))
+        .thenReturn(5000);
+    when(systemQueryExecutionProfile.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY))
+        .thenReturn(DefaultConsistencyLevel.SERIAL.name());
+    when(systemQueryExecutionProfile.getBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE))
+        .thenReturn(false);
+    when(systemQueryExecutionProfile.getBoolean(DefaultDriverOption.PREPARE_ON_ALL_NODES))
+        .thenReturn(true);
+    when(systemQueryExecutionProfile.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO))
+        .thenReturn(Duration.ofMillis(2));
+    when(systemQueryExecutionProfile.getBoolean(DseDriverOption.GRAPH_IS_SYSTEM_QUERY, false))
+        .thenReturn(true);
+    when(systemQueryExecutionProfile.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null))
+        .thenReturn("LOCAL_TWO");
+    when(systemQueryExecutionProfile.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null))
+        .thenReturn("LOCAL_THREE");
+
+    when(config.getProfile("graph-system-query")).thenReturn(systemQueryExecutionProfile);
+
+    // need to re-mock everything on the context, because the RequestHandlerTestHarness returns an
+    // InternalDriverContext and not a DseDriverContext.
+    // Couldn't figure out a way with Mockito to say "mock this object (this.dseDriverContext),
+    // and delegate every call to that other object (this.context), except _this_ call and
+    // _this_ one, and so on". A spy wouldn't work, because the spied object has to be of the
+    // same type as the final object.
+    when(dseDriverContext.getConfig()).thenReturn(config);
+    when(dseDriverContext.getNettyOptions()).thenReturn(nettyOptions);
+    when(dseDriverContext.getLoadBalancingPolicyWrapper()).thenReturn(loadBalancingPolicyWrapper);
+    when(dseDriverContext.getRetryPolicy(ArgumentMatchers.anyString())).thenReturn(retryPolicy);
+    when(dseDriverContext.getSpeculativeExecutionPolicy(ArgumentMatchers.anyString()))
+        .thenReturn(speculativeExecutionPolicy);
+    when(dseDriverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT);
+    when(dseDriverContext.getTimestampGenerator()).thenReturn(timestampGenerator);
+    when(dseDriverContext.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2);
+    when(dseDriverContext.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry);
+    when(dseDriverContext.getConsistencyLevelRegistry())
+        .thenReturn(new DefaultConsistencyLevelRegistry());
+    when(dseDriverContext.getWriteTypeRegistry()).thenReturn(new DefaultWriteTypeRegistry());
+    when(dseDriverContext.getRequestThrottler())
+        .thenReturn(new PassThroughRequestThrottler(dseDriverContext));
+    when(dseDriverContext.getRequestTracker()).thenReturn(new NoopRequestTracker(dseDriverContext));
+    // if a DSE version is specified for test metadata, then we need to mock that up on the context
+    if (dseVersionForTestMetadata != null) {
+      DseTestFixtures.mockNodesInMetadataWithVersions(
+          dseDriverContext, true, dseVersionForTestMetadata);
+    }
+  }
+
+  @Override
+  public DefaultDriverContext getContext() {
+    return dseDriverContext;
+  }
+
+  public static GraphRequestHandlerTestHarness.Builder builder() {
+    return new GraphRequestHandlerTestHarness.Builder();
+  }
+
+  public static class Builder extends RequestHandlerTestHarness.Builder {
+
+    private GraphProtocol graphProtocolForTestConfig;
+    private Duration graphTimeout = Duration.ZERO;
+    private Version dseVersionForTestMetadata;
+
+    public GraphRequestHandlerTestHarness.Builder withGraphProtocolForTestConfig(
+        GraphProtocol protocol) {
+      this.graphProtocolForTestConfig = protocol;
+      return this;
+    }
+
+    public GraphRequestHandlerTestHarness.Builder withDseVersionInMetadata(Version dseVersion) {
+      this.dseVersionForTestMetadata = dseVersion;
+      return this;
+    }
+
+    public GraphRequestHandlerTestHarness.Builder withGraphTimeout(Duration globalTimeout) {
+      this.graphTimeout = globalTimeout;
+      return this;
+    }
+
+    @Override
+    public GraphRequestHandlerTestHarness.Builder withEmptyPool(Node node) {
+      super.withEmptyPool(node);
+      return this;
+    }
+
+    @Override
+    public GraphRequestHandlerTestHarness.Builder withWriteFailure(Node node, Throwable cause) {
+      super.withWriteFailure(node, cause);
+      return this;
+    }
+
+    @Override
+    public GraphRequestHandlerTestHarness.Builder withResponseFailure(Node node, Throwable cause) {
+      super.withResponseFailure(node, cause);
+      return this;
+    }
+
+    @Override
+    public GraphRequestHandlerTestHarness.Builder withResponse(Node node, Frame response) {
+      super.withResponse(node, response);
+      return this;
+    }
+
+    @Override
+    public GraphRequestHandlerTestHarness.Builder withDefaultIdempotence(
+        boolean defaultIdempotence) {
+      super.withDefaultIdempotence(defaultIdempotence);
+      return this;
+    }
+
+    @Override
+    public
GraphRequestHandlerTestHarness.Builder withProtocolVersion(
+        ProtocolVersion protocolVersion) {
+      super.withProtocolVersion(protocolVersion);
+      return this;
+    }
+
+    @Override
+    public GraphRequestHandlerTestHarness build() {
+      return new GraphRequestHandlerTestHarness(
+          this, graphProtocolForTestConfig, graphTimeout, dseVersionForTestMetadata);
+    }
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java
new file mode 100644
index 00000000000..aed248675ae
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetTestBase.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet;
+import com.datastax.dse.driver.api.core.graph.GraphNode;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.internal.core.util.CountingIterator;
+import com.datastax.oss.driver.shaded.guava.common.collect.Lists;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Queue;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionStage;
+
+public abstract class GraphResultSetTestBase {
+
+  /**
+   * Mocks an async result set where column 0 has type INT, with rows containing the provided data.
+   */
+  protected AsyncGraphResultSet mockPage(boolean nextPage, Integer... data) {
+    AsyncGraphResultSet page = mock(AsyncGraphResultSet.class);
+
+    ExecutionInfo executionInfo = mock(ExecutionInfo.class);
+    when(page.getRequestExecutionInfo()).thenReturn(executionInfo);
+
+    if (nextPage) {
+      when(page.hasMorePages()).thenReturn(true);
+      when(page.fetchNextPage()).thenReturn(spy(new CompletableFuture<>()));
+    } else {
+      when(page.hasMorePages()).thenReturn(false);
+      when(page.fetchNextPage()).thenThrow(new IllegalStateException());
+    }
+
+    // Emulate DefaultAsyncResultSet's internals (this is a bit sketchy, maybe it would be better
+    // to use real DefaultAsyncResultSet instances)
+    Queue<Integer> queue = Lists.newLinkedList(Arrays.asList(data));
+    CountingIterator<GraphNode> iterator =
+        new CountingIterator<GraphNode>(queue.size()) {
+          @Override
+          protected GraphNode computeNext() {
+            Integer index = queue.poll();
+            return (index == null) ? endOfData() : mockRow(index);
+          }
+        };
+    when(page.currentPage()).thenReturn(() -> iterator);
+    when(page.remaining()).thenAnswer(invocation -> iterator.remaining());
+
+    return page;
+  }
+
+  private GraphNode mockRow(int index) {
+    GraphNode row = mock(GraphNode.class);
+    when(row.asInt()).thenReturn(index);
+    return row;
+  }
+
+  protected static void complete(
+      CompletionStage<AsyncGraphResultSet> stage, AsyncGraphResultSet result) {
+    stage.toCompletableFuture().complete(result);
+  }
+
+  protected void assertNextRow(Iterator<GraphNode> iterator, int expectedValue) {
+    assertThat(iterator.hasNext()).isTrue();
+    GraphNode row = iterator.next();
+    assertThat(row.asInt()).isEqualTo(expectedValue);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java
new file mode 100644
index 00000000000..fd5cffd2530
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphResultSetsTest.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet;
+import com.datastax.dse.driver.api.core.graph.GraphNode;
+import com.datastax.dse.driver.api.core.graph.GraphResultSet;
+import java.util.Iterator;
+import org.junit.Test;
+
+public class GraphResultSetsTest extends GraphResultSetTestBase {
+
+  @Test
+  public void should_create_result_set_from_single_page() {
+    // Given
+    AsyncGraphResultSet page1 = mockPage(false, 0, 1, 2);
+
+    // When
+    GraphResultSet resultSet = GraphResultSets.toSync(page1);
+
+    // Then
+    assertThat(resultSet.getRequestExecutionInfo()).isSameAs(page1.getRequestExecutionInfo());
+
+    Iterator<GraphNode> iterator = resultSet.iterator();
+
+    assertNextRow(iterator, 0);
+    assertNextRow(iterator, 1);
+    assertNextRow(iterator, 2);
+
+    assertThat(iterator.hasNext()).isFalse();
+  }
+
+  @Test
+  public void should_create_result_set_from_multiple_pages() {
+    // Given
+    AsyncGraphResultSet page1 = mockPage(true, 0, 1, 2);
+    AsyncGraphResultSet page2 = mockPage(true, 3, 4, 5);
+    AsyncGraphResultSet page3 = mockPage(false, 6, 7, 8);
+
+    complete(page1.fetchNextPage(), page2);
+    complete(page2.fetchNextPage(), page3);
+
+    // When
+    GraphResultSet resultSet = GraphResultSets.toSync(page1);
+
+    // Then
+    assertThat(resultSet.iterator().hasNext()).isTrue();
+
+    assertThat(resultSet.getRequestExecutionInfo()).isSameAs(page1.getRequestExecutionInfo());
+    assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos())
+        .containsExactly(page1.getRequestExecutionInfo());
+
+    Iterator<GraphNode> iterator = resultSet.iterator();
+
+    assertNextRow(iterator, 0);
+    assertNextRow(iterator, 1);
+    assertNextRow(iterator, 2);
+
+    assertThat(iterator.hasNext()).isTrue();
+    // This should have triggered the fetch of page2
+    assertThat(resultSet.getRequestExecutionInfo()).isEqualTo(page2.getRequestExecutionInfo());
+    assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos())
+        .containsExactly(page1.getRequestExecutionInfo(), page2.getRequestExecutionInfo());
+
+    assertNextRow(iterator, 3);
+    assertNextRow(iterator, 4);
+    assertNextRow(iterator, 5);
+
+    assertThat(iterator.hasNext()).isTrue();
+    // This should have triggered the fetch of page3
+    assertThat(resultSet.getRequestExecutionInfo()).isEqualTo(page3.getRequestExecutionInfo());
+    assertThat(((MultiPageGraphResultSet) resultSet).getRequestExecutionInfos())
+        .containsExactly(
+            page1.getRequestExecutionInfo(),
+            page2.getRequestExecutionInfo(),
+            page3.getRequestExecutionInfo());
+
+    assertNextRow(iterator, 6);
+    assertNextRow(iterator, 7);
+    assertNextRow(iterator, 8);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java
new file mode 100644
index 00000000000..4799437e617
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphStatementBuilderBaseTest.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.api.core.graph.FluentGraphStatement;
+import com.datastax.dse.driver.api.core.graph.GraphStatementBuilderBase;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import org.junit.Test;
+
+public class GraphStatementBuilderBaseTest {
+
+  private static class MockGraphStatementBuilder
+      extends GraphStatementBuilderBase<MockGraphStatementBuilder, FluentGraphStatement> {
+
+    @NonNull
+    @Override
+    public FluentGraphStatement build() {
+      FluentGraphStatement rv = mock(FluentGraphStatement.class);
+      when(rv.getTimestamp()).thenReturn(this.timestamp);
+      return rv;
+    }
+  }
+
+  @Test
+  public void should_use_timestamp_if_set() {
+
+    MockGraphStatementBuilder builder = new MockGraphStatementBuilder();
+    builder.setTimestamp(1);
+    assertThat(builder.build().getTimestamp()).isEqualTo(1);
+  }
+
+  @Test
+  public void should_use_correct_default_timestamp_if_not_set() {
+
+    MockGraphStatementBuilder builder = new MockGraphStatementBuilder();
+    assertThat(builder.build().getTimestamp()).isEqualTo(Statement.NO_DEFAULT_TIMESTAMP);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java
new file mode 100644
index 00000000000..ec31bd4b12d
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphSupportCheckerTest.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph;
+
+import static com.datastax.dse.driver.DseTestFixtures.mockNodesInMetadataWithVersions;
+import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.AUTO;
+import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.DISABLED;
+import static com.datastax.dse.driver.api.core.graph.PagingEnabledOptions.ENABLED;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.DseTestDataProviders;
+import com.datastax.dse.driver.api.core.DseProtocolVersion;
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.dse.driver.api.core.graph.GraphStatement;
+import com.datastax.dse.driver.api.core.graph.PagingEnabledOptions;
+import com.datastax.dse.driver.api.core.metadata.DseNodeProperties;
+import com.datastax.dse.driver.internal.core.DseProtocolFeature;
+import com.datastax.oss.driver.api.core.Version;
+import com.datastax.oss.driver.api.core.config.DriverConfig;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.metadata.Metadata;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.metadata.MetadataManager;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class GraphSupportCheckerTest {
+
+  @UseDataProvider("graphPagingEnabledAndDseVersions")
+  @Test
+  public void should_check_if_paging_is_supported(
+      boolean protocolWithPagingSupport,
+      PagingEnabledOptions statementGraphPagingEnabled,
+      PagingEnabledOptions contextGraphPagingEnabled,
+      List<Version> nodeDseVersions,
+      boolean expected) {
+    // given
+    GraphStatement graphStatement = mock(GraphStatement.class);
+    InternalDriverContext context = protocolWithPagingSupport(protocolWithPagingSupport);
+    statementGraphPagingEnabled(graphStatement, statementGraphPagingEnabled);
+    contextGraphPagingEnabled(context, contextGraphPagingEnabled);
+    addNodeWithDseVersion(context, nodeDseVersions);
+
+    // when
+    boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context);
+
+    // then
+    assertThat(pagingEnabled).isEqualTo(expected);
+  }
+
+  @Test
+  public void should_not_support_paging_when_statement_profile_not_present() {
+    // given
+    GraphStatement graphStatement = mock(GraphStatement.class);
+    InternalDriverContext context = protocolWithPagingSupport(true);
+    contextGraphPagingEnabled(context, DISABLED);
+    addNodeWithDseVersion(context, Collections.singletonList(Version.parse("6.8.0")));
+
+    // when
+    boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context);
+
+    // then
+    assertThat(pagingEnabled).isEqualTo(false);
+  }
+
+  @Test
+  public void
+      should_support_paging_when_statement_profile_not_present_but_context_profile_has_paging_enabled() {
+    // given
+    GraphStatement graphStatement = mock(GraphStatement.class);
+    InternalDriverContext context = protocolWithPagingSupport(true);
+    contextGraphPagingEnabled(context, ENABLED);
+    addNodeWithDseVersion(context, Collections.singletonList(Version.parse("6.8.0")));
+
+    // when
+    boolean pagingEnabled = new GraphSupportChecker().isPagingEnabled(graphStatement, context);
+
+    // then
+    assertThat(pagingEnabled).isEqualTo(true);
+  }
+
+  @DataProvider()
+  public static Object[][] graphPagingEnabledAndDseVersions() {
+    List<Version> listWithGraphPagingNode = Collections.singletonList(Version.parse("6.8.0"));
+    List<Version> listWithoutGraphPagingNode = Collections.singletonList(Version.parse("6.7.0"));
+    List<Version> listWithNull = Collections.singletonList(null);
+    List<Version> listWithTwoNodesOneNotSupporting =
+        Arrays.asList(Version.parse("6.7.0"), Version.parse("6.8.0"));
+
+    return new Object[][] {
+      {false, ENABLED, ENABLED, listWithGraphPagingNode, true},
+      {true, ENABLED, ENABLED, listWithoutGraphPagingNode, true},
+      {true, ENABLED, DISABLED, listWithGraphPagingNode, true},
+      {true, ENABLED, ENABLED, listWithGraphPagingNode, true},
+      {true, ENABLED, ENABLED, listWithNull, true},
+      {true, ENABLED, ENABLED, listWithTwoNodesOneNotSupporting, true},
+      {true, DISABLED, ENABLED, listWithGraphPagingNode, false},
+      {true, DISABLED, AUTO, listWithGraphPagingNode, false},
+      {true, DISABLED, DISABLED, listWithGraphPagingNode, false},
+      {true, AUTO, AUTO, listWithGraphPagingNode, true},
+      {true, AUTO, DISABLED, listWithGraphPagingNode, true},
+      {false, AUTO, AUTO, listWithGraphPagingNode, false},
+      {true, AUTO, AUTO, listWithTwoNodesOneNotSupporting, false},
+      {true, AUTO, AUTO, listWithNull, false},
+    };
+  }
+
+  private void addNodeWithDseVersion(InternalDriverContext context, List<Version> dseVersions) {
+    MetadataManager manager = mock(MetadataManager.class);
+    when(context.getMetadataManager()).thenReturn(manager);
+    Metadata metadata = mock(Metadata.class);
+    when(manager.getMetadata()).thenReturn(metadata);
+    Map<UUID, Node> nodes = new HashMap<>();
+    for (Version v : dseVersions) {
+      Node node = mock(Node.class);
+      Map<String, Object> extras = new HashMap<>();
+      extras.put(DseNodeProperties.DSE_VERSION, v);
+      when(node.getExtras()).thenReturn(extras);
+      nodes.put(UUID.randomUUID(), node);
+    }
+    when(metadata.getNodes()).thenReturn(nodes);
+  }
+
+  private void contextGraphPagingEnabled(
+      InternalDriverContext context, PagingEnabledOptions option) {
+    DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class);
+    when(driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED))
+        .thenReturn(option.name());
+    DriverConfig config = mock(DriverConfig.class);
+    when(context.getConfig()).thenReturn(config);
+    when(config.getDefaultProfile()).thenReturn(driverExecutionProfile);
+  }
+
+  private InternalDriverContext protocolWithPagingSupport(boolean pagingSupport) {
+    InternalDriverContext context = mock(InternalDriverContext.class);
+    when(context.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2);
+    ProtocolVersionRegistry protocolVersionRegistry = mock(ProtocolVersionRegistry.class);
+    when(protocolVersionRegistry.supports(
+            DseProtocolVersion.DSE_V2, DseProtocolFeature.CONTINUOUS_PAGING))
+        .thenReturn(pagingSupport);
+    when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry);
+    return context;
+  }
+
+  private void
statementGraphPagingEnabled( + GraphStatement graphStatement, PagingEnabledOptions option) { + DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); + when(driverExecutionProfile.getString(DseDriverOption.GRAPH_PAGING_ENABLED)) + .thenReturn(option.name()); + when(graphStatement.getExecutionProfile()).thenReturn(driverExecutionProfile); + } + + @Test + @UseDataProvider("dseVersionsAndGraphProtocols") + public void should_determine_default_graph_protocol_from_dse_version( + Version[] dseVersions, GraphProtocol expectedProtocol) { + // mock up the metadata for the context + // using 'true' here will treat null test Versions as no DSE_VERSION info in the metadata + DefaultDriverContext context = + mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersions); + GraphProtocol graphProtocol = new GraphSupportChecker().getDefaultGraphProtocol(context); + assertThat(graphProtocol).isEqualTo(expectedProtocol); + } + + @Test + @UseDataProvider("dseVersionsAndGraphProtocols") + public void should_determine_default_graph_protocol_from_dse_version_with_null_versions( + Version[] dseVersions, GraphProtocol expectedProtocol) { + // mock up the metadata for the context + // using 'false' here will treat null test Versions as explicit NULL info for DSE_VERSION + DefaultDriverContext context = + mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), false, dseVersions); + GraphProtocol graphProtocol = new GraphSupportChecker().getDefaultGraphProtocol(context); + assertThat(graphProtocol).isEqualTo(expectedProtocol); + } + + @DataProvider + public static Object[][] dseVersionsAndGraphProtocols() { + return new Object[][] { + {new Version[] {Version.parse("5.0.3")}, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {Version.parse("6.0.1")}, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {Version.parse("6.8.0")}, GraphProtocol.GRAPH_BINARY_1_0}, + {new Version[] {Version.parse("7.0.0")}, GraphProtocol.GRAPH_BINARY_1_0}, + {new Version[] {Version.parse("5.0.3"), Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {Version.parse("6.7.4"), Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {Version.parse("6.8.0"), Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, + { + new Version[] {Version.parse("6.8.0"), Version.parse("7.0.0")}, + GraphProtocol.GRAPH_BINARY_1_0 + }, + {new Version[] {Version.parse("6.7.4"), Version.parse("6.7.4")}, GraphProtocol.GRAPHSON_2_0}, + { + new Version[] {Version.parse("6.8.0"), Version.parse("6.8.0")}, + GraphProtocol.GRAPH_BINARY_1_0 + }, + {null, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {null}, GraphProtocol.GRAPHSON_2_0}, + {new Version[] {null, Version.parse("6.8.0")}, GraphProtocol.GRAPHSON_2_0}, + { + new Version[] {Version.parse("6.8.0"), Version.parse("7.0.0"), null}, + GraphProtocol.GRAPHSON_2_0 + }, + }; + } + + @Test + @UseDataProvider(location = DseTestDataProviders.class, value = "supportedGraphProtocols") + public void should_pickup_graph_protocol_from_statement(GraphProtocol graphProtocol) { + GraphStatement graphStatement = mock(GraphStatement.class); + DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); + when(graphStatement.getSubProtocol()).thenReturn(graphProtocol.toInternalCode()); + + GraphProtocol inferredProtocol = + new GraphSupportChecker() + .inferGraphProtocol( + graphStatement, executionProfile, mock(InternalDriverContext.class)); + + 
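+    // a sub-protocol set directly on the statement must win without the execution profile
+    // ever being consulted, hence the verifyZeroInteractions check below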
assertThat(inferredProtocol).isEqualTo(graphProtocol); + verifyZeroInteractions(executionProfile); + } + + @Test + @UseDataProvider("graphProtocolStringsAndDseVersions") + public void should_pickup_graph_protocol_and_parse_from_string_config( + String stringConfig, Version dseVersion) { + GraphStatement graphStatement = mock(GraphStatement.class); + DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); + when(executionProfile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(Boolean.TRUE); + when(executionProfile.getString(eq(DseDriverOption.GRAPH_SUB_PROTOCOL))) + .thenReturn(stringConfig); + + DefaultDriverContext context = + mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersion); + GraphProtocol inferredProtocol = + new GraphSupportChecker().inferGraphProtocol(graphStatement, executionProfile, context); + assertThat(inferredProtocol.toInternalCode()).isEqualTo(stringConfig); + } + + @DataProvider + public static Object[][] graphProtocolStringsAndDseVersions() { + // putting manual strings here to be sure to be notified if a value in + // GraphProtocol ever changes + return new Object[][] { + {"graphson-1.0", Version.parse("6.7.0")}, + {"graphson-1.0", Version.parse("6.8.0")}, + {"graphson-2.0", Version.parse("6.7.0")}, + {"graphson-2.0", Version.parse("6.8.0")}, + {"graph-binary-1.0", Version.parse("6.7.0")}, + {"graph-binary-1.0", Version.parse("6.8.0")}, + }; + } + + @Test + @UseDataProvider("dseVersions6") + public void should_use_correct_default_protocol_when_parsing(Version dseVersion) { + GraphStatement graphStatement = mock(GraphStatement.class); + DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class); + DefaultDriverContext context = + mockNodesInMetadataWithVersions(mock(DefaultDriverContext.class), true, dseVersion); + GraphProtocol inferredProtocol = + new GraphSupportChecker().inferGraphProtocol(graphStatement, executionProfile, context); + // For DSE 6.8 and newer, the default should be GraphSON binary + // for DSE older than 6.8, the default should be GraphSON2 + assertThat(inferredProtocol) + .isEqualTo( + (dseVersion.compareTo(Version.parse("6.8.0")) < 0) + ? GraphProtocol.GRAPHSON_2_0 + : GraphProtocol.GRAPH_BINARY_1_0); + } + + @DataProvider + public static Object[][] dseVersions6() { + return new Object[][] {{Version.parse("6.7.0")}, {Version.parse("6.8.0")}}; + } + + @Test + public void should_fail_if_graph_protocol_used_is_invalid() { + assertThatThrownBy(() -> GraphProtocol.fromString("invalid")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Graph protocol used [\"invalid\"] unknown. Possible values are: [ \"graphson-1.0\", \"graphson-2.0\", \"graph-binary-1.0\"]"); + } + + @Test + public void should_fail_if_graph_protocol_used_is_graphson_3() { + assertThatThrownBy(() -> GraphProtocol.fromString("graphson-3.0")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Graph protocol used [\"graphson-3.0\"] unknown. 
Possible values are: [ \"graphson-1.0\", \"graphson-2.0\", \"graph-binary-1.0\"]"); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java new file mode 100644 index 00000000000..f58fc54d8c7 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/GraphTestUtils.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule; +import com.datastax.dse.protocol.internal.response.result.DseRowsMetadata; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.response.result.ColumnSpec; +import com.datastax.oss.protocol.internal.response.result.DefaultRows; +import com.datastax.oss.protocol.internal.response.result.RawType; +import com.datastax.oss.protocol.internal.response.result.Rows; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayDeque; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import org.apache.tinkerpop.gremlin.process.remote.traversal.DefaultRemoteTraverser; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.T; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; +import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertex; +import org.apache.tinkerpop.gremlin.structure.util.detached.DetachedVertexProperty; +import org.assertj.core.api.InstanceOfAssertFactories; + +public class GraphTestUtils { + + public static ByteBuffer serialize( + Object value, GraphProtocol graphProtocol, GraphBinaryModule graphBinaryModule) + throws IOException { + + Buffer tinkerBuf = graphBinaryModule.serialize(value); + ByteBuffer nioBuffer = tinkerBuf.nioBuffer(); + tinkerBuf.release(); + return graphProtocol.isGraphBinary() 
+        ? nioBuffer
+        : GraphSONUtils.serializeToByteBuffer(value, graphProtocol);
+  }
+
+  public static Frame defaultDseFrameOf(Message responseMessage) {
+    return Frame.forResponse(
+        DseProtocolVersion.DSE_V2.getCode(),
+        0,
+        null,
+        Frame.NO_PAYLOAD,
+        Collections.emptyList(),
+        responseMessage);
+  }
+
+  public static Message singleGraphRow(GraphProtocol graphProtocol, GraphBinaryModule module)
+      throws IOException {
+    Vertex value =
+        DetachedVertex.build()
+            .setId(1)
+            .setLabel("person")
+            .addProperty(
+                DetachedVertexProperty.build()
+                    .setId(11)
+                    .setLabel("name")
+                    .setValue("marko")
+                    .create())
+            .create();
+    DseRowsMetadata metadata =
+        new DseRowsMetadata(
+            ImmutableList.of(
+                new ColumnSpec(
+                    "ks",
+                    "table",
+                    "gremlin",
+                    0,
+                    graphProtocol.isGraphBinary()
+                        ? RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB)
+                        : RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))),
+            null,
+            new int[] {},
+            null,
+            1,
+            true);
+    Queue<List<ByteBuffer>> data = new ArrayDeque<>();
+    data.add(
+        ImmutableList.of(
+            serialize(
+                graphProtocol.isGraphBinary()
+                    // GraphBinary returns results directly inside a Traverser
+                    ? new DefaultRemoteTraverser<>(value, 1)
+                    : ImmutableMap.of("result", value),
+                graphProtocol,
+                module)));
+    return new DefaultRows(metadata, data);
+  }
+
+  // Returns 10 rows, each with a vertex
+  public static Rows tenGraphRows(
+      GraphProtocol graphProtocol, GraphBinaryModule module, int page, boolean last)
+      throws IOException {
+    DseRowsMetadata metadata =
+        new DseRowsMetadata(
+            ImmutableList.of(
+                new ColumnSpec(
+                    "ks",
+                    "table",
+                    "gremlin",
+                    0,
+                    graphProtocol.isGraphBinary()
+                        ? RawType.PRIMITIVES.get(ProtocolConstants.DataType.BLOB)
+                        : RawType.PRIMITIVES.get(ProtocolConstants.DataType.VARCHAR))),
+            null,
+            new int[] {},
+            null,
+            page,
+            last);
+    Queue<List<ByteBuffer>> data = new ArrayDeque<>();
+    int start = (page - 1) * 10;
+    for (int i = start; i < start + 10; i++) {
+      Vertex v =
+          DetachedVertex.build()
+              .setId("vertex" + i)
+              .setLabel("person")
+              .addProperty(
+                  DetachedVertexProperty.build()
+                      .setId("property" + i)
+                      .setLabel("name")
+                      .setValue("user" + i)
+                      .create())
+              .create();
+      data.add(
+          ImmutableList.of(
+              serialize(
+                  graphProtocol.isGraphBinary()
+                      // GraphBinary returns results directly inside a Traverser
+                      ? new DefaultRemoteTraverser<>(v, 1)
+                      : ImmutableMap.of("result", v),
+                  graphProtocol,
+                  module)));
+    }
+    return new DefaultRows(metadata, data);
+  }
+
+  public static GraphBinaryModule createGraphBinaryModule(DefaultDriverContext context) {
+    TypeSerializerRegistry registry = GraphBinaryModule.createDseTypeSerializerRegistry(context);
+    return new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry));
+  }
+
+  public static void assertThatContainsProperties(
+      Map<Object, Object> properties, Object...
propsToMatch) { + for (int i = 0; i < propsToMatch.length; i += 2) { + assertThat(properties).containsEntry(propsToMatch[i], propsToMatch[i + 1]); + } + } + + public static void assertThatContainsLabel( + Map properties, Direction direction, String label) { + assertThat(properties) + .hasEntrySatisfying( + direction, + value -> + assertThat(value) + .asInstanceOf(InstanceOfAssertFactories.MAP) + .containsEntry(T.label, label)); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java new file mode 100644 index 00000000000..e36f7e97e5a --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/binary/GraphDataTypesTest.java @@ -0,0 +1,333 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.binary; + +import static com.datastax.oss.driver.api.core.type.DataTypes.BIGINT; +import static com.datastax.oss.driver.api.core.type.DataTypes.DOUBLE; +import static com.datastax.oss.driver.api.core.type.DataTypes.DURATION; +import static com.datastax.oss.driver.api.core.type.DataTypes.FLOAT; +import static com.datastax.oss.driver.api.core.type.DataTypes.INT; +import static com.datastax.oss.driver.api.core.type.DataTypes.TEXT; +import static com.datastax.oss.driver.api.core.type.DataTypes.listOf; +import static com.datastax.oss.driver.api.core.type.DataTypes.mapOf; +import static com.datastax.oss.driver.api.core.type.DataTypes.setOf; +import static com.datastax.oss.driver.api.core.type.DataTypes.tupleOf; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.type.DseDataTypes; +import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; +import com.datastax.dse.driver.internal.core.data.geometry.Distance; +import com.datastax.dse.driver.internal.core.graph.EditDistance; +import com.datastax.dse.driver.internal.core.graph.GraphConversions; +import com.datastax.dse.driver.internal.core.graph.GraphProtocol; +import com.datastax.dse.driver.internal.core.graph.GraphSONUtils; +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.api.core.data.CqlDuration; +import 
com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; +import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.util.List; +import java.util.Set; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryReader; +import org.apache.tinkerpop.gremlin.structure.io.binary.GraphBinaryWriter; +import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class GraphDataTypesTest { + + private GraphBinaryModule graphBinaryModule; + + @Mock private DefaultDriverContext context; + + private static final MutableCodecRegistry CODEC_REGISTRY = + new DefaultCodecRegistry("testDseRegistry"); + + static { + CODEC_REGISTRY.register(DseTypeCodecs.POINT, DseTypeCodecs.LINE_STRING, DseTypeCodecs.POLYGON); + } + + private static Object[][] graphsonOneDataTypes = + new Object[][] { + {"~’~^ää#123#ö"}, + {(byte) 34}, + {BigDecimal.TEN}, + {BigInteger.TEN}, + {Boolean.TRUE}, + {false}, + {23}, + {23L}, + {23.0d}, + {23f}, + {(short) 23}, + {LocalDate.now(ZoneOffset.UTC)}, + {LocalTime.now(ZoneOffset.UTC)}, + {java.util.UUID.randomUUID()}, + {Instant.now()}, + }; + + private static Object[][] graphsonTwoDataTypes = + new Object[][] { + {ImmutableList.of(1L, 2L, 3L)}, + {ImmutableSet.of(1L, 2L, 3L)}, + {ImmutableMap.of("a", 1, "b", 2)}, + {Point.fromCoordinates(3.3, 4.4)}, + { + LineString.fromPoints( + Point.fromCoordinates(1, 1), Point.fromCoordinates(2, 2), Point.fromCoordinates(3, 3)) + }, + { + Polygon.fromPoints( + Point.fromCoordinates(3, 4), Point.fromCoordinates(5, 4), Point.fromCoordinates(6, 6)) + }, + }; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + when(context.getCodecRegistry()).thenReturn(CODEC_REGISTRY); + when(context.getProtocolVersion()).thenReturn(DseProtocolVersion.DSE_V2); + + TypeSerializerRegistry registry = GraphBinaryModule.createDseTypeSerializerRegistry(context); + graphBinaryModule = + new GraphBinaryModule(new GraphBinaryReader(registry), new GraphBinaryWriter(registry)); + } + + @DataProvider + public static Object[][] graphsonOneDataProvider() { + + return graphsonOneDataTypes; + } + + @DataProvider + public static Object[][] graphsonTwoDataProvider() { + + return 
TestDataProviders.concat(graphsonOneDataTypes, graphsonTwoDataTypes); + } + + @DataProvider + public static Object[][] binaryDataProvider() throws UnknownHostException { + + Object[][] binaryDataTypes = + new Object[][] { + {InetAddress.getLocalHost()}, + {ImmutableList.of(ImmutableList.of(1L, 3L), ImmutableList.of(2L, 4L))}, + {ImmutableSet.of(ImmutableSet.of(1, 2, 3))}, + {ImmutableMap.of(ImmutableMap.of("a", 1), ImmutableMap.of(2, "b"))}, + {tupleOf(INT, TEXT, FLOAT).newValue(1, "2", 3.41f)}, + { + tupleOf(INT, TEXT, tupleOf(TEXT, DURATION)) + .newValue( + 1, "2", tupleOf(TEXT, DURATION).newValue("a", CqlDuration.newInstance(2, 1, 0))) + }, + { + tupleOf( + listOf(INT), + setOf(FLOAT), + DataTypes.mapOf(TEXT, BIGINT), + listOf(listOf(DOUBLE)), + setOf(setOf(FLOAT)), + listOf(tupleOf(INT, TEXT))) + .newValue( + ImmutableList.of(4, 8, 22, 34, 37, 59), + ImmutableSet.of(28f, 44f, 59f), + ImmutableMap.of("big10", 2345L), + ImmutableList.of( + ImmutableList.of(11.1d, 33.3d), ImmutableList.of(22.2d, 44.4d)), + ImmutableSet.of(ImmutableSet.of(55.5f)), + ImmutableList.of(tupleOf(INT, TEXT).newValue(3, "three"))) + }, + { + new UserDefinedTypeBuilder("ks", "udt1") + .withField("a", INT) + .withField("b", TEXT) + .build() + .newValue(1, "two") + }, + {new Distance(Point.fromCoordinates(3.4, 17.0), 2.5)}, + {new EditDistance("xyz", 3)}, + {DseGraph.g.V().has("name", "marko").asAdmin().getBytecode()}, + { + GraphConversions.bytecodeToSerialize( + BatchGraphStatement.builder() + .addTraversal(DseGraph.g.addV("person").property("name", "1")) + .addTraversal(DseGraph.g.addV("person").property("name", "1")) + .build()) + }, + }; + return TestDataProviders.concat(graphsonTwoDataProvider(), binaryDataTypes); + } + + @Test + @UseDataProvider("binaryDataProvider") + public void dataTypesTest(Object value) throws IOException { + verifySerDeBinary(value); + } + + @Test + @UseDataProvider("graphsonOneDataProvider") + public void dataTypesTestGraphsonOne(Object value) throws IOException { + verifySerDeGraphson(value, GraphProtocol.GRAPHSON_1_0); + } + + @Test + @UseDataProvider("graphsonTwoDataProvider") + public void dataTypesTestGraphsonTwo(Object value) throws IOException { + verifySerDeGraphson(value, GraphProtocol.GRAPHSON_2_0); + } + + @Test + public void complexUdtTests() throws IOException { + UserDefinedType type1 = + new UserDefinedTypeBuilder("ks", "udt1").withField("a", INT).withField("b", TEXT).build(); + verifySerDeBinary(type1.newValue(1, "2")); + + TupleType secondNested = tupleOf(BIGINT, listOf(BIGINT)); + TupleType firstNested = tupleOf(TEXT, secondNested); + + UserDefinedType type2 = + new UserDefinedTypeBuilder("ks", "udt2") + .withField("a", INT) + .withField("b", TEXT) + .withField("c", type1) + .withField("mylist", listOf(BIGINT)) + .withField("mytuple_withlist", firstNested) + .build(); + + verifySerDeBinary( + type2.newValue( + 1, + "2", + type1.newValue(3, "4"), + ImmutableList.of(5L), + firstNested.newValue("6", secondNested.newValue(7L, ImmutableList.of(8L))))); + + UserDefinedType type3 = + new UserDefinedTypeBuilder("ks", "udt3") + .withField("a", listOf(INT)) + .withField("b", setOf(FLOAT)) + .withField("c", mapOf(TEXT, BIGINT)) + .withField("d", listOf(listOf(DOUBLE))) + .withField("e", setOf(setOf(FLOAT))) + .withField("f", listOf(tupleOf(INT, TEXT))) + .build(); + + verifySerDeBinary( + type3.newValue( + ImmutableList.of(1), + ImmutableSet.of(2.1f), + ImmutableMap.of("3", 4L), + ImmutableList.of(ImmutableList.of(5.1d, 6.1d), ImmutableList.of(7.1d)), + 
ImmutableSet.of(ImmutableSet.of(8.1f), ImmutableSet.of(9.1f)),
+            ImmutableList.of(tupleOf(INT, TEXT).newValue(10, "11"))));
+  }
+
+  @Test
+  public void complexTypesAndGeoTests() throws IOException {
+
+    TupleType tuple = tupleOf(DseDataTypes.POINT, DseDataTypes.LINE_STRING, DseDataTypes.POLYGON);
+    tuple.attach(context);
+
+    verifySerDeBinary(
+        tuple.newValue(
+            Point.fromCoordinates(3.3, 4.4),
+            LineString.fromPoints(
+                Point.fromCoordinates(1, 1),
+                Point.fromCoordinates(2, 2),
+                Point.fromCoordinates(3, 3)),
+            Polygon.fromPoints(
+                Point.fromCoordinates(3, 4),
+                Point.fromCoordinates(5, 4),
+                Point.fromCoordinates(6, 6))));
+
+    UserDefinedType udt =
+        new UserDefinedTypeBuilder("ks", "udt1")
+            .withField("a", DseDataTypes.POINT)
+            .withField("b", DseDataTypes.LINE_STRING)
+            .withField("c", DseDataTypes.POLYGON)
+            .build();
+    udt.attach(context);
+
+    verifySerDeBinary(
+        udt.newValue(
+            Point.fromCoordinates(3.3, 4.4),
+            LineString.fromPoints(
+                Point.fromCoordinates(1, 1),
+                Point.fromCoordinates(2, 2),
+                Point.fromCoordinates(3, 3)),
+            Polygon.fromPoints(
+                Point.fromCoordinates(3, 4),
+                Point.fromCoordinates(5, 4),
+                Point.fromCoordinates(6, 6))));
+  }
+
+  private void verifySerDeBinary(Object input) throws IOException {
+    Buffer result = graphBinaryModule.serialize(input);
+    Object deserialized = graphBinaryModule.deserialize(result);
+    result.release();
+    assertThat(deserialized).isEqualTo(input);
+  }
+
+  private void verifySerDeGraphson(Object input, GraphProtocol protocol) throws IOException {
+    ByteBuffer buffer = GraphSONUtils.serializeToByteBuffer(input, protocol);
+    Object deserialized = deserializeGraphson(buffer, protocol, input.getClass());
+
+    Object expected = (input instanceof Set) ? ImmutableList.copyOf((Set<?>) input) : input;
+    assertThat(deserialized).isEqualTo(expected);
+  }
+
+  private Object deserializeGraphson(
+      ByteBuffer buffer, GraphProtocol protocol, Class<?> expectedClass) throws IOException {
+    List<ByteBuffer> data = ImmutableList.of(buffer);
+    GraphNode node = GraphSONUtils.createGraphNode(data, protocol);
+    return node.as(expectedClass);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java
new file mode 100644
index 00000000000..324c4ff4672
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/reactive/ReactiveGraphRequestProcessorTest.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.graph.reactive;
+
+import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1;
+import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.createGraphBinaryModule;
+import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.defaultDseFrameOf;
+import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.tenGraphRows;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.DseTestDataProviders;
+import com.datastax.dse.driver.api.core.DseProtocolVersion;
+import com.datastax.dse.driver.api.core.graph.GraphStatement;
+import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
+import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode;
+import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphResultSet;
+import com.datastax.dse.driver.internal.core.cql.continuous.ContinuousCqlRequestHandlerTestBase;
+import com.datastax.dse.driver.internal.core.graph.GraphProtocol;
+import com.datastax.dse.driver.internal.core.graph.GraphRequestAsyncProcessor;
+import com.datastax.dse.driver.internal.core.graph.GraphRequestHandlerTestHarness;
+import com.datastax.dse.driver.internal.core.graph.GraphSupportChecker;
+import com.datastax.dse.driver.internal.core.graph.binary.GraphBinaryModule;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import com.datastax.oss.driver.internal.core.cql.PoolBehavior;
+import com.datastax.oss.driver.internal.core.session.DefaultSession;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import io.reactivex.Flowable;
+import java.io.IOException;
+import java.util.List;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class ReactiveGraphRequestProcessorTest extends ContinuousCqlRequestHandlerTestBase {
+
+  private GraphRequestAsyncProcessor asyncProcessor;
+  private GraphSupportChecker graphSupportChecker;
+
+  @Before
+  public void setUp() {
+    DefaultDriverContext context = mock(DefaultDriverContext.class);
+    graphSupportChecker = mock(GraphSupportChecker.class);
+    asyncProcessor = Mockito.spy(new GraphRequestAsyncProcessor(context, graphSupportChecker));
+  }
+
+  @Test
+  public void should_be_able_to_process_graph_reactive_result_set() {
+    ReactiveGraphRequestProcessor processor = new ReactiveGraphRequestProcessor(asyncProcessor);
+    assertThat(
+            processor.canProcess(
+                ScriptGraphStatement.newInstance("g.V()"),
+                ReactiveGraphRequestProcessor.REACTIVE_GRAPH_RESULT_SET))
+        .isTrue();
+  }
+
+  @Test
+  public void should_create_reactive_result_set() {
+    GraphRequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder().withProtocolVersion(DSE_V1);
+    try (GraphRequestHandlerTestHarness harness = builder.build()) {
+      ReactiveGraphRequestProcessor processor = new ReactiveGraphRequestProcessor(asyncProcessor);
+      GraphStatement<?> graphStatement = ScriptGraphStatement.newInstance("g.V()");
+      assertThat(
+              processor.process(graphStatement, harness.getSession(), harness.getContext(), "test"))
+          .isInstanceOf(DefaultReactiveGraphResultSet.class);
+    }
+  }
+
+  @Test
+  @UseDataProvider(
+      value = "allDseProtocolVersionsAndSupportedGraphProtocols",
+      location = DseTestDataProviders.class)
+  public void should_complete_single_page_result(
+      DseProtocolVersion version, GraphProtocol graphProtocol) throws IOException {
+    when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(false);
+    when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol);
+
+    GraphRequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder().withProtocolVersion(version);
+    PoolBehavior node1Behavior = builder.customBehavior(node1);
+    try (GraphRequestHandlerTestHarness harness = builder.build()) {
+
+      DefaultSession session = harness.getSession();
+      DefaultDriverContext context = harness.getContext();
+      GraphStatement<?> graphStatement = ScriptGraphStatement.newInstance("g.V()");
+
+      GraphBinaryModule graphBinaryModule = createGraphBinaryModule(context);
+      when(asyncProcessor.getGraphBinaryModule()).thenReturn(graphBinaryModule);
+
+      ReactiveGraphResultSet publisher =
+          new ReactiveGraphRequestProcessor(asyncProcessor)
+              .process(graphStatement, session, context, "test");
+
+      Flowable<ReactiveGraphNode> rowsPublisher = Flowable.fromPublisher(publisher).cache();
+      rowsPublisher.subscribe();
+
+      // emulate single page
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 1, true)));
+
+      List<ReactiveGraphNode> rows = rowsPublisher.toList().blockingGet();
+
+      assertThat(rows).hasSize(10);
+      checkResultSet(rows);
+
+      Flowable<ExecutionInfo> execInfosFlowable =
+          Flowable.fromPublisher(publisher.getExecutionInfos());
+      assertThat(execInfosFlowable.toList().blockingGet())
+          .hasSize(1)
+          .containsExactly(rows.get(0).getExecutionInfo());
+    }
+  }
+
+  @Test
+  @UseDataProvider(
+      value = "allDseProtocolVersionsAndSupportedGraphProtocols",
+      location = DseTestDataProviders.class)
+  public void should_complete_multi_page_result(
+      DseProtocolVersion version, GraphProtocol graphProtocol) throws IOException {
+    when(graphSupportChecker.isPagingEnabled(any(), any())).thenReturn(true);
+    when(graphSupportChecker.inferGraphProtocol(any(), any(), any())).thenReturn(graphProtocol);
+
+    GraphRequestHandlerTestHarness.Builder builder =
+        GraphRequestHandlerTestHarness.builder().withProtocolVersion(version);
+    PoolBehavior node1Behavior = builder.customBehavior(node1);
+    try (GraphRequestHandlerTestHarness harness = builder.build()) {
+
+      DefaultSession session = harness.getSession();
+      DefaultDriverContext context = harness.getContext();
+      GraphStatement<?> graphStatement = ScriptGraphStatement.newInstance("g.V()");
+
+      GraphBinaryModule graphBinaryModule = createGraphBinaryModule(context);
+      when(asyncProcessor.getGraphBinaryModule()).thenReturn(graphBinaryModule);
+
+      ReactiveGraphResultSet publisher =
+          new ReactiveGraphRequestProcessor(asyncProcessor)
+              .process(graphStatement, session, context, "test");
+
+      Flowable<ReactiveGraphNode> rowsPublisher = Flowable.fromPublisher(publisher).cache();
+      rowsPublisher.subscribe();
+
+      // emulate page 1
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 1, false)));
+      // emulate page 2
+      node1Behavior.setResponseSuccess(
+          defaultDseFrameOf(tenGraphRows(graphProtocol, graphBinaryModule, 2, true)));
+
+      List<ReactiveGraphNode> rows = rowsPublisher.toList().blockingGet();
+      assertThat(rows).hasSize(20);
+      checkResultSet(rows);
+
+      Flowable<ExecutionInfo> execInfosFlowable =
+          Flowable.fromPublisher(publisher.getExecutionInfos());
+      assertThat(execInfosFlowable.toList().blockingGet())
+          .hasSize(2)
+          .containsExactly(rows.get(0).getExecutionInfo(), rows.get(10).getExecutionInfo());
+    }
+  }
+
+  private void checkResultSet(List<ReactiveGraphNode> rows) {
+    for
(ReactiveGraphNode row : rows) { + assertThat(row.isVertex()).isTrue(); + ExecutionInfo executionInfo = row.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node1); + assertThat(executionInfo.getErrors()).isEmpty(); + assertThat(executionInfo.getIncomingPayload()).isEmpty(); + assertThat(executionInfo.getSpeculativeExecutionCount()).isEqualTo(0); + assertThat(executionInfo.getSuccessfulExecutionIndex()).isEqualTo(0); + assertThat(executionInfo.getWarnings()).isEmpty(); + } + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java new file mode 100644 index 00000000000..0d05f129520 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/graph/schema/refresh/GraphSchemaRefreshTest.java @@ -0,0 +1,418 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.graph.schema.refresh; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.api.core.metadata.schema.DseEdgeMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseGraphTableMetadata; +import com.datastax.dse.driver.api.core.metadata.schema.DseVertexMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseEdgeMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseVertexMetadata; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; +import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultColumnMetadata; +import com.datastax.oss.driver.internal.core.metadata.schema.events.KeyspaceChangeEvent; +import com.datastax.oss.driver.internal.core.metadata.schema.events.TableChangeEvent; +import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; 
+import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Collections; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class GraphSchemaRefreshTest { + + private static final DefaultDseTableMetadata OLD_TABLE = + newTable( + CqlIdentifier.fromInternal("ks_with_engine"), + CqlIdentifier.fromInternal("tbl"), + null, + null); + private static final DefaultDseKeyspaceMetadata OLD_KS1 = newKeyspace("ks1", null); + private static final DefaultDseKeyspaceMetadata KS_WITH_ENGINE = + newKeyspace( + CqlIdentifier.fromInternal("ks_with_engine"), + "Core", + ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), OLD_TABLE)); + + @Mock private InternalDriverContext context; + @Mock private ChannelFactory channelFactory; + private DefaultMetadata oldMetadata; + + @Before + public void setup() { + when(context.getChannelFactory()).thenReturn(channelFactory); + oldMetadata = + DefaultMetadata.EMPTY.withSchema( + ImmutableMap.of(OLD_KS1.getName(), OLD_KS1, KS_WITH_ENGINE.getName(), KS_WITH_ENGINE), + false, + context); + } + + @Test + public void should_detect_created_keyspace_without_graph_engine() { + DefaultDseKeyspaceMetadata ks2 = newKeyspace("ks2", null); + SchemaRefresh refresh = + new SchemaRefresh( + ImmutableMap.of( + OLD_KS1.getName(), + OLD_KS1, + KS_WITH_ENGINE.getName(), + KS_WITH_ENGINE, + ks2.getName(), + ks2)); + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + assertThat(result.newMetadata.getKeyspaces()).hasSize(3); + assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); + } + + @Test + public void should_detect_created_keyspace_with_graph_engine() { + DefaultDseKeyspaceMetadata ks2 = newKeyspace("ks2", "Core"); + SchemaRefresh refresh = + new SchemaRefresh( + ImmutableMap.of( + OLD_KS1.getName(), + OLD_KS1, + KS_WITH_ENGINE.getName(), + KS_WITH_ENGINE, + ks2.getName(), + ks2)); + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + assertThat(result.newMetadata.getKeyspaces()).hasSize(3); + assertThat(result.events).containsExactly(KeyspaceChangeEvent.created(ks2)); + } + + @Test + public void should_detect_top_level_graph_engine_update_in_keyspace() { + // Change only one top-level option (graph_engine) + DefaultDseKeyspaceMetadata newKs1 = newKeyspace("ks1", "Core"); + SchemaRefresh refresh = + new SchemaRefresh( + ImmutableMap.of(KS_WITH_ENGINE.getName(), KS_WITH_ENGINE, OLD_KS1.getName(), newKs1)); + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + assertThat(result.newMetadata.getKeyspaces()).hasSize(2); + assertThat(result.events).containsExactly(KeyspaceChangeEvent.updated(OLD_KS1, newKs1)); + } + + @Test + public void should_detect_adding_and_renaming_and_removing_vertex_label() { + DefaultDseTableMetadata newTable = + newTable( + KS_WITH_ENGINE.getName(), + CqlIdentifier.fromInternal("tbl"), + new DefaultDseVertexMetadata(CqlIdentifier.fromInternal("someLabel")), + null); + DefaultDseKeyspaceMetadata ks = + newKeyspace( + KS_WITH_ENGINE.getName(), + "Core", + ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); + SchemaRefresh refresh = + new SchemaRefresh( + ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + 
assertThat(result.newMetadata.getKeyspaces()).hasSize(2); + assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); + assertThat(result.newMetadata.getKeyspaces().get(KS_WITH_ENGINE.getName())).isNotNull(); + assertThat( + ((DseGraphTableMetadata) + result + .newMetadata + .getKeyspaces() + .get(KS_WITH_ENGINE.getName()) + .getTable("tbl") + .get()) + .getVertex()) + .isNotNull(); + assertThat( + ((DseGraphTableMetadata) + result + .newMetadata + .getKeyspaces() + .get(KS_WITH_ENGINE.getName()) + .getTable("tbl") + .get()) + .getVertex() + .get() + .getLabelName() + .asInternal()) + .isEqualTo("someLabel"); + + // now rename the vertex label + newTable = + newTable( + KS_WITH_ENGINE.getName(), + CqlIdentifier.fromInternal("tbl"), + new DefaultDseVertexMetadata(CqlIdentifier.fromInternal("someNewLabel")), + null); + ks = + newKeyspace( + KS_WITH_ENGINE.getName(), + "Core", + ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); + refresh = + new SchemaRefresh( + ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); + result = refresh.compute(oldMetadata, false, context); + assertThat(result.newMetadata.getKeyspaces()).hasSize(2); + assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); + assertThat( + ((DseGraphTableMetadata) + result + .newMetadata + .getKeyspaces() + .get(KS_WITH_ENGINE.getName()) + .getTable("tbl") + .get()) + .getVertex() + .get() + .getLabelName() + .asInternal()) + .isEqualTo("someNewLabel"); + + // now remove the vertex label from the table + DefaultMetadata metadataWithVertexLabel = result.newMetadata; + DefaultDseTableMetadata tableWithRemovedLabel = + newTable(KS_WITH_ENGINE.getName(), CqlIdentifier.fromInternal("tbl"), null, null); + ks = + newKeyspace( + KS_WITH_ENGINE.getName(), + "Core", + ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), tableWithRemovedLabel)); + refresh = + new SchemaRefresh( + ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); + result = refresh.compute(metadataWithVertexLabel, false, context); + assertThat(result.newMetadata.getKeyspaces()).hasSize(2); + assertThat(result.events) + .containsExactly(TableChangeEvent.updated(newTable, tableWithRemovedLabel)); + assertThat( + ((DseGraphTableMetadata) + result + .newMetadata + .getKeyspaces() + .get(KS_WITH_ENGINE.getName()) + .getTable("tbl") + .get()) + .getVertex() + .isPresent()) + .isFalse(); + } + + @Test + public void should_detect_adding_and_renaming_and_removing_edge_label() { + DefaultDseTableMetadata newTable = + newTable( + KS_WITH_ENGINE.getName(), + CqlIdentifier.fromInternal("tbl"), + null, + newEdgeMetadata( + CqlIdentifier.fromInternal("created"), + CqlIdentifier.fromInternal("person"), + CqlIdentifier.fromInternal("software"))); + DefaultDseKeyspaceMetadata ks = + newKeyspace( + KS_WITH_ENGINE.getName(), + "Core", + ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable)); + SchemaRefresh refresh = + new SchemaRefresh( + ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1)); + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + assertThat(result.newMetadata.getKeyspaces()).hasSize(2); + assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable)); + assertThat(result.newMetadata.getKeyspaces().get(KS_WITH_ENGINE.getName())).isNotNull(); + assertThat( + ((DseGraphTableMetadata) + result + .newMetadata + .getKeyspaces() + .get(KS_WITH_ENGINE.getName()) + .getTable("tbl") + 
.get())
+                .getVertex())
+        .isNotNull();
+    assertThat(
+            ((DseGraphTableMetadata)
+                    result
+                        .newMetadata
+                        .getKeyspaces()
+                        .get(KS_WITH_ENGINE.getName())
+                        .getTable("tbl")
+                        .get())
+                .getEdge()
+                .get()
+                .getLabelName()
+                .asInternal())
+        .isEqualTo("created");
+
+    // now rename the edge label
+    newTable =
+        newTable(
+            KS_WITH_ENGINE.getName(),
+            CqlIdentifier.fromInternal("tbl"),
+            null,
+            newEdgeMetadata(
+                CqlIdentifier.fromInternal("CHANGED"),
+                CqlIdentifier.fromInternal("person"),
+                CqlIdentifier.fromInternal("software")));
+    ks =
+        newKeyspace(
+            KS_WITH_ENGINE.getName(),
+            "Core",
+            ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), newTable));
+    refresh =
+        new SchemaRefresh(
+            ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1));
+    result = refresh.compute(oldMetadata, false, context);
+    assertThat(result.newMetadata.getKeyspaces()).hasSize(2);
+    assertThat(result.events).containsExactly(TableChangeEvent.updated(OLD_TABLE, newTable));
+    assertThat(
+            ((DseGraphTableMetadata)
+                    result
+                        .newMetadata
+                        .getKeyspaces()
+                        .get(KS_WITH_ENGINE.getName())
+                        .getTable("tbl")
+                        .get())
+                .getEdge()
+                .get()
+                .getLabelName()
+                .asInternal())
+        .isEqualTo("CHANGED");
+
+    // now remove the edge label from the table
+    DefaultMetadata metadataWithEdgeLabel = result.newMetadata;
+    DefaultDseTableMetadata tableWithRemovedLabel =
+        newTable(KS_WITH_ENGINE.getName(), CqlIdentifier.fromInternal("tbl"), null, null);
+    ks =
+        newKeyspace(
+            KS_WITH_ENGINE.getName(),
+            "Core",
+            ImmutableMap.of(CqlIdentifier.fromInternal("tbl"), tableWithRemovedLabel));
+    refresh =
+        new SchemaRefresh(
+            ImmutableMap.of(KS_WITH_ENGINE.getName(), ks, OLD_KS1.getName(), OLD_KS1));
+    result = refresh.compute(metadataWithEdgeLabel, false, context);
+    assertThat(result.newMetadata.getKeyspaces()).hasSize(2);
+    assertThat(result.events)
+        .containsExactly(TableChangeEvent.updated(newTable, tableWithRemovedLabel));
+    assertThat(
+            ((DseGraphTableMetadata)
+                    result
+                        .newMetadata
+                        .getKeyspaces()
+                        .get(KS_WITH_ENGINE.getName())
+                        .getTable("tbl")
+                        .get())
+                .getEdge()
+                .isPresent())
+        .isFalse();
+  }
+
+  private static DefaultDseKeyspaceMetadata newKeyspace(String name, String graphEngine) {
+    return new DefaultDseKeyspaceMetadata(
+        CqlIdentifier.fromInternal(name),
+        false,
+        false,
+        graphEngine,
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap());
+  }
+
+  private static DefaultDseKeyspaceMetadata newKeyspace(
+      CqlIdentifier name, String graphEngine, @NonNull Map<CqlIdentifier, TableMetadata> tables) {
+    return new DefaultDseKeyspaceMetadata(
+        name,
+        false,
+        false,
+        graphEngine,
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        tables,
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap());
+  }
+
+  private static DefaultDseTableMetadata newTable(
+      @NonNull CqlIdentifier keyspace,
+      @NonNull CqlIdentifier name,
+      @Nullable DseVertexMetadata vertex,
+      @Nullable DseEdgeMetadata edge) {
+    ImmutableList<ColumnMetadata> cols =
+        ImmutableList.of(
+            new DefaultColumnMetadata(
+                keyspace,
+                CqlIdentifier.fromInternal("parent"),
+                CqlIdentifier.fromInternal("id"),
+                DataTypes.INT,
+                false));
+    return new DefaultDseTableMetadata(
+        keyspace,
+        name,
+        null,
+        false,
+        false,
+        cols,
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        vertex,
+        edge);
+  }
+
+  private static DefaultDseEdgeMetadata newEdgeMetadata(
+      @NonNull CqlIdentifier labelName,
+      @NonNull
CqlIdentifier fromTable, + @NonNull CqlIdentifier toTable) { + return new DefaultDseEdgeMetadata( + labelName, + fromTable, + fromTable, + Collections.emptyList(), + Collections.emptyList(), + toTable, + toTable, + Collections.emptyList(), + Collections.emptyList()); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java new file mode 100644 index 00000000000..85af9b5691b --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/AddressFormatterTest.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class AddressFormatterTest { + + @Test + @UseDataProvider("addressesProvider") + public void should_format_addresses(Object address, String expected) { + // when + String result = AddressFormatter.nullSafeToString(address); + + // then + assertThat(result).isEqualTo(expected); + } + + @DataProvider + public static Object[][] addressesProvider() throws UnknownHostException { + return new Object[][] { + {new InetSocketAddress(8888), "0.0.0.0:8888"}, + {new InetSocketAddress("127.0.0.1", 8888), "127.0.0.1:8888"}, + {InetSocketAddress.createUnresolved("127.0.0.2", 8080), "127.0.0.2:8080"}, + {InetAddress.getByName("127.0.0.1"), "127.0.0.1"}, + }; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java new file mode 100644 index 00000000000..d5466b23dbc --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ConfigAntiPatternsFinderTest.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS;
+import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_HOSTNAME_VALIDATION;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.oss.driver.api.core.config.DriverConfig;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.Collections;
+import java.util.Map;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class ConfigAntiPatternsFinderTest {
+
+  private static final ImmutableMap<String, String> SSL_ANTI_PATTERN =
+      ImmutableMap.of(
+          "sslWithoutCertValidation",
+          "Client-to-node encryption is enabled but server certificate validation is disabled");
+
+  @Test
+  @UseDataProvider("sslConfigProvider")
+  public void should_find_ssl_anti_pattern(
+      boolean sslEngineFactoryClassDefined,
+      boolean hostnameValidation,
+      Map<String, String> expected) {
+    // given
+    InternalDriverContext context =
+        mockDefaultProfile(sslEngineFactoryClassDefined, hostnameValidation);
+
+    // when
+    Map<String, String> antiPatterns = new ConfigAntiPatternsFinder().findAntiPatterns(context);
+
+    // then
+    assertThat(antiPatterns).isEqualTo(expected);
+  }
+
+  private InternalDriverContext mockDefaultProfile(
+      boolean sslEngineFactoryClassDefined, boolean hostnameValidation) {
+    InternalDriverContext context = mock(InternalDriverContext.class);
+    DriverConfig driverConfig = mock(DriverConfig.class);
+    when(context.getConfig()).thenReturn(driverConfig);
+    DriverExecutionProfile profile = mock(DriverExecutionProfile.class);
+    when(profile.isDefined(SSL_ENGINE_FACTORY_CLASS)).thenReturn(sslEngineFactoryClassDefined);
+    when(profile.getBoolean(SSL_HOSTNAME_VALIDATION, false)).thenReturn(hostnameValidation);
+    when(driverConfig.getDefaultProfile()).thenReturn(profile);
+    return context;
+  }
+
+  @DataProvider
+  public static Object[][] sslConfigProvider() {
+    return new Object[][] {
+      {true, true, Collections.emptyMap()},
+      {true, false, SSL_ANTI_PATTERN},
+      {false, false, Collections.emptyMap()},
+      {false, true, Collections.emptyMap()}
+    };
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java
new file mode 100644
index 00000000000..dde6db6059e
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/DataCentersFinderTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet;
+import com.datastax.oss.driver.shaded.guava.common.collect.Sets;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.Collection;
+import java.util.Set;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class DataCentersFinderTest {
+
+  @Test
+  @UseDataProvider("hostProvider")
+  public void should_detect_data_centers(
+      int numberOfRemoteHosts,
+      String dc1,
+      NodeDistance h1Distance,
+      String dc2,
+      NodeDistance h2Distance,
+      Set<String> expected) {
+    // given
+    DriverExecutionProfile executionProfile = mock(DriverExecutionProfile.class);
+    when(executionProfile.getInt(CONNECTION_POOL_REMOTE_SIZE)).thenReturn(numberOfRemoteHosts);
+    Collection<Node> nodes = mockNodes(dc1, h1Distance, dc2, h2Distance);
+
+    // when
+    Set<String> dataCenters = new DataCentersFinder().getDataCenters(nodes, executionProfile);
+
+    // then
+    assertThat(dataCenters).isEqualTo(Sets.newHashSet(expected));
+  }
+
+  @DataProvider
+  public static Object[][] hostProvider() {
+    return new Object[][] {
+      {1, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc1", "dc2")},
+      {1, "dc1", NodeDistance.LOCAL, "dc1", NodeDistance.REMOTE, Sets.newHashSet("dc1")},
+      {0, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc1")},
+      {0, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet()},
+      {1, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet("dc2")},
+      {1, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.IGNORED, Sets.newHashSet("dc1")},
+      {0, "dc1", NodeDistance.IGNORED, "dc2", NodeDistance.REMOTE, Sets.newHashSet()},
+      {0, "dc1", NodeDistance.LOCAL, "dc2", NodeDistance.IGNORED, Sets.newHashSet("dc1")},
+    };
+  }
+
+  private Collection<Node> mockNodes(
+      String dc1, NodeDistance h1Distance, String dc2, NodeDistance h2Distance) {
+    Node n1 = mock(Node.class);
+    when(n1.getDatacenter()).thenReturn(dc1);
+    when(n1.getDistance()).thenReturn(h1Distance);
+
+    Node n2 = mock(Node.class);
+    when(n2.getDatacenter()).thenReturn(dc2);
+    when(n2.getDistance()).thenReturn(h2Distance);
+
+    return ImmutableSet.of(n1, n2);
+  }
+}
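Editor's note: the `hostProvider` rows above encode the data-center selection rule that `DataCentersFinder` is expected to implement. As a reading aid, here is a minimal sketch of that inferred rule; the class and method names are illustrative only and not part of the driver's internals:

```java
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
import com.datastax.oss.driver.api.core.metadata.Node;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

class DataCenterRuleSketch {
  // Inferred from the test fixtures: a node's DC is reported when the node is
  // LOCAL, or when it is REMOTE and the remote connection pool size is
  // non-zero; IGNORED nodes never contribute their DC.
  static Set<String> expectedDataCenters(Collection<Node> nodes, int remotePoolSize) {
    Set<String> dcs = new HashSet<>();
    for (Node node : nodes) {
      NodeDistance d = node.getDistance();
      if (d == NodeDistance.LOCAL || (d == NodeDistance.REMOTE && remotePoolSize > 0)) {
        dcs.add(node.getDatacenter());
      }
    }
    return dcs;
  }
}
```

This reproduces every row of `hostProvider` (e.g. an IGNORED local node with a REMOTE node and pool size 1 yields only the remote DC).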
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java new file mode 100644 index 00000000000..de0f3a9d60b --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfileMockUtil.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.dse.driver.api.core.config.DseDriverOption.GRAPH_TRAVERSAL_SOURCE; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.AUTH_PROVIDER_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.HEARTBEAT_INTERVAL; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_COMPRESSION; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.RECONNECTION_BASE_DELAY; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_CONSISTENCY; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_MAX; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import java.time.Duration; + +class ExecutionProfileMockUtil { + static final String DEFAULT_LOCAL_DC = "local-dc"; + static final int SPECEX_MAX_DEFAULT = 100; + static final int SPECEX_DELAY_DEFAULT = 20; + + static DriverExecutionProfile mockDefaultExecutionProfile() { + DriverExecutionProfile profile 
= mock(DriverExecutionProfile.class); + + when(profile.getDuration(REQUEST_TIMEOUT)).thenReturn(Duration.ofMillis(100)); + when(profile.getString(LOAD_BALANCING_POLICY_CLASS)).thenReturn("LoadBalancingPolicyImpl"); + when(profile.isDefined(LOAD_BALANCING_DISTANCE_EVALUATOR_CLASS)).thenReturn(true); + when(profile.isDefined(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(true); + when(profile.getString(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(DEFAULT_LOCAL_DC); + when(profile.isDefined(SPECULATIVE_EXECUTION_MAX)).thenReturn(true); + when(profile.getInt(SPECULATIVE_EXECUTION_MAX)).thenReturn(SPECEX_MAX_DEFAULT); + when(profile.isDefined(SPECULATIVE_EXECUTION_DELAY)).thenReturn(true); + when(profile.getInt(SPECULATIVE_EXECUTION_DELAY)).thenReturn(SPECEX_DELAY_DEFAULT); + when(profile.getString(SPECULATIVE_EXECUTION_POLICY_CLASS)) + .thenReturn("SpeculativeExecutionImpl"); + when(profile.getString(REQUEST_CONSISTENCY)).thenReturn("LOCAL_ONE"); + when(profile.getString(REQUEST_SERIAL_CONSISTENCY)).thenReturn("SERIAL"); + when(profile.getInt(CONNECTION_POOL_LOCAL_SIZE)).thenReturn(2); + when(profile.getInt(CONNECTION_POOL_REMOTE_SIZE)).thenReturn(1); + when(profile.getString(eq(PROTOCOL_COMPRESSION), any())).thenReturn("none"); + when(profile.getDuration(HEARTBEAT_INTERVAL)).thenReturn(Duration.ofMillis(100)); + when(profile.getDuration(RECONNECTION_BASE_DELAY)).thenReturn(Duration.ofMillis(100)); + when(profile.isDefined(SSL_ENGINE_FACTORY_CLASS)).thenReturn(true); + when(profile.getString(eq(AUTH_PROVIDER_CLASS), any())).thenReturn("AuthProviderImpl"); + when(profile.getString(GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("src-graph"); + return profile; + } + + static DriverExecutionProfile mockNonDefaultRequestTimeoutExecutionProfile() { + DriverExecutionProfile profile = mockDefaultExecutionProfile(); + when(profile.getDuration(REQUEST_TIMEOUT)).thenReturn(Duration.ofMillis(50)); + return profile; + } + + static DriverExecutionProfile mockNonDefaultLoadBalancingExecutionProfile() { + DriverExecutionProfile profile = mockDefaultExecutionProfile(); + when(profile.getString(LOAD_BALANCING_POLICY_CLASS)).thenReturn("NonDefaultLoadBalancing"); + return profile; + } + + static DriverExecutionProfile mockUndefinedLocalDcExecutionProfile() { + DriverExecutionProfile profile = mockNonDefaultLoadBalancingExecutionProfile(); + when(profile.isDefined(LOAD_BALANCING_LOCAL_DATACENTER)).thenReturn(false); + return profile; + } + + static DriverExecutionProfile mockNonDefaultSpeculativeExecutionInfo() { + DriverExecutionProfile profile = mockDefaultExecutionProfile(); + when(profile.getString(SPECULATIVE_EXECUTION_POLICY_CLASS)) + .thenReturn("NonDefaultSpecexPolicy"); + return profile; + } + + static DriverExecutionProfile mockNonDefaultConsistency() { + DriverExecutionProfile profile = mockDefaultExecutionProfile(); + when(profile.getString(REQUEST_CONSISTENCY)).thenReturn("ALL"); + return profile; + } + + static DriverExecutionProfile mockNonDefaultSerialConsistency() { + DriverExecutionProfile profile = mockDefaultExecutionProfile(); + when(profile.getString(REQUEST_SERIAL_CONSISTENCY)).thenReturn("ONE"); + return profile; + } + + static DriverExecutionProfile mockNonDefaultGraphOptions() { + DriverExecutionProfile profile = mockDefaultExecutionProfile(); + when(profile.getString(GRAPH_TRAVERSAL_SOURCE, null)).thenReturn("non-default-graph"); + return profile; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java 
b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java new file mode 100644 index 00000000000..fc92ab20521 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ExecutionProfilesInfoFinderTest.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.DEFAULT_LOCAL_DC; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.SPECEX_DELAY_DEFAULT; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.SPECEX_MAX_DEFAULT; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockDefaultExecutionProfile; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultConsistency; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultGraphOptions; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultLoadBalancingExecutionProfile; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultRequestTimeoutExecutionProfile; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultSerialConsistency; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultSpeculativeExecutionInfo; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockUndefinedLocalDcExecutionProfile; +import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_LOAD_BALANCING_PACKAGE; +import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_SPECULATIVE_EXECUTION_PACKAGE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; +import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; +import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.tngtech.java.junit.dataprovider.DataProvider; 
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.Map;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+
+@RunWith(DataProviderRunner.class)
+public class ExecutionProfilesInfoFinderTest {
+
+  @Test
+  public void should_include_info_about_default_profile() {
+    // given
+    DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile();
+    Map<String, DriverExecutionProfile> profiles =
+        ImmutableMap.of("default", defaultExecutionProfile);
+
+    InternalDriverContext context =
+        mockDriverContextWithProfiles(defaultExecutionProfile, profiles);
+
+    // when
+    Map<String, SpecificExecutionProfile> executionProfilesInfo =
+        new ExecutionProfilesInfoFinder().getExecutionProfilesInfo(context);
+
+    // then
+    assertThat(executionProfilesInfo)
+        .isEqualTo(
+            ImmutableMap.of(
+                "default",
+                new SpecificExecutionProfile(
+                    100,
+                    new LoadBalancingInfo(
+                        "LoadBalancingPolicyImpl",
+                        ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true),
+                        DEFAULT_LOAD_BALANCING_PACKAGE),
+                    new SpeculativeExecutionInfo(
+                        "SpeculativeExecutionImpl",
+                        ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20),
+                        DEFAULT_SPECULATIVE_EXECUTION_PACKAGE),
+                    "LOCAL_ONE",
+                    "SERIAL",
+                    ImmutableMap.of("source", "src-graph"))));
+  }
+
+  @Test
+  @UseDataProvider("executionProfileProvider")
+  public void should_include_info_about_default_profile_and_only_difference_for_specific_profile(
+      DriverExecutionProfile nonDefaultExecutionProfile, SpecificExecutionProfile expected) {
+    // given
+
+    DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile();
+    Map<String, DriverExecutionProfile> profiles =
+        ImmutableMap.of(
+            "default", defaultExecutionProfile, "non-default", nonDefaultExecutionProfile);
+    InternalDriverContext context =
+        mockDriverContextWithProfiles(defaultExecutionProfile, profiles);
+    // when
+    Map<String, SpecificExecutionProfile> executionProfilesInfo =
+        new ExecutionProfilesInfoFinder().getExecutionProfilesInfo(context);
+
+    // then
+    assertThat(executionProfilesInfo)
+        .isEqualTo(
+            ImmutableMap.of(
+                "default",
+                new SpecificExecutionProfile(
+                    100,
+                    new LoadBalancingInfo(
+                        "LoadBalancingPolicyImpl",
+                        ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true),
+                        DEFAULT_LOAD_BALANCING_PACKAGE),
+                    new SpeculativeExecutionInfo(
+                        "SpeculativeExecutionImpl",
+                        ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20),
+                        DEFAULT_SPECULATIVE_EXECUTION_PACKAGE),
+                    "LOCAL_ONE",
+                    "SERIAL",
+                    ImmutableMap.of("source", "src-graph")),
+                "non-default",
+                expected));
+  }
+
+  @DataProvider
+  public static Object[][] executionProfileProvider() {
+    return new Object[][] {
+      {
+        mockNonDefaultRequestTimeoutExecutionProfile(),
+        new SpecificExecutionProfile(50, null, null, null, null, null)
+      },
+      {
+        mockNonDefaultLoadBalancingExecutionProfile(),
+        new SpecificExecutionProfile(
+            null,
+            new LoadBalancingInfo(
+                "NonDefaultLoadBalancing",
+                ImmutableMap.of("localDataCenter", DEFAULT_LOCAL_DC, "filterFunction", true),
+                DEFAULT_LOAD_BALANCING_PACKAGE),
+            null,
+            null,
+            null,
+            null)
+      },
+      {
+        mockUndefinedLocalDcExecutionProfile(),
+        new SpecificExecutionProfile(
+            null,
+            new LoadBalancingInfo(
+                "NonDefaultLoadBalancing",
+                ImmutableMap.of("filterFunction", true),
+                DEFAULT_LOAD_BALANCING_PACKAGE),
+            null,
+            null,
+            null,
+            null)
+      },
+      {
+        mockNonDefaultSpeculativeExecutionInfo(),
+        new SpecificExecutionProfile(
+            null,
+            null,
+            new SpeculativeExecutionInfo(
+                "NonDefaultSpecexPolicy",
+                ImmutableMap.of(
+                    "maxSpeculativeExecutions", SPECEX_MAX_DEFAULT, "delay", SPECEX_DELAY_DEFAULT),
+                DEFAULT_SPECULATIVE_EXECUTION_PACKAGE),
+            null,
+            null,
+            null)
+      },
+      {
+        mockNonDefaultConsistency(),
+        new SpecificExecutionProfile(null, null, null, "ALL", null, null)
+      },
+      {
+        mockNonDefaultSerialConsistency(),
+        new SpecificExecutionProfile(null, null, null, null, "ONE", null)
+      },
+      {
+        mockNonDefaultGraphOptions(),
+        new SpecificExecutionProfile(
+            null, null, null, null, null, ImmutableMap.of("source", "non-default-graph"))
+      },
+      {
+        mockDefaultExecutionProfile(),
+        new SpecificExecutionProfile(null, null, null, null, null, null)
+      }
+    };
+  }
+
+  @Test
+  public void should_not_include_null_fields_in_json() throws JsonProcessingException {
+    // given
+    SpecificExecutionProfile specificExecutionProfile =
+        new SpecificExecutionProfile(50, null, null, "ONE", null, ImmutableMap.of("a", "b"));
+
+    // when
+    String result = new ObjectMapper().writeValueAsString(specificExecutionProfile);
+
+    // then
+    assertThat(result)
+        .isEqualTo("{\"readTimeout\":50,\"consistency\":\"ONE\",\"graphOptions\":{\"a\":\"b\"}}");
+  }
+
+  @Test
+  public void should_include_empty_execution_profile_if_has_all_nulls()
+      throws JsonProcessingException {
+    // given
+    Map<String, SpecificExecutionProfile> executionProfiles =
+        ImmutableMap.of("p", new SpecificExecutionProfile(null, null, null, null, null, null));
+
+    // when
+    String result = new ObjectMapper().writeValueAsString(executionProfiles);
+
+    // then
+    assertThat(result).isEqualTo("{\"p\":{}}");
+  }
+
+  private InternalDriverContext mockDriverContextWithProfiles(
+      DriverExecutionProfile defaultExecutionProfile,
+      Map<String, DriverExecutionProfile> profiles) {
+    InternalDriverContext context = mock(InternalDriverContext.class);
+    DriverConfig driverConfig = mock(DriverConfig.class);
+    Mockito.<Map<String, ? extends DriverExecutionProfile>>when(driverConfig.getProfiles())
+        .thenReturn(profiles);
+    when(driverConfig.getDefaultProfile()).thenReturn(defaultExecutionProfile);
+    when(context.getConfig()).thenReturn(driverConfig);
+    return context;
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java
new file mode 100644
index 00000000000..74869893b72
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsClientTest.java
@@ -0,0 +1,535 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.insights; + +import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockDefaultExecutionProfile; +import static com.datastax.dse.driver.internal.core.insights.ExecutionProfileMockUtil.mockNonDefaultRequestTimeoutExecutionProfile; +import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_AUTH_PROVIDER_PACKAGE; +import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_LOAD_BALANCING_PACKAGE; +import static com.datastax.dse.driver.internal.core.insights.PackageUtil.DEFAULT_SPECULATIVE_EXECUTION_PACKAGE; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; +import com.datastax.dse.driver.internal.core.insights.schema.AuthProviderType; +import com.datastax.dse.driver.internal.core.insights.schema.Insight; +import com.datastax.dse.driver.internal.core.insights.schema.InsightMetadata; +import com.datastax.dse.driver.internal.core.insights.schema.InsightType; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.CPUS; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.OS; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsStartupData; +import com.datastax.dse.driver.internal.core.insights.schema.InsightsStatusData; +import com.datastax.dse.driver.internal.core.insights.schema.LoadBalancingInfo; +import com.datastax.dse.driver.internal.core.insights.schema.PoolSizeByHostDistance; +import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; +import com.datastax.dse.driver.internal.core.insights.schema.SSL; +import com.datastax.dse.driver.internal.core.insights.schema.SessionStateForNode; +import com.datastax.dse.driver.internal.core.insights.schema.SpecificExecutionProfile; +import com.datastax.dse.driver.internal.core.insights.schema.SpeculativeExecutionInfo; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.channel.DriverChannel; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.context.StartupOptionsBuilder; +import com.datastax.oss.driver.internal.core.control.ControlConnection; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metadata.MetadataManager; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import 
com.datastax.oss.driver.internal.core.session.PoolManager;
+import com.datastax.oss.driver.shaded.guava.common.base.Suppliers;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet;
+import com.datastax.oss.driver.shaded.guava.common.collect.Sets;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import io.netty.channel.DefaultEventLoop;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+
+@RunWith(DataProviderRunner.class)
+public class InsightsClientTest {
+  private static final StackTraceElement[] EMPTY_STACK_TRACE = {};
+  private static final Map<String, Object> EMPTY_OBJECT_MAP = Collections.emptyMap();
+  private static final Supplier<Long> MOCK_TIME_SUPPLIER = Suppliers.ofInstance(1L);
+  private static final InsightsConfiguration INSIGHTS_CONFIGURATION =
+      new InsightsConfiguration(true, 300000L, new DefaultEventLoop());
+
+  @Test
+  public void should_construct_json_event_startup_message() throws IOException {
+    // given
+    DefaultDriverContext context = mockDefaultDriverContext();
+    PlatformInfoFinder platformInfoFinder = mock(PlatformInfoFinder.class);
+    OS os = new OS("linux", "1.2", "x64");
+    CPUS cpus = new CPUS(8, "intel i7");
+    Map<String, RuntimeAndCompileTimeVersions> javaDeps =
+        ImmutableMap.of("version", new RuntimeAndCompileTimeVersions("1.8.0", "1.8.0", false));
+    Map<String, Map<String, RuntimeAndCompileTimeVersions>> runtimeInfo =
+        ImmutableMap.of("java", javaDeps);
+    InsightsPlatformInfo insightsPlatformInfo = new InsightsPlatformInfo(os, cpus, runtimeInfo);
+    when(platformInfoFinder.getInsightsPlatformInfo()).thenReturn(insightsPlatformInfo);
+
+    ConfigAntiPatternsFinder configAntiPatternsFinder = mock(ConfigAntiPatternsFinder.class);
+    when(configAntiPatternsFinder.findAntiPatterns(any(DefaultDriverContext.class)))
+        .thenReturn(
+            ImmutableMap.of(
+                "contactPointsMultipleDCs",
+                "Contact points contain hosts from multiple data centers"));
+
+    DataCentersFinder dataCentersFinder = mock(DataCentersFinder.class);
+    when(dataCentersFinder.getDataCenters(any(DefaultDriverContext.class)))
+        .thenReturn(Sets.newHashSet("dc1", "dc2"));
+    ReconnectionPolicyInfoFinder reconnectionPolicyInfoFinder =
+        mock(ReconnectionPolicyInfoFinder.class);
+    when(reconnectionPolicyInfoFinder.getReconnectionPolicyInfo(any(), any()))
+        .thenReturn(
+            new ReconnectionPolicyInfo(
+                "reconnection-policy-a", ImmutableMap.of("opt-a", 1), "com.datastax.dse"));
+
+    InsightsClient insightsClient =
+        new InsightsClient(
+            context,
+            MOCK_TIME_SUPPLIER,
+            INSIGHTS_CONFIGURATION,
+            platformInfoFinder,
+            reconnectionPolicyInfoFinder,
+            new ExecutionProfilesInfoFinder(),
+            configAntiPatternsFinder,
+            dataCentersFinder,
+            EMPTY_STACK_TRACE);
+
+    // when
+    String startupMessage = insightsClient.createStartupMessage();
+    Insight<InsightsStartupData> insight =
+        new ObjectMapper()
+            .readValue(startupMessage, new TypeReference<Insight<InsightsStartupData>>() {});
+
+    // then
+    assertThat(insight.getMetadata())
+        .isEqualTo(
+            new InsightMetadata(
+                "driver.startup",
+                1L,
+                ImmutableMap.of("language", "java"),
+                InsightType.EVENT,
+                "v1"));
+
+    InsightsStartupData insightData = insight.getInsightData();
+    assertThat(insightData.getClientId()).isEqualTo("client-id");
+    assertThat(insightData.getSessionId()).isNotNull();
+    assertThat(insightData.getDriverName()).isEqualTo("DataStax Enterprise Java Driver");
+    assertThat(insightData.getDriverVersion()).isNotEmpty();
+    assertThat(insightData.getApplicationName()).isEqualTo("app-name");
+    assertThat(insightData.getApplicationVersion()).isEqualTo("1.0.0");
+    assertThat(insightData.isApplicationNameWasGenerated()).isEqualTo(false);
+    assertThat(insightData.getContactPoints())
+        .isEqualTo(ImmutableMap.of("localhost", Collections.singletonList("127.0.0.1:9999")));
+
+    assertThat(insightData.getInitialControlConnection()).isEqualTo("127.0.0.1:10");
+    assertThat(insightData.getLocalAddress()).isEqualTo("127.0.0.1");
+    assertThat(insightData.getHostName()).isNotEmpty();
+    assertThat(insightData.getProtocolVersion()).isEqualTo(DSE_V2.getCode());
+    assertThat(insightData.getExecutionProfiles())
+        .isEqualTo(
+            ImmutableMap.of(
+                "default",
+                new SpecificExecutionProfile(
+                    100,
+                    new LoadBalancingInfo(
+                        "LoadBalancingPolicyImpl",
+                        ImmutableMap.of("localDataCenter", "local-dc", "filterFunction", true),
+                        DEFAULT_LOAD_BALANCING_PACKAGE),
+                    new SpeculativeExecutionInfo(
+                        "SpeculativeExecutionImpl",
+                        ImmutableMap.of("maxSpeculativeExecutions", 100, "delay", 20),
+                        DEFAULT_SPECULATIVE_EXECUTION_PACKAGE),
+                    "LOCAL_ONE",
+                    "SERIAL",
+                    ImmutableMap.of("source", "src-graph")),
+                "non-default",
+                new SpecificExecutionProfile(50, null, null, null, null, null)));
+    assertThat(insightData.getPoolSizeByHostDistance())
+        .isEqualTo(new PoolSizeByHostDistance(2, 1, 0));
+    assertThat(insightData.getHeartbeatInterval()).isEqualTo(100);
+    assertThat(insightData.getCompression()).isEqualTo("none");
+    assertThat(insightData.getReconnectionPolicy())
+        .isEqualTo(
+            new ReconnectionPolicyInfo(
+                "reconnection-policy-a", ImmutableMap.of("opt-a", 1), "com.datastax.dse"));
+    assertThat(insightData.getSsl()).isEqualTo(new SSL(true, false));
+    assertThat(insightData.getAuthProvider())
+        .isEqualTo(new AuthProviderType("AuthProviderImpl", DEFAULT_AUTH_PROVIDER_PACKAGE));
+    assertThat(insightData.getOtherOptions()).isEqualTo(EMPTY_OBJECT_MAP);
+    assertThat(insightData.getPlatformInfo()).isEqualTo(insightsPlatformInfo);
+    assertThat(insightData.getConfigAntiPatterns())
+        .isEqualTo(
+            ImmutableMap.of(
+                "contactPointsMultipleDCs",
+                "Contact points contain hosts from multiple data centers"));
+    assertThat(insightData.getPeriodicStatusInterval()).isEqualTo(300);
+    assertThat(insightData.getDataCenters()).isEqualTo(Sets.newHashSet("dc1", "dc2"));
+  }
+
+  @Test
+  public void should_group_contact_points_by_host_name() {
+    // given
+    Set<InetSocketAddress> contactPoints =
+        ImmutableSet.of(
+            InetSocketAddress.createUnresolved("127.0.0.1", 8080),
+            InetSocketAddress.createUnresolved("127.0.0.1", 8081),
+            InetSocketAddress.createUnresolved("127.0.0.2", 8081));
+
+    Map<String, List<String>> expected =
+        ImmutableMap.of(
+            "127.0.0.1",
+            ImmutableList.of("127.0.0.1:8080", "127.0.0.1:8081"),
+            "127.0.0.2",
+            ImmutableList.of("127.0.0.2:8081"));
+
+    // when
+    Map<String, List<String>> resolvedContactPoints =
+        InsightsClient.getResolvedContactPoints(contactPoints);
+
+    // then
+    assertThat(resolvedContactPoints).isEqualTo(expected);
+  }
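+
+  // The periodic status message is much lighter than the startup message: it carries
+  // only the control connection address and per-node session state (open connections
+  // and in-flight requests).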
+  @Test
+  public void should_construct_json_event_status_message() throws IOException {
+    // given
+    InsightsClient insightsClient =
+        new InsightsClient(
+            mockDefaultDriverContext(),
+            MOCK_TIME_SUPPLIER,
+            INSIGHTS_CONFIGURATION,
+            null,
+            null,
+            null,
+            null,
+            null,
+            EMPTY_STACK_TRACE);
+
+    // when
+    String statusMessage = insightsClient.createStatusMessage();
+
+    // then
+    Insight<InsightsStatusData> insight =
+        new ObjectMapper()
+            .readValue(statusMessage, new TypeReference<Insight<InsightsStatusData>>() {});
+    assertThat(insight.getMetadata())
+        .isEqualTo(
+            new InsightMetadata(
+                "driver.status", 1L, ImmutableMap.of("language", "java"), InsightType.EVENT, "v1"));
+    InsightsStatusData insightData = insight.getInsightData();
+    assertThat(insightData.getClientId()).isEqualTo("client-id");
+    assertThat(insightData.getSessionId()).isNotNull();
+    assertThat(insightData.getControlConnection()).isEqualTo("127.0.0.1:10");
+    assertThat(insightData.getConnectedNodes())
+        .isEqualTo(
+            ImmutableMap.of(
+                "127.0.0.1:10", new SessionStateForNode(1, 10),
+                "127.0.0.1:20", new SessionStateForNode(2, 20)));
+  }
+
+  @Test
+  public void should_schedule_task_with_initial_delay() {
+    // given
+    final AtomicInteger counter = new AtomicInteger();
+    Runnable runnable = counter::incrementAndGet;
+
+    // when
+    InsightsClient.scheduleInsightsTask(100L, Executors.newScheduledThreadPool(1), runnable);
+
+    // then
+    await().atMost(1, SECONDS).until(() -> counter.get() >= 1);
+  }
+
+  @Test
+  @UseDataProvider(value = "stackTraceProvider")
+  public void should_get_caller_of_create_cluster(StackTraceElement[] stackTrace, String expected) {
+    // when
+    String result = InsightsClient.getClusterCreateCaller(stackTrace);
+
+    // then
+    assertThat(result).isEqualTo(expected);
+  }
+
+  @Test
+  @SuppressWarnings("ResultOfMethodCallIgnored")
+  public void should_execute_should_send_event_check_only_once()
+      throws UnknownHostException, InterruptedException {
+    // given
+    InsightsConfiguration insightsConfiguration = mock(InsightsConfiguration.class);
+    when(insightsConfiguration.isMonitorReportingEnabled()).thenReturn(true);
+    when(insightsConfiguration.getStatusEventDelayMillis()).thenReturn(10L);
+    when(insightsConfiguration.getExecutor()).thenReturn(new DefaultEventLoop());
+
+    InsightsClient insightsClient =
+        new InsightsClient(
+            mockDefaultDriverContext(),
+            MOCK_TIME_SUPPLIER,
+            insightsConfiguration,
+            null,
+            null,
+            null,
+            null,
+            null,
+            EMPTY_STACK_TRACE);
+
+    // when
+    insightsClient.scheduleStatusMessageSend();
+    // emulate periodic calls to sendStatusMessage
+    insightsClient.sendStatusMessage();
+    insightsClient.sendStatusMessage();
+    insightsClient.sendStatusMessage();
+
+    // then
+    verify(insightsConfiguration, times(1)).isMonitorReportingEnabled();
+  }
+
+  @DataProvider
+  public static Object[][] stackTraceProvider() {
+    StackTraceElement[] onlyInitCall =
+        new StackTraceElement[] {
+          new StackTraceElement(
+              "com.datastax.oss.driver.internal.core.context.DefaultDriverContext",
+              "<init>",
+              "DefaultDriverContext.java",
+              94),
+        };
+
+    StackTraceElement[] stackTraceElementsWithoutInitCall =
+        new StackTraceElement[] {
+          new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559),
+          new StackTraceElement(
+              "com.datastax.driver.core.InsightsClient",
+              "getClusterCreateCaller",
+              "InsightsClient.java",
+              302)
+        };
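+    // The fixtures below emulate realistic creation stacks: getClusterCreateCaller
+    // should skip driver-internal frames (DefaultDriverContext.<init> and the
+    // SessionBuilder.build* chain) and report the first application frame, falling
+    // back to DEFAULT_JAVA_APPLICATION when no such frame exists.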
"com.datastax.oss.driver.internal.core.context.DefaultDriverContext", + "", + "DefaultDriverContext.java", + 243), + }; + StackTraceElement[] stackTraceWithOneInitCallAndCaller = + new StackTraceElement[] { + new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), + new StackTraceElement( + "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", + "", + "DefaultDriverContext.java", + 243), + new StackTraceElement( + "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 1) + }; + + StackTraceElement[] stackTraceWithTwoInitCallsAndCaller = + new StackTraceElement[] { + new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), + new StackTraceElement( + "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", + "", + "DefaultDriverContext.java", + 243), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "buildDefaultSessionAsync", + "SessionBuilder.java", + 300), + new StackTraceElement( + "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 1) + }; + StackTraceElement[] stackTraceWithChainOfInitCalls = + new StackTraceElement[] { + new StackTraceElement( + "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", + "", + "DefaultDriverContext.java", + 243), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "buildDefaultSessionAsync", + "SessionBuilder.java", + 332), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "buildAsync", + "SessionBuilder.java", + 291), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "build", + "SessionBuilder.java", + 306) + }; + StackTraceElement[] stackTraceWithChainOfInitCallsAndCaller = + new StackTraceElement[] { + new StackTraceElement("java.lang.Thread", "getStackTrace", "Thread.java", 1559), + new StackTraceElement( + "com.datastax.oss.driver.internal.core.context.DefaultDriverContext", + "", + "DefaultDriverContext.java", + 243), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "buildContext", + "SessionBuilder.java", + 687), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "buildDefaultSessionAsync", + "SessionBuilder.java", + 332), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "buildAsync", + "SessionBuilder.java", + 291), + new StackTraceElement( + "com.datastax.oss.driver.api.core.session.SessionBuilder", + "build", + "SessionBuilder.java", + 306), + new StackTraceElement( + "com.example.ActualCallerNameApp", "main", "ActualCallerNameApp.java", 8) + }; + + return new Object[][] { + {new StackTraceElement[] {}, InsightsClient.DEFAULT_JAVA_APPLICATION}, + {stackTraceElementsWithoutInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, + {stackTraceWithOneInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, + {onlyInitCall, InsightsClient.DEFAULT_JAVA_APPLICATION}, + {stackTraceWithOneInitCallAndCaller, "com.example.ActualCallerNameApp"}, + {stackTraceWithTwoInitCallsAndCaller, "com.example.ActualCallerNameApp"}, + {stackTraceWithChainOfInitCalls, InsightsClient.DEFAULT_JAVA_APPLICATION}, + {stackTraceWithChainOfInitCallsAndCaller, "com.example.ActualCallerNameApp"} + }; + } + + private DefaultDriverContext mockDefaultDriverContext() throws UnknownHostException { + DefaultDriverContext context = mock(DefaultDriverContext.class); + mockConnectionPools(context); + 
+    MetadataManager manager = mock(MetadataManager.class);
+    when(context.getMetadataManager()).thenReturn(manager);
+    Metadata metadata = mock(Metadata.class);
+    when(manager.getMetadata()).thenReturn(metadata);
+    Node node = mock(Node.class);
+    when(node.getExtras())
+        .thenReturn(
+            ImmutableMap.of(
+                DseNodeProperties.DSE_VERSION, Objects.requireNonNull(Version.parse("6.0.5"))));
+    when(metadata.getNodes()).thenReturn(ImmutableMap.of(UUID.randomUUID(), node));
+    DriverExecutionProfile defaultExecutionProfile = mockDefaultExecutionProfile();
+    DriverExecutionProfile nonDefaultExecutionProfile =
+        mockNonDefaultRequestTimeoutExecutionProfile();
+
+    Map<String, String> startupOptions = new HashMap<>();
+    startupOptions.put(StartupOptionsBuilder.CLIENT_ID_KEY, "client-id");
+    startupOptions.put(StartupOptionsBuilder.APPLICATION_VERSION_KEY, "1.0.0");
+    startupOptions.put(StartupOptionsBuilder.APPLICATION_NAME_KEY, "app-name");
+    startupOptions.put(StartupOptionsBuilder.DRIVER_VERSION_KEY, "2.x");
+    startupOptions.put(StartupOptionsBuilder.DRIVER_NAME_KEY, "DataStax Enterprise Java Driver");
+
+    when(context.getStartupOptions()).thenReturn(startupOptions);
+    when(context.getProtocolVersion()).thenReturn(DSE_V2);
+    DefaultNode contactPoint = mock(DefaultNode.class);
+    EndPoint contactEndPoint = mock(EndPoint.class);
+    when(contactEndPoint.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 9999));
+    when(contactPoint.getEndPoint()).thenReturn(contactEndPoint);
+    when(manager.getContactPoints()).thenReturn(ImmutableSet.of(contactPoint));
+
+    DriverConfig driverConfig = mock(DriverConfig.class);
+    when(context.getConfig()).thenReturn(driverConfig);
+    Map<String, DriverExecutionProfile> profiles =
+        ImmutableMap.of(
+            "default", defaultExecutionProfile, "non-default", nonDefaultExecutionProfile);
+    Mockito.<Map<String, ? extends DriverExecutionProfile>>when(driverConfig.getProfiles())
+        .thenReturn(profiles);
+    when(driverConfig.getDefaultProfile()).thenReturn(defaultExecutionProfile);
+
+    ControlConnection controlConnection = mock(ControlConnection.class);
+    DriverChannel channel = mock(DriverChannel.class);
+    EndPoint controlConnectionEndpoint = mock(EndPoint.class);
+    when(controlConnectionEndpoint.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 10));
+
+    when(channel.getEndPoint()).thenReturn(controlConnectionEndpoint);
+    when(channel.localAddress()).thenReturn(new InetSocketAddress("127.0.0.1", 10));
+    when(controlConnection.channel()).thenReturn(channel);
+    when(context.getControlConnection()).thenReturn(controlConnection);
+    return context;
+  }
+
+  private void mockConnectionPools(DefaultDriverContext driverContext) {
+    Node node1 = mock(Node.class);
+    EndPoint endPoint1 = mock(EndPoint.class);
+    when(endPoint1.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 10));
+    when(node1.getEndPoint()).thenReturn(endPoint1);
+    when(node1.getOpenConnections()).thenReturn(1);
+    ChannelPool channelPool1 = mock(ChannelPool.class);
+    when(channelPool1.getInFlight()).thenReturn(10);
+
+    Node node2 = mock(Node.class);
+    EndPoint endPoint2 = mock(EndPoint.class);
+    when(endPoint2.resolve()).thenReturn(new InetSocketAddress("127.0.0.1", 20));
+    when(node2.getEndPoint()).thenReturn(endPoint2);
+    when(node2.getOpenConnections()).thenReturn(2);
+    ChannelPool channelPool2 = mock(ChannelPool.class);
+    when(channelPool2.getInFlight()).thenReturn(20);
+
+    Map<Node, ChannelPool> channelPools = ImmutableMap.of(node1, channelPool1, node2, channelPool2);
+    PoolManager poolManager = mock(PoolManager.class);
+    when(poolManager.getPools()).thenReturn(channelPools);
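+    // Registering the pools with the pool manager is what the status message reads to
+    // report per-node state: (openConnections, inFlight) = (1, 10) and (2, 20).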
+    when(driverContext.getPoolManager()).thenReturn(poolManager);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java
new file mode 100644
index 00000000000..9edd4494bdd
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/InsightsSupportVerifierTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.dse.driver.api.core.metadata.DseNodeProperties;
+import com.datastax.oss.driver.api.core.Version;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.util.Collection;
+import java.util.Collections;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class InsightsSupportVerifierTest {
+
+  @Test
+  @UseDataProvider(value = "dseHostsProvider")
+  public void should_detect_DSE_versions_that_supports_insights(
+      Collection<Node> hosts, boolean expected) {
+    // when
+    boolean result = InsightsSupportVerifier.supportsInsights(hosts);
+
+    // then
+    assertThat(result).isEqualTo(expected);
+  }
+
+  @DataProvider
+  public static Object[][] dseHostsProvider() {
+    Node dse605 = mock(Node.class);
+    when(dse605.getExtras())
+        .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.5")));
+    Node dse604 = mock(Node.class);
+    when(dse604.getExtras())
+        .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.4")));
+    Node dse600 = mock(Node.class);
+    when(dse600.getExtras())
+        .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("6.0.0")));
+    Node dse5113 = mock(Node.class);
+    when(dse5113.getExtras())
+        .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("5.1.13")));
+    Node dse500 = mock(Node.class);
+    when(dse500.getExtras())
+        .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, Version.parse("5.0.0")));
+    Node nodeWithoutExtras = mock(Node.class);
+    when(nodeWithoutExtras.getExtras()).thenReturn(Collections.emptyMap());
+
+    return new Object[][] {
+      {ImmutableList.of(dse605), true},
+      {ImmutableList.of(dse604), false},
+      {ImmutableList.of(dse600), false},
+      {ImmutableList.of(dse5113), true},
+      {ImmutableList.of(dse500), false},
+      {ImmutableList.of(dse5113, dse605), true},
+      {ImmutableList.of(dse5113, dse600), false},
+      {ImmutableList.of(dse500, dse600), false},
+      {ImmutableList.of(), false},
+      {ImmutableList.of(nodeWithoutExtras), false}
+    };
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java
new file mode 100644
index 00000000000..336f19184d3
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PackageUtilTest.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class PackageUtilTest {
+
+  private static final String DEFAULT_PACKAGE = "default.package";
+
+  @Test
+  public void should_find_package_name_for_class() {
+    // given
+    TestClass testClass = new TestClass();
+
+    // when
+    String namespace = PackageUtil.getNamespace(testClass.getClass());
+
+    // then
+    assertThat(namespace).isEqualTo("com.datastax.dse.driver.internal.core.insights");
+  }
+
+  @Test
+  @UseDataProvider("packagesProvider")
+  public void should_get_full_package_or_return_default(String fullClassSetting, String expected) {
+    // when
+    String result = PackageUtil.getFullPackageOrDefault(fullClassSetting, DEFAULT_PACKAGE);
+
+    // then
+    assertThat(result).isEqualTo(expected);
+  }
+
+  @Test
+  @UseDataProvider("classesProvider")
+  public void should_get_class_name_from_full_class_setting(
+      String fullClassSetting, String expected) {
+    // when
+    String result = PackageUtil.getClassName(fullClassSetting);
+
+    // then
+    assertThat(result).isEqualTo(expected);
+  }
+
+  @DataProvider
+  public static Object[][] packagesProvider() {
+    return new Object[][] {
+      {"com.P", "com"},
+      {"ClassName", DEFAULT_PACKAGE},
+      {"", DEFAULT_PACKAGE},
+      {"com.p.a.2.x.12.Class", "com.p.a.2.x.12"},
+    };
+  }
+
+  @DataProvider
+  public static Object[][] classesProvider() {
+    return new Object[][] {
+      {"com.P", "P"},
+      {"ClassName", "ClassName"},
+      {"", ""},
+      {"com.p.a.2.x.12.Class", "Class"},
+    };
+  }
+
+  private static class TestClass {}
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java
new file mode 100644
index 00000000000..2a098363d46
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/PlatformInfoFinderTest.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.insights;
+
+import static com.datastax.dse.driver.internal.core.insights.PlatformInfoFinder.UNVERIFIED_RUNTIME_VERSION;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.dse.driver.internal.core.insights.schema.InsightsPlatformInfo.RuntimeAndCompileTimeVersions;
+import java.io.InputStream;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import org.junit.Test;
+
+public class PlatformInfoFinderTest {
+
+  private URL nullUrlProvider(PlatformInfoFinder.DependencyFromFile d) {
+    return null;
+  }
+
+  private URL nettyUrlProvider(PlatformInfoFinder.DependencyFromFile d) {
+    return this.getClass().getResource("/insights/pom.properties");
+  }
+
+  private URL malformedUrlProvider(PlatformInfoFinder.DependencyFromFile d) {
+    return this.getClass().getResource("/insights/malformed-pom.properties");
+  }
+
+  private URL nonExistingUrlProvider(PlatformInfoFinder.DependencyFromFile d) {
+    return this.getClass().getResource("/insights/non-existing.pom");
+  }
+
+  @Test
+  public void should_find_dependencies_from_file() {
+    // given
+    InputStream inputStream =
+        this.getClass().getResourceAsStream("/insights/test-dependencies.txt");
+    Map<String, RuntimeAndCompileTimeVersions> expected = new HashMap<>();
+    expected.put(
+        "io.netty:netty-transport-native-epoll",
+        withUnverifiedRuntimeVersionOptional("4.0.56.Final"));
+    expected.put("org.slf4j:slf4j-api", withUnverifiedRuntimeVersion("1.7.25"));
+    expected.put("org.ow2.asm:asm", withUnverifiedRuntimeVersion("5.0.3"));
+    expected.put("com.esri.geometry:esri-geometry-api", withUnverifiedRuntimeVersion("1.2.1"));
+    expected.put("io.netty:netty-transport", withUnverifiedRuntimeVersion("4.0.56.Final"));
+    expected.put("com.github.jnr:jnr-x86asm", withUnverifiedRuntimeVersion("1.0.2"));
+    expected.put("org.ow2.asm:asm-analysis", withUnverifiedRuntimeVersion("5.0.3"));
+    expected.put("com.github.jnr:jnr-constants", withUnverifiedRuntimeVersion("0.9.9"));
+    expected.put("io.netty:netty-common", withUnverifiedRuntimeVersion("4.0.56.Final"));
+    expected.put("com.google.guava:guava", withUnverifiedRuntimeVersion("19.0"));
+    expected.put("org.xerial.snappy:snappy-java", withUnverifiedRuntimeVersionOptional("1.1.2.6"));
+    expected.put("io.dropwizard.metrics:metrics-core", withUnverifiedRuntimeVersion("3.2.2"));
+    expected.put("org.ow2.asm:asm-tree", withUnverifiedRuntimeVersion("5.0.3"));
+    expected.put("com.github.jnr:jnr-posix", withUnverifiedRuntimeVersion("3.0.44"));
expected.put("org.codehaus.jackson:jackson-core-asl", withUnverifiedRuntimeVersion("1.9.12")); + expected.put( + "com.fasterxml.jackson.core:jackson-databind", withUnverifiedRuntimeVersion("2.7.9.3")); + expected.put("io.netty:netty-codec", withUnverifiedRuntimeVersion("4.0.56.Final")); + expected.put( + "com.fasterxml.jackson.core:jackson-annotations", withUnverifiedRuntimeVersion("2.8.11")); + expected.put("com.fasterxml.jackson.core:jackson-core", withUnverifiedRuntimeVersion("2.8.11")); + expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.56.Final")); + expected.put("at.yawk.lz4:lz4-java", withUnverifiedRuntimeVersionOptional("1.10.1")); + expected.put("org.hdrhistogram:HdrHistogram", withUnverifiedRuntimeVersionOptional("2.1.10")); + expected.put("com.github.jnr:jffi", withUnverifiedRuntimeVersion("1.2.16")); + expected.put("io.netty:netty-buffer", withUnverifiedRuntimeVersion("4.0.56.Final")); + expected.put("org.ow2.asm:asm-commons", withUnverifiedRuntimeVersion("5.0.3")); + expected.put("org.json:json", withUnverifiedRuntimeVersion("20090211")); + expected.put("org.ow2.asm:asm-util", withUnverifiedRuntimeVersion("5.0.3")); + expected.put("com.github.jnr:jnr-ffi", withUnverifiedRuntimeVersion("2.1.7")); + + // when + Map stringStringMap = + new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); + + // then + assertThat(stringStringMap).hasSize(28); + assertThat(stringStringMap).isEqualTo(expected); + } + + @Test + public void should_find_dependencies_from_file_without_duplicate() { + // given + InputStream inputStream = + this.getClass().getResourceAsStream("/insights/duplicate-dependencies.txt"); + + // when + Map stringStringMap = + new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); + + // then + assertThat(stringStringMap).hasSize(1); + } + + @Test + public void should_keep_order_of_dependencies() { + // given + InputStream inputStream = + this.getClass().getResourceAsStream("/insights/ordered-dependencies.txt"); + Map expected = new LinkedHashMap<>(); + expected.put("b-org.com:art1", withUnverifiedRuntimeVersion("1.0")); + expected.put("a-org.com:art1", withUnverifiedRuntimeVersion("2.0")); + expected.put("c-org.com:art1", withUnverifiedRuntimeVersion("3.0")); + + // when + Map stringStringMap = + new PlatformInfoFinder(this::nullUrlProvider).fetchDependenciesFromFile(inputStream); + + // then + assertThat(stringStringMap).isEqualTo(expected); + Iterator iterator = expected.keySet().iterator(); + assertThat(iterator.next()).isEqualTo("b-org.com:art1"); + assertThat(iterator.next()).isEqualTo("a-org.com:art1"); + assertThat(iterator.next()).isEqualTo("c-org.com:art1"); + } + + @Test + public void should_add_information_about_java_platform() { + // given + Map> runtimeDependencies = new HashMap<>(); + + // when + new PlatformInfoFinder(this::nullUrlProvider).addJavaVersion(runtimeDependencies); + + // then + Map javaDependencies = runtimeDependencies.get("java"); + assertThat(javaDependencies.size()).isEqualTo(3); + } + + @Test + public void should_load_runtime_version_from_pom_properties_URL() { + // given + InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt"); + Map expected = new LinkedHashMap<>(); + expected.put( + "io.netty:netty-handler", + new RuntimeAndCompileTimeVersions("4.0.56.Final", "4.0.0.Final", false)); + + // when + Map stringStringMap = + new PlatformInfoFinder(this::nettyUrlProvider).fetchDependenciesFromFile(inputStream); + + // 
+    // then
+    assertThat(dependencies).isEqualTo(expected);
+  }
+
+  @Test
+  public void should_load_runtime_version_of_optional_dependency_from_pom_properties_URL() {
+    // given
+    InputStream inputStream =
+        this.getClass().getResourceAsStream("/insights/netty-dependency-optional.txt");
+    Map<String, RuntimeAndCompileTimeVersions> expected = new LinkedHashMap<>();
+    expected.put(
+        "io.netty:netty-handler",
+        new RuntimeAndCompileTimeVersions("4.0.56.Final", "4.0.0.Final", true));
+
+    // when
+    Map<String, RuntimeAndCompileTimeVersions> dependencies =
+        new PlatformInfoFinder(this::nettyUrlProvider).fetchDependenciesFromFile(inputStream);
+
+    // then
+    assertThat(dependencies).isEqualTo(expected);
+  }
+
+  @Test
+  public void should_not_load_runtime_dependency_from_malformed_pom_properties() {
+    // given
+    InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt");
+    Map<String, RuntimeAndCompileTimeVersions> expected = new LinkedHashMap<>();
+    expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.0.Final"));
+
+    // when
+    Map<String, RuntimeAndCompileTimeVersions> dependencies =
+        new PlatformInfoFinder(this::malformedUrlProvider).fetchDependenciesFromFile(inputStream);
+
+    // then
+    assertThat(dependencies).isEqualTo(expected);
+  }
+
+  @Test
+  public void should_not_load_runtime_dependency_from_non_existing_pom_properties() {
+    // given
+    InputStream inputStream = this.getClass().getResourceAsStream("/insights/netty-dependency.txt");
+    Map<String, RuntimeAndCompileTimeVersions> expected = new LinkedHashMap<>();
+    expected.put("io.netty:netty-handler", withUnverifiedRuntimeVersion("4.0.0.Final"));
+
+    // when
+    Map<String, RuntimeAndCompileTimeVersions> dependencies =
+        new PlatformInfoFinder(this::nonExistingUrlProvider).fetchDependenciesFromFile(inputStream);
+
+    // then
+    assertThat(dependencies).isEqualTo(expected);
+  }
+
+  private RuntimeAndCompileTimeVersions withUnverifiedRuntimeVersion(String compileVersion) {
+    return new RuntimeAndCompileTimeVersions(UNVERIFIED_RUNTIME_VERSION, compileVersion, false);
+  }
+
+  private RuntimeAndCompileTimeVersions withUnverifiedRuntimeVersionOptional(
+      String compileVersion) {
+    return new RuntimeAndCompileTimeVersions(UNVERIFIED_RUNTIME_VERSION, compileVersion, true);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java
new file mode 100644
index 00000000000..a076ca38b1c
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/insights/ReconnectionPolicyInfoFinderTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.insights; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.internal.core.insights.schema.ReconnectionPolicyInfo; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; +import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; +import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy; +import java.time.Duration; +import org.assertj.core.data.MapEntry; +import org.junit.Test; + +public class ReconnectionPolicyInfoFinderTest { + + @Test + public void should_find_an_info_about_constant_reconnection_policy() { + // given + DriverExecutionProfile driverExecutionProfile = mock(DriverExecutionProfile.class); + when(driverExecutionProfile.getDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY)) + .thenReturn(Duration.ofMillis(100)); + ReconnectionPolicy constantReconnectionPolicy = mock(ConstantReconnectionPolicy.class); + + // when + ReconnectionPolicyInfo reconnectionPolicyInfo = + new ReconnectionPolicyInfoFinder() + .getReconnectionPolicyInfo(constantReconnectionPolicy, driverExecutionProfile); + + // then + assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("delayMs", 100L)); + assertThat(reconnectionPolicyInfo.getType()).contains("ConstantReconnectionPolicy"); + } + + @Test + public void should_find_an_info_about_exponential_reconnection_policy() { + ExponentialReconnectionPolicy exponentialReconnectionPolicy = + mock(ExponentialReconnectionPolicy.class); + when(exponentialReconnectionPolicy.getBaseDelayMs()).thenReturn(100L); + when(exponentialReconnectionPolicy.getMaxAttempts()).thenReturn(10L); + when(exponentialReconnectionPolicy.getMaxDelayMs()).thenReturn(200L); + + // when + ReconnectionPolicyInfo reconnectionPolicyInfo = + new ReconnectionPolicyInfoFinder() + .getReconnectionPolicyInfo(exponentialReconnectionPolicy, null); + + // then + assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("baseDelayMs", 100L)); + assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("maxAttempts", 10L)); + assertThat(reconnectionPolicyInfo.getOptions()).contains(MapEntry.entry("maxDelayMs", 200L)); + assertThat(reconnectionPolicyInfo.getType()).contains("ExponentialReconnectionPolicy"); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java new file mode 100644 index 00000000000..3ef89c78714 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/protocol/TinkerpopBufferPrimitiveCodecTest.java @@ -0,0 +1,358 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.protocol; + +import static com.datastax.dse.driver.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.dse.driver.Assertions; +import com.datastax.dse.driver.internal.core.graph.binary.buffer.DseNettyBufferFactory; +import com.datastax.oss.driver.internal.core.protocol.ByteBufPrimitiveCodecTest; +import com.datastax.oss.protocol.internal.util.Bytes; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.function.Supplier; +import org.apache.tinkerpop.gremlin.structure.io.Buffer; +import org.junit.Test; +import org.junit.runner.RunWith; + +/** + * Note: like {@link ByteBufPrimitiveCodecTest} we don't test trivial methods that simply delegate + * to the underlying Buffer, nor default implementations inherited from {@link + * com.datastax.oss.protocol.internal.PrimitiveCodec}. + */ +@RunWith(DataProviderRunner.class) +public class TinkerpopBufferPrimitiveCodecTest { + + private static final DseNettyBufferFactory factory = new DseNettyBufferFactory(); + private final TinkerpopBufferPrimitiveCodec codec = new TinkerpopBufferPrimitiveCodec(factory); + + @Test + public void should_concatenate() { + Buffer left = factory.withBytes(0xca, 0xfe); + Buffer right = factory.withBytes(0xba, 0xbe); + assertThat(codec.concat(left, right)).containsExactly("0xcafebabe"); + } + + @Test + public void should_read_inet_v4() { + Buffer source = + factory.withBytes( + // length (as a byte) + 0x04, + // address + 0x7f, + 0x00, + 0x00, + 0x01, + // port (as an int) + 0x00, + 0x00, + 0x23, + 0x52); + InetSocketAddress inet = codec.readInet(source); + assertThat(inet.getAddress().getHostAddress()).isEqualTo("127.0.0.1"); + assertThat(inet.getPort()).isEqualTo(9042); + } + + @Test + public void should_read_inet_v6() { + Buffer lengthAndAddress = factory.heap(17); + lengthAndAddress.writeByte(16); + lengthAndAddress.writeLong(0); + lengthAndAddress.writeLong(1); + Buffer source = + codec.concat( + lengthAndAddress, + // port (as an int) + factory.withBytes(0x00, 0x00, 0x23, 0x52)); + InetSocketAddress inet = codec.readInet(source); + assertThat(inet.getAddress().getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1"); + assertThat(inet.getPort()).isEqualTo(9042); + } + + @Test + public void should_fail_to_read_inet_if_length_invalid() { + Buffer source = + factory.withBytes( + // length (as a byte) + 0x03, + // address + 0x7f, + 0x00, + 0x01, + // port (as an int) + 0x00, + 0x00, + 0x23, + 0x52); + assertThatThrownBy(() -> codec.readInet(source)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Invalid address length: 3 ([127, 0, 1])"); + } + + @Test + public void should_read_inetaddr_v4() { + Buffer source = + factory.withBytes( + // length (as a byte) + 0x04, + // address + 0x7f, + 0x00, + 0x00, + 0x01); + InetAddress inetAddr = 
codec.readInetAddr(source);
+    assertThat(inetAddr.getHostAddress()).isEqualTo("127.0.0.1");
+  }
+
+  @Test
+  public void should_read_inetaddr_v6() {
+    Buffer source = factory.heap(17);
+    source.writeByte(16);
+    source.writeLong(0);
+    source.writeLong(1);
+    InetAddress inetAddr = codec.readInetAddr(source);
+    assertThat(inetAddr.getHostAddress()).isEqualTo("0:0:0:0:0:0:0:1");
+  }
+
+  @Test
+  public void should_fail_to_read_inetaddr_if_length_invalid() {
+    Buffer source =
+        factory.withBytes(
+            // length (as a byte)
+            0x03,
+            // address
+            0x7f,
+            0x00,
+            0x01);
+    assertThatThrownBy(() -> codec.readInetAddr(source))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessage("Invalid address length: 3 ([127, 0, 1])");
+  }
+
+  @Test
+  public void should_read_bytes() {
+    Buffer source =
+        factory.withBytes(
+            // length (as an int)
+            0x00,
+            0x00,
+            0x00,
+            0x04,
+            // contents
+            0xca,
+            0xfe,
+            0xba,
+            0xbe);
+    ByteBuffer bytes = codec.readBytes(source);
+    assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe");
+  }
+
+  @Test
+  public void should_read_null_bytes() {
+    Buffer source = factory.withBytes(0xFF, 0xFF, 0xFF, 0xFF); // -1 (as an int)
+    assertThat(codec.readBytes(source)).isNull();
+  }
+
+  @Test
+  public void should_read_short_bytes() {
+    Buffer source =
+        factory.withBytes(
+            // length (as an unsigned short)
+            0x00,
+            0x04,
+            // contents
+            0xca,
+            0xfe,
+            0xba,
+            0xbe);
+    assertThat(Bytes.toHexString(codec.readShortBytes(source))).isEqualTo("0xcafebabe");
+  }
+
+  @DataProvider
+  public static Object[][] bufferTypes() {
+    return new Object[][] {
+      {(Supplier<Buffer>) factory::heap},
+      {(Supplier<Buffer>) factory::io},
+      {(Supplier<Buffer>) factory::direct}
+    };
+  }
+
+  @Test
+  @UseDataProvider("bufferTypes")
+  public void should_read_string(Supplier<Buffer> supplier) {
+    Buffer source =
+        factory.withBytes(
+            supplier,
+            // length (as an unsigned short)
+            0x00,
+            0x05,
+            // UTF-8 contents
+            0x68,
+            0x65,
+            0x6c,
+            0x6c,
+            0x6f);
+    assertThat(codec.readString(source)).isEqualTo("hello");
+  }
+
+  @Test
+  public void should_fail_to_read_string_if_not_enough_characters() {
+    Buffer source = factory.heap();
+    source.writeShort(4);
+
+    assertThatThrownBy(() -> codec.readString(source))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4");
+  }
+
+  @Test
+  public void should_read_long_string() {
+    Buffer source =
+        factory.withBytes(
+            // length (as an int)
+            0x00,
+            0x00,
+            0x00,
+            0x05,
+            // UTF-8 contents
+            0x68,
+            0x65,
+            0x6c,
+            0x6c,
+            0x6f);
+    assertThat(codec.readLongString(source)).isEqualTo("hello");
+  }
+
+  @Test
+  public void should_fail_to_read_long_string_if_not_enough_characters() {
+    Buffer source = factory.heap(4, 4);
+    source.writeInt(4);
+
+    assertThatThrownBy(() -> codec.readLongString(source))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4");
+  }
+
+  @Test
+  public void should_write_inet_v4() throws Exception {
+    Buffer dest = factory.heap(1 + 4 + 4);
+    InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9042);
+    codec.writeInet(inet, dest);
+    assertThat(dest)
+        .containsExactly(
+            "0x04" // size as a byte
+                + "7f000001" // address
+                + "00002352" // port
+            );
+  }
+
+  @Test
+  public void should_write_inet_v6() throws Exception {
+    Buffer dest = factory.heap(1 + 16 + 4);
+    InetSocketAddress inet = new InetSocketAddress(InetAddress.getByName("::1"), 9042);
+    codec.writeInet(inet, dest);
+    assertThat(dest)
.containsExactly( + "0x10" // size as a byte + + "00000000000000000000000000000001" // address + + "00002352" // port + ); + } + + @Test + public void should_write_inetaddr_v4() throws Exception { + Buffer dest = factory.heap(1 + 4); + InetAddress inetAddr = InetAddress.getByName("127.0.0.1"); + codec.writeInetAddr(inetAddr, dest); + assertThat(dest) + .containsExactly( + "0x04" // size as a byte + + "7f000001" // address + ); + } + + @Test + public void should_write_inetaddr_v6() throws Exception { + Buffer dest = factory.heap(1 + 16); + InetAddress inetAddr = InetAddress.getByName("::1"); + codec.writeInetAddr(inetAddr, dest); + Assertions.assertThat(dest) + .containsExactly( + "0x10" // size as a byte + + "00000000000000000000000000000001" // address + ); + } + + @Test + public void should_write_string() { + Buffer dest = factory.heap(); + codec.writeString("hello", dest); + assertThat(dest) + .containsExactly( + "0x0005" // size as an unsigned short + + "68656c6c6f" // UTF-8 contents + ); + } + + @Test + public void should_write_long_string() { + Buffer dest = factory.heap(9); + codec.writeLongString("hello", dest); + assertThat(dest) + .containsExactly( + "0x00000005" + + // size as an int + "68656c6c6f" // UTF-8 contents + ); + } + + @Test + public void should_write_bytes() { + Buffer dest = factory.heap(8); + codec.writeBytes(Bytes.fromHexString("0xcafebabe"), dest); + assertThat(dest) + .containsExactly( + "0x00000004" + + // size as an int + "cafebabe"); + } + + @Test + public void should_write_short_bytes() { + Buffer dest = factory.heap(6); + codec.writeShortBytes(new byte[] {(byte) 0xca, (byte) 0xfe, (byte) 0xba, (byte) 0xbe}, dest); + assertThat(dest) + .containsExactly( + "0x0004" + + // size as an unsigned short + "cafebabe"); + } + + @Test + public void should_write_null_bytes() { + Buffer dest = factory.heap(4); + codec.writeBytes((ByteBuffer) null, dest); + assertThat(dest).containsExactly("0xFFFFFFFF"); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java new file mode 100644 index 00000000000..9e4d019660c --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/GeometryCodecTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.dse.driver.internal.core.type.codec.geometry;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.dse.driver.api.core.data.geometry.Geometry;
+
+public abstract class GeometryCodecTest<G extends Geometry, C extends GeometryCodec<G>> {
+
+  private C codec;
+
+  protected GeometryCodecTest(C codec) {
+    this.codec = codec;
+  }
+
+  // Subclasses re-declare these methods with @Test and @UseDataProvider to supply
+  // their own fixtures.
+  public void should_format(G input, String expected) {
+    assertThat(codec.format(input)).isEqualTo(expected);
+  }
+
+  public void should_parse(String input, G expected) {
+    assertThat(codec.parse(input)).isEqualTo(expected);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java
new file mode 100644
index 00000000000..ba71026ac2c
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/LineStringCodecTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.internal.core.type.codec.geometry;
+
+import com.datastax.dse.driver.api.core.data.geometry.LineString;
+import com.datastax.dse.driver.internal.core.data.geometry.DefaultLineString;
+import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class LineStringCodecTest extends GeometryCodecTest<LineString, LineStringCodec> {
+
+  private static DefaultLineString lineString =
+      new DefaultLineString(
+          new DefaultPoint(30, 10), new DefaultPoint(10, 30), new DefaultPoint(40, 40));
+
+  public LineStringCodecTest() {
+    super(new LineStringCodec());
+  }
+
+  @DataProvider
+  public static Object[][] serde() {
+    return new Object[][] {{null, null}, {lineString, lineString}};
+  }
+
+  @DataProvider
+  public static Object[][] format() {
+    return new Object[][] {{null, "NULL"}, {lineString, "'LINESTRING (30 10, 10 30, 40 40)'"}};
+  }
+
+  @DataProvider
+  public static Object[][] parse() {
+    return new Object[][] {
+      {null, null},
+      {"", null},
+      {" ", null},
+      {"NULL", null},
+      {" NULL ", null},
+      {"'LINESTRING (30 10, 10 30, 40 40)'", lineString},
+      {" ' LineString (30 10, 10 30, 40 40 ) ' ", lineString}
+    };
+  }
+
+  @Test
+  @UseDataProvider("format")
+  @Override
+  public void should_format(LineString input, String expected) {
+    super.should_format(input, expected);
+  }
+
+  @Test
+  @UseDataProvider("parse")
+  @Override
+  public void should_parse(String input, LineString expected) {
+    super.should_parse(input, expected);
+  }
+}
diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java
new file mode 100644
index 00000000000..7948f4d758a
--- /dev/null
+++ b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PointCodecTest.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.core.type.codec.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class PointCodecTest extends GeometryCodecTest<Point, PointCodec> { + + public PointCodecTest() { + super(new PointCodec()); + } + + @DataProvider + public static Object[][] serde() { + return new Object[][] { + {null, null}, + {Point.fromCoordinates(1, 2), Point.fromCoordinates(1, 2)}, + {Point.fromCoordinates(-1.1, -2.2), Point.fromCoordinates(-1.1, -2.2)} + }; + } + + @DataProvider + public static Object[][] format() { + return new Object[][] { + {null, "NULL"}, + {Point.fromCoordinates(1, 2), "'POINT (1 2)'"}, + {Point.fromCoordinates(-1.1, -2.2), "'POINT (-1.1 -2.2)'"} + }; + } + + @DataProvider + public static Object[][] parse() { + return new Object[][] { + {null, null}, + {"", null}, + {" ", null}, + {"NULL", null}, + {" NULL ", null}, + {"'POINT ( 1 2 )'", Point.fromCoordinates(1, 2)}, + {"'POINT ( 1.0 2.0 )'", Point.fromCoordinates(1, 2)}, + {"' point ( -1.1 -2.2 )'", Point.fromCoordinates(-1.1, -2.2)}, + {" ' Point ( -1.1 -2.2 ) ' ", Point.fromCoordinates(-1.1, -2.2)} + }; + } + + @Test + @UseDataProvider("format") + @Override + public void should_format(Point input, String expected) { + super.should_format(input, expected); + } + + @Test + @UseDataProvider("parse") + @Override + public void should_parse(String input, Point expected) { + super.should_parse(input, expected); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java new file mode 100644 index 00000000000..290dabe7519 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/geometry/PolygonCodecTest.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.type.codec.geometry; + +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPoint; +import com.datastax.dse.driver.internal.core.data.geometry.DefaultPolygon; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class PolygonCodecTest extends GeometryCodecTest<Polygon, PolygonCodec> { + private static Polygon polygon = + new DefaultPolygon( + new DefaultPoint(30, 10), + new DefaultPoint(10, 20), + new DefaultPoint(20, 40), + new DefaultPoint(40, 40)); + + public PolygonCodecTest() { + super(new PolygonCodec()); + } + + @DataProvider + public static Object[][] serde() { + return new Object[][] {{null, null}, {polygon, polygon}}; + } + + @DataProvider + public static Object[][] format() { + return new Object[][] { + {null, "NULL"}, {polygon, "'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'"} + }; + } + + @DataProvider + public static Object[][] parse() { + return new Object[][] { + {null, null}, + {"", null}, + {" ", null}, + {"NULL", null}, + {" NULL ", null}, + {"'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'", polygon}, + {" ' Polygon ( ( 30 10, 40 40, 20 40, 10 20, 30 10 ) ) ' ", polygon} + }; + } + + @Test + @UseDataProvider("format") + @Override + public void should_format(Polygon input, String expected) { + super.should_format(input, expected); + } + + @Test + @UseDataProvider("parse") + @Override + public void should_parse(String input, Polygon expected) { + super.should_parse(input, expected); + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java new file mode 100644 index 00000000000..b9b618b8dd3 --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/type/codec/time/DateRangeCodecTest.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.core.type.codec.time; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.data.time.DateRange; +import com.datastax.dse.driver.api.core.type.codec.DseTypeCodecs; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.shaded.guava.common.base.MoreObjects; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.nio.ByteBuffer; +import java.text.ParseException; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class DateRangeCodecTest { + + @Test + @UseDataProvider("dateRanges") + public void should_encode_and_decode(DateRange dateRange) { + TypeCodec<DateRange> codec = DseTypeCodecs.DATE_RANGE; + DateRange decoded = + codec.decode(codec.encode(dateRange, ProtocolVersion.DEFAULT), ProtocolVersion.DEFAULT); + assertThat(decoded).isEqualTo(dateRange); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_unknown_date_range_type() { + DseTypeCodecs.DATE_RANGE.decode(ByteBuffer.wrap(new byte[] {127}), ProtocolVersion.DEFAULT); + } + + @Test + @UseDataProvider("dateRangeStrings") + public void should_format_and_parse(String dateRangeString) { + TypeCodec<DateRange> codec = DseTypeCodecs.DATE_RANGE; + String formatted = codec.format(codec.parse(dateRangeString)); + assertThat(formatted).isEqualTo(MoreObjects.firstNonNull(dateRangeString, "NULL")); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_invalid_string() { + DseTypeCodecs.DATE_RANGE.parse("foo"); + } + + @DataProvider + public static Object[][] dateRanges() throws ParseException { + return new Object[][] { + {null}, + {DateRange.parse("[2011-01 TO 2015]")}, + {DateRange.parse("[2010-01-02 TO 2015-05-05T13]")}, + {DateRange.parse("[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]")}, + {DateRange.parse("[2010-01-01T15 TO 2016-02]")}, + {DateRange.parse("[1500 TO 1501]")}, + {DateRange.parse("[0001-01-01 TO 0001-01-01]")}, + {DateRange.parse("[0001-01-01 TO 0001-01-02]")}, + {DateRange.parse("[0000-01-01 TO 0000-01-01]")}, + {DateRange.parse("[0000-01-01 TO 0000-01-02]")}, + {DateRange.parse("[-0001-01-01 TO -0001-01-01]")}, + {DateRange.parse("[-0001-01-01 TO -0001-01-02]")}, + {DateRange.parse("[* TO 2014-12-01]")}, + {DateRange.parse("[1999 TO *]")}, + {DateRange.parse("[* TO *]")}, + {DateRange.parse("-0009")}, + {DateRange.parse("2000-11")}, + {DateRange.parse("*")} + }; + } + + @DataProvider + public static Object[][] dateRangeStrings() { + return new Object[][] { + {null}, + {"NULL"}, + {"'[2011-01 TO 2015]'"}, + {"'[2010-01-02 TO 2015-05-05T13]'"}, + {"'[1973-06-30T13:57:28.123Z TO 1999-05-05T14:14:59]'"}, + {"'[2010-01-01T15 TO 2016-02]'"}, + {"'[1500 TO 1501]'"}, + {"'[0001-01-01 TO 0001-01-01]'"}, + {"'[0001-01-01 TO 0001-01-02]'"}, + {"'[0000-01-01 TO 0000-01-01]'"}, + {"'[0000-01-01 TO 0000-01-02]'"}, + {"'[-0001-01-01 TO -0001-01-01]'"}, + {"'[-0001-01-01 TO -0001-01-02]'"}, + {"'[* TO 2014-12-01]'"}, + {"'[1999 TO *]'"}, + {"'[* TO *]'"}, + {"'-0009'"}, + {"'2000-11'"}, + {"'*'"} + }; + } +} diff --git a/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java 
b/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java new file mode 100644 index 00000000000..5cf8a67f84b --- /dev/null +++ b/core/src/test/java/com/datastax/dse/driver/internal/core/util/concurrent/BoundedConcurrentQueueTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.core.util.concurrent; + +import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.concurrent.CompletionStage; +import org.junit.Test; + +public class BoundedConcurrentQueueTest { + + @Test + public void should_dequeue_null_when_empty() { + BoundedConcurrentQueue<Integer> queue = new BoundedConcurrentQueue<>(4); + assertThat(queue.peek()).isNull(); + assertThat(queue.poll()).isNull(); + } + + @Test + public void should_enqueue_and_dequeue_while_not_full() { + BoundedConcurrentQueue<Integer> queue = new BoundedConcurrentQueue<>(4); + + assertThatStage(queue.offer(1)).isSuccess(e -> assertThat(e).isEqualTo(1)); + assertThat(queue.peek()).isEqualTo(1); + assertThat(queue.poll()).isEqualTo(1); + + assertThatStage(queue.offer(2)).isSuccess(e -> assertThat(e).isEqualTo(2)); + assertThatStage(queue.offer(3)).isSuccess(e -> assertThat(e).isEqualTo(3)); + assertThatStage(queue.offer(4)).isSuccess(e -> assertThat(e).isEqualTo(4)); + + assertThat(queue.peek()).isEqualTo(2); + assertThat(queue.poll()).isEqualTo(2); + assertThat(queue.peek()).isEqualTo(3); + assertThat(queue.poll()).isEqualTo(3); + assertThat(queue.peek()).isEqualTo(4); + assertThat(queue.poll()).isEqualTo(4); + assertThat(queue.poll()).isNull(); + } + + @Test + public void should_delay_insertion_when_full_until_space_available() { + BoundedConcurrentQueue<Integer> queue = new BoundedConcurrentQueue<>(4); + + assertThatStage(queue.offer(1)).isSuccess(e -> assertThat(e).isEqualTo(1)); + assertThatStage(queue.offer(2)).isSuccess(e -> assertThat(e).isEqualTo(2)); + assertThatStage(queue.offer(3)).isSuccess(e -> assertThat(e).isEqualTo(3)); + assertThatStage(queue.offer(4)).isSuccess(e -> assertThat(e).isEqualTo(4)); + + CompletionStage<Integer> enqueue5 = queue.offer(5); + assertThat(enqueue5).isNotDone(); + + assertThat(queue.poll()).isEqualTo(1); + assertThatStage(enqueue5).isSuccess(e -> assertThat(e).isEqualTo(5)); + } + + @Test(expected = IllegalStateException.class) + public void should_fail_to_insert_when_other_insert_already_pending() { + BoundedConcurrentQueue<Integer> queue = new BoundedConcurrentQueue<>(1); + assertThatStage(queue.offer(1)).isSuccess(); + assertThatStage(queue.offer(2)).isNotDone(); + queue.offer(3); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/Assertions.java 
b/core/src/test/java/com/datastax/oss/driver/Assertions.java index 6137414a5db..8478053e6d8 100644 --- a/core/src/test/java/com/datastax/oss/driver/Assertions.java +++ b/core/src/test/java/com/datastax/oss/driver/Assertions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java b/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java index 917d572e721..4cd9c3ed358 100644 --- a/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java +++ b/core/src/test/java/com/datastax/oss/driver/ByteBufAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java b/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java index 67c22c56eeb..085134b28f2 100644 --- a/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java +++ b/core/src/test/java/com/datastax/oss/driver/DriverRunListener.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java b/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java index 866a4bc9f75..a0448c4b769 100644 --- a/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java +++ b/core/src/test/java/com/datastax/oss/driver/TestDataProviders.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.tngtech.java.junit.dataprovider.DataProvider; import java.util.Arrays; +import java.util.Locale; public class TestDataProviders { @@ -83,4 +86,24 @@ public static Object[][] combine(Object[][]... providers) { public static Object[][] booleans() { return fromList(true, false); } + + /** An arbitrary set of locales to use when testing locale-sensitive operations. */ + @DataProvider + public static Object[][] locales() { + return new Object[][] { + new Object[] {Locale.US}, + // non-latin alphabets + new Object[] {Locale.CHINA}, + new Object[] {Locale.JAPAN}, + new Object[] {Locale.KOREA}, + new Object[] {new Locale("gr") /* greek */}, + new Object[] {new Locale("ar") /* arabic */}, + // latin-based alphabets with extended character sets + new Object[] {new Locale("vi") /* vietnamese */}, + // JAVA-2883: Turkish is the most problematic locale as String.toLowerCase("TITLE") + // wouldn't return "title" but rather "tıtle", where 'ı' is the 'LATIN SMALL LETTER + // DOTLESS I' character specific to the Turkish language. + new Object[] {new Locale("tr") /* turkish*/}, + }; + } } diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java new file mode 100644 index 00000000000..4cd4c0fcd74 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/AllNodesFailedExceptionTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core; + +import static com.datastax.oss.driver.api.core.ConsistencyLevel.QUORUM; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.data.MapEntry.entry; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class AllNodesFailedExceptionTest { + + @Mock(name = "node1") + private Node node1; + + @Mock(name = "node2") + private Node node2; + + @SuppressWarnings("deprecation") + @Test + public void should_create_instance_from_map_of_first_errors() { + // given + UnavailableException e1 = new UnavailableException(node1, QUORUM, 2, 1); + ReadTimeoutException e2 = new ReadTimeoutException(node2, QUORUM, 2, 1, false); + Map<Node, Throwable> errors = ImmutableMap.of(node1, e1, node2, e2); + // when + AllNodesFailedException e = AllNodesFailedException.fromErrors(errors); + // then + assertThat(e) + .hasMessage( + "All 2 node(s) tried for the query failed " + + "(showing first 2 nodes, use getAllErrors() for more): " + + "node1: [%s], node2: [%s]", + e1, e2); + assertThat(e.getAllErrors()) + .hasEntrySatisfying(node1, list -> assertThat(list).containsExactly(e1)); + assertThat(e.getAllErrors()) + .hasEntrySatisfying(node2, list -> assertThat(list).containsExactly(e2)); + assertThat(e.getErrors()).containsEntry(node1, e1); + assertThat(e.getErrors()).containsEntry(node2, e2); + assertThat(e).hasSuppressedException(e1).hasSuppressedException(e2); + } + + @SuppressWarnings("deprecation") + @Test + public void should_create_instance_from_list_of_all_errors() { + // given + UnavailableException e1a = new UnavailableException(node1, QUORUM, 2, 1); + ReadTimeoutException e1b = new ReadTimeoutException(node1, QUORUM, 2, 1, false); + ReadTimeoutException e2a = new ReadTimeoutException(node2, QUORUM, 2, 1, false); + List<Entry<Node, Throwable>> errors = + ImmutableList.of(entry(node1, e1a), entry(node1, e1b), entry(node2, e2a)); + // when + AllNodesFailedException e = AllNodesFailedException.fromErrors(errors); + // then + assertThat(e) + .hasMessage( + "All 2 node(s) tried for the query failed " + + "(showing first 2 nodes, use getAllErrors() for more): " + + "node1: [%s, %s], node2: [%s]", + e1a, e1b, e2a); + assertThat(e.getAllErrors()) + .hasEntrySatisfying(node1, list -> assertThat(list).containsExactly(e1a, e1b)); + assertThat(e.getAllErrors()) + .hasEntrySatisfying(node2, list -> assertThat(list).containsExactly(e2a)); + assertThat(e.getErrors()).containsEntry(node1, e1a); + assertThat(e.getErrors()).containsEntry(node2, e2a); + assertThat(e) + .hasSuppressedException(e1a) + 
.hasSuppressedException(e1b) + .hasSuppressedException(e2a); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java index db440007e92..5c7203b8f8d 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/CqlIdentifierTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +19,16 @@ import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.oss.driver.TestDataProviders; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Locale; import org.junit.Test; +import org.junit.runner.RunWith; +@RunWith(DataProviderRunner.class) public class CqlIdentifierTest { + @Test public void should_build_from_internal() { assertThat(CqlIdentifier.fromInternal("foo").asInternal()).isEqualTo("foo"); @@ -30,13 +39,22 @@ public void should_build_from_internal() { } @Test - public void should_build_from_valid_cql() { - assertThat(CqlIdentifier.fromCql("foo").asInternal()).isEqualTo("foo"); - assertThat(CqlIdentifier.fromCql("Foo").asInternal()).isEqualTo("foo"); - assertThat(CqlIdentifier.fromCql("\"Foo\"").asInternal()).isEqualTo("Foo"); - assertThat(CqlIdentifier.fromCql("\"foo bar\"").asInternal()).isEqualTo("foo bar"); - assertThat(CqlIdentifier.fromCql("\"foo\"\"bar\"").asInternal()).isEqualTo("foo\"bar"); - assertThat(CqlIdentifier.fromCql("\"create\"").asInternal()).isEqualTo("create"); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_build_from_valid_cql(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(CqlIdentifier.fromCql("foo").asInternal()).isEqualTo("foo"); + assertThat(CqlIdentifier.fromCql("Foo").asInternal()).isEqualTo("foo"); + assertThat(CqlIdentifier.fromCql("\"Foo\"").asInternal()).isEqualTo("Foo"); + assertThat(CqlIdentifier.fromCql("\"foo bar\"").asInternal()).isEqualTo("foo bar"); + assertThat(CqlIdentifier.fromCql("\"foo\"\"bar\"").asInternal()).isEqualTo("foo\"bar"); + assertThat(CqlIdentifier.fromCql("\"create\"").asInternal()).isEqualTo("create"); + // JAVA-2883: this would fail under turkish locale if it was used internally + assertThat(CqlIdentifier.fromCql("TITLE").asInternal()).isEqualTo("title"); + } finally { + Locale.setDefault(def); + } } @Test(expected = IllegalArgumentException.class) diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java 
b/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java index 1525d95b1da..61beb5cea51 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/VersionAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java index 4a61e246827..bce30816f9c 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/VersionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java new file mode 100644 index 00000000000..44d2acfbb2e --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProviderTest.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.auth.PlainTextAuthProviderBase.Credentials; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.Strict.class) +public class ProgrammaticPlainTextAuthProviderTest { + + @Mock private EndPoint endpoint; + + @Test + public void should_return_correct_credentials_without_authorization_id() { + // given + ProgrammaticPlainTextAuthProvider provider = + new ProgrammaticPlainTextAuthProvider("user", "pass"); + // when + Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); + // then + assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); + assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); + assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); + } + + @Test + public void should_return_correct_credentials_with_authorization_id() { + // given + ProgrammaticPlainTextAuthProvider provider = + new ProgrammaticPlainTextAuthProvider("user", "pass", "proxy"); + // when + Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); + // then + assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); + assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); + assertThat(credentials.getAuthorizationId()).isEqualTo("proxy".toCharArray()); + } + + @Test + public void should_change_username() { + // given + ProgrammaticPlainTextAuthProvider provider = + new ProgrammaticPlainTextAuthProvider("user", "pass"); + // when + provider.setUsername("user2"); + Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); + // then + assertThat(credentials.getUsername()).isEqualTo("user2".toCharArray()); + assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); + assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); + } + + @Test + public void should_change_password() { + // given + ProgrammaticPlainTextAuthProvider provider = + new ProgrammaticPlainTextAuthProvider("user", "pass"); + // when + provider.setPassword("pass2"); + Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); + // then + assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); + assertThat(credentials.getPassword()).isEqualTo("pass2".toCharArray()); + assertThat(credentials.getAuthorizationId()).isEqualTo(new char[0]); + } + + @Test + public void should_change_authorization_id() { + // given + ProgrammaticPlainTextAuthProvider provider = + new ProgrammaticPlainTextAuthProvider("user", "pass", "proxy"); + // when + provider.setAuthorizationId("proxy2"); + Credentials credentials = provider.getCredentials(endpoint, "irrelevant"); + // then + assertThat(credentials.getUsername()).isEqualTo("user".toCharArray()); + assertThat(credentials.getPassword()).isEqualTo("pass".toCharArray()); + assertThat(credentials.getAuthorizationId()).isEqualTo("proxy2".toCharArray()); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java new file mode 100644 index 00000000000..ec0410ed868 --- /dev/null +++ 
b/core/src/test/java/com/datastax/oss/driver/api/core/config/OptionsMapTest.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.config; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.internal.SerializationHelper; +import java.time.Duration; +import java.util.function.Consumer; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class OptionsMapTest { + @Mock private Consumer<OptionsMap> mockListener; + + @Test + public void should_serialize_and_deserialize() { + // Given + OptionsMap initial = OptionsMap.driverDefaults(); + Duration slowTimeout = Duration.ofSeconds(30); + initial.put("slow", TypedDriverOption.REQUEST_TIMEOUT, slowTimeout); + initial.addChangeListener(mockListener); + + // When + OptionsMap deserialized = SerializationHelper.serializeAndDeserialize(initial); + + // Then + assertThat(deserialized.get(TypedDriverOption.REQUEST_TIMEOUT)) + .isEqualTo(Duration.ofSeconds(2)); + assertThat(deserialized.get("slow", TypedDriverOption.REQUEST_TIMEOUT)).isEqualTo(slowTimeout); + // Listeners are transient + assertThat(deserialized.removeChangeListener(mockListener)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java new file mode 100644 index 00000000000..eee4000a459 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/config/TypedDriverOptionTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.config; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.HashSet; +import java.util.Set; +import org.junit.Test; + +public class TypedDriverOptionTest { + + /** + * Checks that every built-in {@link DriverOption} has an equivalent constant in {@link + * TypedDriverOption}. + */ + @Test + public void should_have_equivalents_for_all_builtin_untyped_options() { + Set<DriverOption> optionsThatHaveATypedEquivalent = new HashSet<>(); + for (TypedDriverOption<?> typedOption : TypedDriverOption.builtInValues()) { + optionsThatHaveATypedEquivalent.add(typedOption.getRawOption()); + } + + // These options are only used internally to compare policy configurations across profiles. + // Users never use them directly, so they don't need typed equivalents. + Set<DriverOption> exclusions = + ImmutableSet.of( + DefaultDriverOption.LOAD_BALANCING_POLICY, + DefaultDriverOption.RETRY_POLICY, + DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY); + + for (DriverOption option : + ImmutableSet.<DriverOption>builder() + .add(DefaultDriverOption.values()) + .add(DseDriverOption.values()) + .build()) { + if (!exclusions.contains(option)) { + assertThat(optionsThatHaveATypedEquivalent) + .as( + "Couldn't find a typed equivalent for %s.%s. " + + "You need to either add a constant in %s, or an exclusion in this test.", + option.getClass().getSimpleName(), option, TypedDriverOption.class.getSimpleName()) + .contains(option); + } + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java new file mode 100644 index 00000000000..9904b1e27d7 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementBuilderTest.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.cql; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import java.nio.ByteBuffer; +import org.junit.Test; + +public class StatementBuilderTest { + + private static class MockSimpleStatementBuilder + extends StatementBuilder<MockSimpleStatementBuilder, SimpleStatement> { + + public MockSimpleStatementBuilder() { + super(); + } + + public MockSimpleStatementBuilder(SimpleStatement template) { + super(template); + } + + @Override + public SimpleStatement build() { + + SimpleStatement rv = mock(SimpleStatement.class); + when(rv.isTracing()).thenReturn(this.tracing); + when(rv.getRoutingKey()).thenReturn(this.routingKey); + return rv; + } + } + + @Test + public void should_handle_set_tracing_without_args() { + + MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); + assertThat(builder.build().isTracing()).isFalse(); + builder.setTracing(); + assertThat(builder.build().isTracing()).isTrue(); + } + + @Test + public void should_handle_set_tracing_with_args() { + + MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); + assertThat(builder.build().isTracing()).isFalse(); + builder.setTracing(true); + assertThat(builder.build().isTracing()).isTrue(); + builder.setTracing(false); + assertThat(builder.build().isTracing()).isFalse(); + } + + @Test + public void should_override_set_tracing_in_template() { + + SimpleStatement template = SimpleStatement.builder("select * from system.peers").build(); + MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(template); + assertThat(builder.build().isTracing()).isFalse(); + builder.setTracing(true); + assertThat(builder.build().isTracing()).isTrue(); + + template = SimpleStatement.builder("select * from system.peers").setTracing().build(); + builder = new MockSimpleStatementBuilder(template); + assertThat(builder.build().isTracing()).isTrue(); + builder.setTracing(false); + assertThat(builder.build().isTracing()).isFalse(); + } + + @Test + public void should_match_set_routing_key_vararg() { + + ByteBuffer buff1 = ByteBuffer.wrap("the quick brown fox".getBytes(Charsets.UTF_8)); + ByteBuffer buff2 = ByteBuffer.wrap("jumped over the lazy dog".getBytes(Charsets.UTF_8)); + + Statement<SimpleStatement> expectedStmt = + SimpleStatement.builder("select * from system.peers").build().setRoutingKey(buff1, buff2); + + MockSimpleStatementBuilder builder = new MockSimpleStatementBuilder(); + Statement<SimpleStatement> builderStmt = builder.setRoutingKey(buff1, buff2).build(); + assertThat(expectedStmt.getRoutingKey()).isEqualTo(builderStmt.getRoutingKey()); + + /* Confirm that order matters here */ + builderStmt = builder.setRoutingKey(buff2, buff1).build(); + assertThat(expectedStmt.getRoutingKey()).isNotEqualTo(builderStmt.getRoutingKey()); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java new file mode 100644 index 00000000000..af2dccd0432 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/cql/StatementProfileTest.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.cql; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.cql.DefaultBoundStatement; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Collections; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class StatementProfileTest { + + private static final DriverExecutionProfile PROFILE = mock(DriverExecutionProfile.class); + private static final String NAME = "mockProfileName"; + + @Test + @UseDataProvider("statements") + public void should_set_profile_and_name_on_statement( + Statement<?> statement, + Operation operation1, + Operation operation2, + String expectedName, + DriverExecutionProfile expectedProfile) { + + statement = operation1.applyTo(statement); + statement = operation2.applyTo(statement); + + assertThat(statement.getExecutionProfileName()).isEqualTo(expectedName); + assertThat(statement.getExecutionProfile()).isEqualTo(expectedProfile); + } + + @Test + @UseDataProvider("builders") + public void should_set_profile_and_name_on_builder( + StatementBuilder<?, ?> builder, + Operation operation1, + Operation operation2, + String expectedName, + DriverExecutionProfile expectedProfile) { + + builder = operation1.applyTo(builder); + builder = operation2.applyTo(builder); + + Statement<?> statement = builder.build(); + + assertThat(statement.getExecutionProfileName()).isEqualTo(expectedName); + assertThat(statement.getExecutionProfile()).isEqualTo(expectedProfile); + } + + private static Object[][] scenarios() { + return new Object[][] { + // operation1, operation2, expectedName, expectedProfile + + // only one set: + new Object[] {setProfile(PROFILE), noop(), null, PROFILE}, + new Object[] {setName(NAME), noop(), NAME, null}, + + // last one wins: + new Object[] {setProfile(PROFILE), setName(NAME), NAME, null}, + new Object[] {setName(NAME), setProfile(PROFILE), null, PROFILE}, + + // null does not unset other: + new Object[] {setProfile(PROFILE), setName(null), null, PROFILE}, + new Object[] {setName(NAME), setProfile(null), NAME, null}, + }; + } + + @DataProvider + public static Object[][] statements() { + SimpleStatement simpleStatement = SimpleStatement.newInstance("mock query"); + Object[][] statements = + TestDataProviders.fromList( + simpleStatement, + newBoundStatement(), + BatchStatement.newInstance(BatchType.LOGGED, simpleStatement)); + + return TestDataProviders.combine(statements, scenarios()); + } + + @DataProvider + public static Object[][] builders() 
{ + SimpleStatement simpleStatement = SimpleStatement.newInstance("mock query"); + Object[][] builders = + TestDataProviders.fromList( + SimpleStatement.builder(simpleStatement), + new BoundStatementBuilder(newBoundStatement()), + BatchStatement.builder(BatchType.LOGGED).addStatement(simpleStatement)); + + return TestDataProviders.combine(builders, scenarios()); + } + + private interface Operation { + + Statement<?> applyTo(Statement<?> statement); + + StatementBuilder<?, ?> applyTo(StatementBuilder<?, ?> builder); + } + + private static Operation setProfile(DriverExecutionProfile profile) { + return new Operation() { + @Override + public Statement<?> applyTo(Statement<?> statement) { + return statement.setExecutionProfile(profile); + } + + @Override + public StatementBuilder<?, ?> applyTo(StatementBuilder<?, ?> builder) { + return builder.setExecutionProfile(profile); + } + }; + } + + private static Operation setName(String name) { + return new Operation() { + @Override + public Statement<?> applyTo(Statement<?> statement) { + return statement.setExecutionProfileName(name); + } + + @Override + public StatementBuilder<?, ?> applyTo(StatementBuilder<?, ?> builder) { + return builder.setExecutionProfileName(name); + } + }; + } + + private static Operation noop() { + return new Operation() { + @Override + public Statement<?> applyTo(Statement<?> statement) { + return statement; + } + + @Override + public StatementBuilder<?, ?> applyTo(StatementBuilder<?, ?> builder) { + return builder; + } + }; + } + + private static BoundStatement newBoundStatement() { + // Mock the minimum state needed to create a DefaultBoundStatement that can also be used to + // initialize a builder + PreparedStatement preparedStatement = mock(PreparedStatement.class); + ColumnDefinitions variableDefinitions = mock(ColumnDefinitions.class); + when(preparedStatement.getVariableDefinitions()).thenReturn(variableDefinitions); + return new DefaultBoundStatement( + preparedStatement, + variableDefinitions, + new ByteBuffer[0], + null, + null, + null, + null, + null, + Collections.emptyMap(), + null, + false, + Statement.NO_DEFAULT_TIMESTAMP, + null, + 5000, + null, + null, + Duration.ZERO, + null, + null, + null, + Statement.NO_NOW_IN_SECONDS); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java index a880f4a8579..f55453b3eba 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlDurationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,92 +21,120 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.fail; +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.internal.SerializationHelper; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; import java.time.ZonedDateTime; import java.time.temporal.ChronoUnit; import java.time.temporal.UnsupportedTemporalTypeException; +import java.util.Locale; import org.junit.Test; +import org.junit.runner.RunWith; +@RunWith(DataProviderRunner.class) public class CqlDurationTest { @Test - public void should_parse_from_string_with_standard_pattern() { - assertThat(CqlDuration.from("1y2mo")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("-1y2mo")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("1Y2MO")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("2w")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); - assertThat(CqlDuration.from("2d10h")) - .isEqualTo(CqlDuration.newInstance(0, 2, 10 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("2d")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("30h")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("30h20m")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("20m")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("56s")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("567ms")) - .isEqualTo(CqlDuration.newInstance(0, 0, 567 * CqlDuration.NANOS_PER_MILLI)); - assertThat(CqlDuration.from("1950us")) - .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); - assertThat(CqlDuration.from("1950µs")) - .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); - assertThat(CqlDuration.from("1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); - assertThat(CqlDuration.from("1950000NS")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); - assertThat(CqlDuration.from("-1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, -1950000)); - assertThat(CqlDuration.from("1y3mo2h10m")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_from_string_with_standard_pattern(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(CqlDuration.from("1y2mo")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); + assertThat(CqlDuration.from("-1y2mo")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); + assertThat(CqlDuration.from("1Y2MO")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); + assertThat(CqlDuration.from("2w")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); + assertThat(CqlDuration.from("2d10h")) + .isEqualTo(CqlDuration.newInstance(0, 2, 10 * CqlDuration.NANOS_PER_HOUR)); + 
assertThat(CqlDuration.from("2d")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); + assertThat(CqlDuration.from("30h")) + .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); + assertThat(CqlDuration.from("30h20m")) + .isEqualTo( + CqlDuration.newInstance( + 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); + assertThat(CqlDuration.from("20m")) + .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); + assertThat(CqlDuration.from("56s")) + .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); + assertThat(CqlDuration.from("567ms")) + .isEqualTo(CqlDuration.newInstance(0, 0, 567 * CqlDuration.NANOS_PER_MILLI)); + assertThat(CqlDuration.from("1950us")) + .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); + assertThat(CqlDuration.from("1950µs")) + .isEqualTo(CqlDuration.newInstance(0, 0, 1950 * CqlDuration.NANOS_PER_MICRO)); + assertThat(CqlDuration.from("1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); + assertThat(CqlDuration.from("1950000NS")).isEqualTo(CqlDuration.newInstance(0, 0, 1950000)); + assertThat(CqlDuration.from("-1950000ns")).isEqualTo(CqlDuration.newInstance(0, 0, -1950000)); + assertThat(CqlDuration.from("1y3mo2h10m")) + .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); + } finally { + Locale.setDefault(def); + } } @Test - public void should_parse_from_string_with_iso8601_pattern() { - assertThat(CqlDuration.from("P1Y2D")).isEqualTo(CqlDuration.newInstance(12, 2, 0)); - assertThat(CqlDuration.from("P1Y2M")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("P2W")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); - assertThat(CqlDuration.from("P1YT2H")) - .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("-P1Y2M")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("P2D")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("PT30H")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("PT30H20M")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("PT20M")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("PT56S")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("P1Y3MT2H10M")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_from_string_with_iso8601_pattern(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(CqlDuration.from("P1Y2D")).isEqualTo(CqlDuration.newInstance(12, 2, 0)); + assertThat(CqlDuration.from("P1Y2M")).isEqualTo(CqlDuration.newInstance(14, 0, 0)); + assertThat(CqlDuration.from("P2W")).isEqualTo(CqlDuration.newInstance(0, 14, 0)); + assertThat(CqlDuration.from("P1YT2H")) + .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); + assertThat(CqlDuration.from("-P1Y2M")).isEqualTo(CqlDuration.newInstance(-14, 0, 0)); + assertThat(CqlDuration.from("P2D")).isEqualTo(CqlDuration.newInstance(0, 2, 0)); + assertThat(CqlDuration.from("PT30H")) + .isEqualTo(CqlDuration.newInstance(0, 0, 30 * 
CqlDuration.NANOS_PER_HOUR)); + assertThat(CqlDuration.from("PT30H20M")) + .isEqualTo( + CqlDuration.newInstance( + 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); + assertThat(CqlDuration.from("PT20M")) + .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); + assertThat(CqlDuration.from("PT56S")) + .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); + assertThat(CqlDuration.from("P1Y3MT2H10M")) + .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); + } finally { + Locale.setDefault(def); + } } @Test - public void should_parse_from_string_with_iso8601_alternative_pattern() { - assertThat(CqlDuration.from("P0001-00-02T00:00:00")) - .isEqualTo(CqlDuration.newInstance(12, 2, 0)); - assertThat(CqlDuration.from("P0001-02-00T00:00:00")) - .isEqualTo(CqlDuration.newInstance(14, 0, 0)); - assertThat(CqlDuration.from("P0001-00-00T02:00:00")) - .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("-P0001-02-00T00:00:00")) - .isEqualTo(CqlDuration.newInstance(-14, 0, 0)); - assertThat(CqlDuration.from("P0000-00-02T00:00:00")) - .isEqualTo(CqlDuration.newInstance(0, 2, 0)); - assertThat(CqlDuration.from("P0000-00-00T30:00:00")) - .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); - assertThat(CqlDuration.from("P0000-00-00T30:20:00")) - .isEqualTo( - CqlDuration.newInstance( - 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("P0000-00-00T00:20:00")) - .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); - assertThat(CqlDuration.from("P0000-00-00T00:00:56")) - .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); - assertThat(CqlDuration.from("P0001-03-00T02:10:00")) - .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_from_string_with_iso8601_alternative_pattern(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(CqlDuration.from("P0001-00-02T00:00:00")) + .isEqualTo(CqlDuration.newInstance(12, 2, 0)); + assertThat(CqlDuration.from("P0001-02-00T00:00:00")) + .isEqualTo(CqlDuration.newInstance(14, 0, 0)); + assertThat(CqlDuration.from("P0001-00-00T02:00:00")) + .isEqualTo(CqlDuration.newInstance(12, 0, 2 * CqlDuration.NANOS_PER_HOUR)); + assertThat(CqlDuration.from("-P0001-02-00T00:00:00")) + .isEqualTo(CqlDuration.newInstance(-14, 0, 0)); + assertThat(CqlDuration.from("P0000-00-02T00:00:00")) + .isEqualTo(CqlDuration.newInstance(0, 2, 0)); + assertThat(CqlDuration.from("P0000-00-00T30:00:00")) + .isEqualTo(CqlDuration.newInstance(0, 0, 30 * CqlDuration.NANOS_PER_HOUR)); + assertThat(CqlDuration.from("P0000-00-00T30:20:00")) + .isEqualTo( + CqlDuration.newInstance( + 0, 0, 30 * CqlDuration.NANOS_PER_HOUR + 20 * CqlDuration.NANOS_PER_MINUTE)); + assertThat(CqlDuration.from("P0000-00-00T00:20:00")) + .isEqualTo(CqlDuration.newInstance(0, 0, 20 * CqlDuration.NANOS_PER_MINUTE)); + assertThat(CqlDuration.from("P0000-00-00T00:00:56")) + .isEqualTo(CqlDuration.newInstance(0, 0, 56 * CqlDuration.NANOS_PER_SECOND)); + assertThat(CqlDuration.from("P0001-03-00T02:10:00")) + .isEqualTo(CqlDuration.newInstance(15, 0, 130 * CqlDuration.NANOS_PER_MINUTE)); + } finally { + Locale.setDefault(def); + } } @Test @@ -163,4 +193,18 @@ public void 
should_subtract_from_temporal() { assertThat(dateTime.minus(CqlDuration.from("1h15s15ns"))) .isEqualTo("2018-10-03T22:59:44.999999985-07:00[America/Los_Angeles]"); } + + @Test + public void should_serialize_and_deserialize() throws Exception { + CqlDuration initial = CqlDuration.from("3mo2d15s"); + CqlDuration deserialized = SerializationHelper.serializeAndDeserialize(initial); + assertThat(deserialized).isEqualTo(initial); + } + + @Test + public void should_serialize_and_deserialize_negative() throws Exception { + CqlDuration initial = CqlDuration.from("-2d15m"); + CqlDuration deserialized = SerializationHelper.serializeAndDeserialize(initial); + assertThat(deserialized).isEqualTo(initial); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java new file mode 100644 index 00000000000..3e0872cb946 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/data/CqlVectorTest.java @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.data; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.internal.SerializationHelper; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.io.ByteArrayInputStream; +import java.io.ObjectInputStream; +import java.io.ObjectStreamException; +import java.time.LocalTime; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.commons.codec.DecoderException; +import org.apache.commons.codec.binary.Hex; +import org.assertj.core.util.Lists; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class CqlVectorTest { + + @DataProvider + public static Object[][] dataProvider() { + return new Object[][] { + {new Float[] {1.0f, 2.5f}}, + {new LocalTime[] {LocalTime.of(1, 2), LocalTime.of(3, 4)}}, + {new List[] {Arrays.asList(1, 2), Arrays.asList(3, 4)}}, + {new CqlVector[] {CqlVector.newInstance("a", "bc"), CqlVector.newInstance("d", "ef")}} + }; + } + + private void validate_built_vector(CqlVector<?> vec, Object[] expectedVals) { + assertThat(vec.size()).isEqualTo(2); + assertThat(vec.isEmpty()).isFalse(); + assertThat(vec.get(0)).isEqualTo(expectedVals[0]); + assertThat(vec.get(1)).isEqualTo(expectedVals[1]); + } + + @UseDataProvider("dataProvider") + @Test + public void should_build_vector_from_elements(Object[] vals) { + validate_built_vector(CqlVector.newInstance(vals), vals); + } + + @Test + @UseDataProvider("dataProvider") + public void should_build_vector_from_list(Object[] vals) { + validate_built_vector(CqlVector.newInstance(Lists.newArrayList(vals)), vals); + } + + @Test + public void should_throw_from_null_string() { + assertThatThrownBy( + () -> { + CqlVector.from(null, TypeCodecs.FLOAT); + }) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void should_throw_from_empty_string() { + + assertThatThrownBy( + () -> { + CqlVector.from("", TypeCodecs.FLOAT); + }) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void should_throw_when_building_with_nulls() { + + assertThatThrownBy( + () -> { + CqlVector.newInstance(1.1f, null, 2.2f); + }) + .isInstanceOf(IllegalArgumentException.class); + + Float[] theArray = new Float[] {1.1f, null, 2.2f}; + assertThatThrownBy( + () -> { + CqlVector.newInstance(theArray); + }) + .isInstanceOf(IllegalArgumentException.class); + + List<Float> theList = Lists.newArrayList(1.1f, null, 2.2f); + assertThatThrownBy( + () -> { + CqlVector.newInstance(theList); + }) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void should_build_empty_vector() { + CqlVector<Float> vector = CqlVector.newInstance(); + assertThat(vector.isEmpty()).isTrue(); + assertThat(vector.size()).isEqualTo(0); + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_behave_mostly_like_a_list(T[] vals) { + T[] theArray = Arrays.copyOf(vals, vals.length); + CqlVector<T> vector = CqlVector.newInstance(theArray); + assertThat(vector.get(0)).isEqualTo(theArray[0]); + vector.set(0, theArray[1]); + 
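// set(0, ...) writes through to the backing array, leaving the same element at both indices, hence the iterator assertions below + 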
assertThat(vector.get(0)).isEqualTo(theArray[1]); + assertThat(vector.isEmpty()).isFalse(); + assertThat(vector.size()).isEqualTo(2); + Iterator<T> iterator = vector.iterator(); + assertThat(iterator.next()).isEqualTo(theArray[1]); + assertThat(iterator.next()).isEqualTo(theArray[1]); + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_play_nicely_with_streams(T[] vals) { + CqlVector<T> vector = CqlVector.newInstance(vals); + List<String> results = + vector.stream() + .map(Object::toString) + .collect(Collectors.toCollection(() -> new ArrayList<>())); + for (int i = 0; i < vector.size(); ++i) { + assertThat(results.get(i)).isEqualTo(vector.get(i).toString()); + } + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_reflect_changes_to_mutable_list(T[] vals) { + List<T> theList = Lists.newArrayList(vals); + CqlVector<T> vector = CqlVector.newInstance(theList); + assertThat(vector.size()).isEqualTo(2); + assertThat(vector.get(1)).isEqualTo(vals[1]); + + T newVal = vals[0]; + theList.set(1, newVal); + assertThat(vector.size()).isEqualTo(2); + assertThat(vector.get(1)).isEqualTo(newVal); + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_reflect_changes_to_array(T[] vals) { + T[] theArray = Arrays.copyOf(vals, vals.length); + CqlVector<T> vector = CqlVector.newInstance(theArray); + assertThat(vector.size()).isEqualTo(2); + assertThat(vector.get(1)).isEqualTo(theArray[1]); + + T newVal = theArray[0]; + theArray[1] = newVal; + assertThat(vector.size()).isEqualTo(2); + assertThat(vector.get(1)).isEqualTo(newVal); + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_correctly_compare_vectors(T[] vals) { + CqlVector<T> vector1 = CqlVector.newInstance(vals); + CqlVector<T> vector2 = CqlVector.newInstance(vals); + CqlVector<T> vector3 = CqlVector.newInstance(Lists.newArrayList(vals)); + assertThat(vector1).isNotSameAs(vector2); + assertThat(vector1).isEqualTo(vector2); + assertThat(vector1).isNotSameAs(vector3); + assertThat(vector1).isEqualTo(vector3); + + T[] differentArgs = Arrays.copyOf(vals, vals.length); + T newVal = differentArgs[1]; + differentArgs[0] = newVal; + CqlVector<T> vector4 = CqlVector.newInstance(differentArgs); + assertThat(vector1).isNotSameAs(vector4); + assertThat(vector1).isNotEqualTo(vector4); + + T[] biggerArgs = Arrays.copyOf(vals, vals.length + 1); + biggerArgs[biggerArgs.length - 1] = newVal; + CqlVector<T> vector5 = CqlVector.newInstance(biggerArgs); + assertThat(vector1).isNotSameAs(vector5); + assertThat(vector1).isNotEqualTo(vector5); + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_serialize_and_deserialize(T[] vals) throws Exception { + CqlVector<T> initial = CqlVector.newInstance(vals); + CqlVector<T> deserialized = SerializationHelper.serializeAndDeserialize(initial); + assertThat(deserialized).isEqualTo(initial); + } + + @Test + public void should_serialize_and_deserialize_empty_vector() throws Exception { + CqlVector<Object> initial = CqlVector.newInstance(Collections.emptyList()); + CqlVector<Object> deserialized = SerializationHelper.serializeAndDeserialize(initial); + assertThat(deserialized).isEqualTo(initial); + } + + @Test + @UseDataProvider("dataProvider") + public <T> void should_serialize_and_deserialize_unserializable_list(T[] vals) throws Exception { + CqlVector<T> initial = + CqlVector.newInstance( + new AbstractList<T>() { + @Override + public T get(int index) { + return vals[index]; + } + + @Override + public int size() { + return vals.length; + } + }); + CqlVector<T> deserialized = 
SerializationHelper.serializeAndDeserialize(initial); + assertThat(deserialized).isEqualTo(initial); + } + + @Test + public void should_not_use_preallocate_serialized_size() throws DecoderException { + // serialized CqlVector(1.0f, 2.5f, 3.0f) with size field adjusted to Integer.MAX_VALUE + byte[] suspiciousBytes = + Hex.decodeHex( + "aced000573720042636f6d2e64617461737461782e6f73732e6472697665722e6170692e636f72652e646174612e43716c566563746f722453657269616c697a6174696f6e50726f78790000000000000001030000787077047fffffff7372000f6a6176612e6c616e672e466c6f6174daedc9a2db3cf0ec02000146000576616c7565787200106a6176612e6c616e672e4e756d62657286ac951d0b94e08b02000078703f8000007371007e0002402000007371007e00024040000078" + .toCharArray()); + try { + new ObjectInputStream(new ByteArrayInputStream(suspiciousBytes)).readObject(); + fail("Should not be able to deserialize bytes with incorrect size field"); + } catch (Exception e) { + // check we fail to deserialize, rather than OOM + assertThat(e).isInstanceOf(ObjectStreamException.class); + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java new file mode 100644 index 00000000000..a5b9b447e6a --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListenerTest.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.metadata; + +import com.datastax.oss.driver.api.core.session.Session; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class SafeInitNodeStateListenerTest { + + @Mock private NodeStateListener delegate; + @Mock private Node node; + @Mock private Session session; + + @Test + public void should_replay_init_events() { + SafeInitNodeStateListener wrapper = new SafeInitNodeStateListener(delegate, true); + + // Not a realistic sequence of invocations in the driver, but that doesn't matter + wrapper.onAdd(node); + wrapper.onUp(node); + wrapper.onSessionReady(session); + wrapper.onDown(node); + + InOrder inOrder = Mockito.inOrder(delegate); + inOrder.verify(delegate).onSessionReady(session); + inOrder.verify(delegate).onAdd(node); + inOrder.verify(delegate).onUp(node); + inOrder.verify(delegate).onDown(node); + } + + @Test + public void should_discard_init_events() { + SafeInitNodeStateListener wrapper = new SafeInitNodeStateListener(delegate, false); + + wrapper.onAdd(node); + wrapper.onUp(node); + wrapper.onSessionReady(session); + wrapper.onDown(node); + + InOrder inOrder = Mockito.inOrder(delegate); + inOrder.verify(delegate).onSessionReady(session); + inOrder.verify(delegate).onDown(node); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java new file mode 100644 index 00000000000..3963bf6de84 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerAsyncTest.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.paging; + +import static com.datastax.oss.driver.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.paging.OffsetPager.Page; +import com.datastax.oss.driver.internal.core.MockAsyncPagingIterable; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.concurrent.CompletionStage; +import org.junit.Test; + +public class OffsetPagerAsyncTest extends OffsetPagerTestBase { + + @Override + protected Page<String> getActualPage( + OffsetPager pager, OffsetPagerTestFixture fixture, int fetchSize) { + CompletionStage<Page<String>> pageFuture = + pager.getPage(fixture.getAsyncIterable(fetchSize), fixture.getRequestedPage()); + return CompletableFutures.getCompleted(pageFuture); + } + + /** + * Covers the corner case where the server sends back an empty frame at the end of the result set. + */ + @Test + @UseDataProvider("fetchSizes") + public void should_return_last_page_when_result_finishes_with_empty_frame(int fetchSize) { + MockAsyncPagingIterable<String> iterable = + new MockAsyncPagingIterable<>(ImmutableList.of("a", "b", "c"), fetchSize, true); + OffsetPager pager = new OffsetPager(3); + Page<String> page = CompletableFutures.getCompleted(pager.getPage(iterable, 1)); + + assertThat(page.getElements()).containsExactly("a", "b", "c"); + assertThat(page.getPageNumber()).isEqualTo(1); + assertThat(page.isLast()).isTrue(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java new file mode 100644 index 00000000000..0d8b380dd49 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerSyncTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.paging; + +public class OffsetPagerSyncTest extends OffsetPagerTestBase { + + @Override + protected OffsetPager.Page<String> getActualPage( + OffsetPager pager, OffsetPagerTestFixture fixture, /*ignored*/ int fetchSize) { + return pager.getPage(fixture.getSyncIterable(), fixture.getRequestedPage()); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java new file mode 100644 index 00000000000..7f9ca2ddaa2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestBase.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.paging; + +import com.datastax.oss.driver.TestDataProviders; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public abstract class OffsetPagerTestBase { + + /** + * The fetch size only matters for the async implementation. For sync this will essentially run + * the same fixture 4 times, but that's not a problem because tests are fast. + */ + @DataProvider + public static Object[][] fetchSizes() { + return TestDataProviders.fromList(1, 2, 3, 100); + } + + @DataProvider + public static Object[][] scenarios() { + Object[][] fixtures = + TestDataProviders.fromList( + // ------- inputs -------- | ------ expected ------- + // iterable | page | size | page | contents | last? + "a,b,c,d,e,f | 1 | 3 | 1 | a,b,c | false", + "a,b,c,d,e,f | 2 | 3 | 2 | d,e,f | true", + "a,b,c,d,e,f | 2 | 4 | 2 | e,f | true", + "a,b,c,d,e,f | 2 | 5 | 2 | f | true", + "a,b,c | 1 | 3 | 1 | a,b,c | true", + "a,b | 1 | 3 | 1 | a,b | true", + "a | 1 | 3 | 1 | a | true", + // Empty iterator => return one empty page + " | 1 | 3 | 1 | | true", + // Past the end => return last page + "a,b,c,d,e,f | 3 | 3 | 2 | d,e,f | true", + "a,b,c,d,e | 3 | 3 | 2 | d,e | true"); + return TestDataProviders.combine(fixtures, fetchSizes()); + } + + @Test + @UseDataProvider("scenarios") + public void should_return_existing_page(String fixtureSpec, int fetchSize) { + OffsetPagerTestFixture fixture = new OffsetPagerTestFixture(fixtureSpec); + OffsetPager pager = new OffsetPager(fixture.getPageSize()); + OffsetPager.Page<String> actualPage = getActualPage(pager, fixture, fetchSize); + fixture.assertMatches(actualPage); + } + + protected abstract OffsetPager.Page<String> getActualPage( + OffsetPager pager, OffsetPagerTestFixture fixture, int fetchSize); +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java new file mode 100644 index 00000000000..91079722aa2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/paging/OffsetPagerTestFixture.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.paging; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.PagingIterable; +import com.datastax.oss.driver.internal.core.MockAsyncPagingIterable; +import com.datastax.oss.driver.internal.core.MockPagingIterable; +import com.datastax.oss.driver.shaded.guava.common.base.Splitter; +import java.util.List; + +public class OffsetPagerTestFixture { + + private static final Splitter SPEC_SPLITTER = Splitter.on('|').trimResults(); + private static final Splitter LIST_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings(); + + private final List<String> inputElements; + private final int requestedPage; + private final int pageSize; + private final int expectedPageNumber; + private final List<String> expectedElements; + private final boolean expectedIsLast; + + public OffsetPagerTestFixture(String spec) { + List<String> components = SPEC_SPLITTER.splitToList(spec); + int size = components.size(); + if (size != 3 && size != 6) { + fail("Invalid fixture spec, expected 3 or 6 components"); + } + + this.inputElements = LIST_SPLITTER.splitToList(components.get(0)); + this.requestedPage = Integer.parseInt(components.get(1)); + this.pageSize = Integer.parseInt(components.get(2)); + if (size == 3) { + this.expectedPageNumber = -1; + this.expectedElements = null; + this.expectedIsLast = false; + } else { + this.expectedPageNumber = Integer.parseInt(components.get(3)); + this.expectedElements = LIST_SPLITTER.splitToList(components.get(4)); + this.expectedIsLast = Boolean.parseBoolean(components.get(5)); + } + } + + public PagingIterable<String> getSyncIterable() { + return new MockPagingIterable<>(inputElements.iterator()); + } + + public MockAsyncPagingIterable<String> getAsyncIterable(int fetchSize) { + return new MockAsyncPagingIterable<>(inputElements, fetchSize, false); + } + + public int getRequestedPage() { + return requestedPage; + } + + public int getPageSize() { + return pageSize; + } + + public void assertMatches(OffsetPager.Page<String> actualPage) { + assertThat(actualPage.getPageNumber()).isEqualTo(expectedPageNumber); + assertThat(actualPage.getElements()).isEqualTo(expectedElements); + assertThat(actualPage.isLast()).isEqualTo(expectedIsLast); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java new file mode 100644 index 00000000000..e4463d833bf --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/api/core/retry/ConsistencyDowngradingRetryPolicyTest.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.retry; + +import static com.datastax.oss.driver.api.core.ConsistencyLevel.EACH_QUORUM; +import static com.datastax.oss.driver.api.core.ConsistencyLevel.ONE; +import static com.datastax.oss.driver.api.core.ConsistencyLevel.QUORUM; +import static com.datastax.oss.driver.api.core.ConsistencyLevel.SERIAL; +import static com.datastax.oss.driver.api.core.ConsistencyLevel.THREE; +import static com.datastax.oss.driver.api.core.ConsistencyLevel.TWO; +import static com.datastax.oss.driver.api.core.retry.RetryDecision.IGNORE; +import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETHROW; +import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_NEXT; +import static com.datastax.oss.driver.api.core.retry.RetryDecision.RETRY_SAME; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.BATCH_LOG; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.CAS; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.CDC; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.COUNTER; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.SIMPLE; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.UNLOGGED_BATCH; +import static com.datastax.oss.driver.api.core.servererrors.WriteType.VIEW; + +import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; +import com.datastax.oss.driver.api.core.connection.HeartbeatException; +import com.datastax.oss.driver.api.core.servererrors.OverloadedException; +import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; +import com.datastax.oss.driver.api.core.servererrors.ServerError; +import com.datastax.oss.driver.api.core.servererrors.TruncateException; +import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; +import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy; +import org.junit.Test; + +public class ConsistencyDowngradingRetryPolicyTest extends RetryPolicyTestBase { + + public ConsistencyDowngradingRetryPolicyTest() { + super(new ConsistencyDowngradingRetryPolicy("test")); + } + + @Test + public void should_process_read_timeouts() { + // retry count != 0 + assertOnReadTimeout(QUORUM, 2, 2, false, 1).hasDecision(RETHROW); + // serial CL + assertOnReadTimeout(SERIAL, 2, 2, false, 0).hasDecision(RETHROW); + // received < blockFor + assertOnReadTimeout(QUORUM, 4, 3, true, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); + assertOnReadTimeout(QUORUM, 4, 3, false, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); + assertOnReadTimeout(QUORUM, 3, 2, true, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); + assertOnReadTimeout(QUORUM, 3, 2, false, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); + assertOnReadTimeout(QUORUM, 2, 1, true, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); + assertOnReadTimeout(QUORUM, 2, 1, false, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); + assertOnReadTimeout(EACH_QUORUM, 2, 0, true, 
0).hasDecision(RETRY_SAME).hasConsistency(ONE); + assertOnReadTimeout(EACH_QUORUM, 2, 0, false, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); + assertOnReadTimeout(QUORUM, 2, 0, true, 0).hasDecision(RETHROW); + assertOnReadTimeout(QUORUM, 2, 0, false, 0).hasDecision(RETHROW); + // data present + assertOnReadTimeout(QUORUM, 2, 2, false, 0).hasDecision(RETRY_SAME); + assertOnReadTimeout(QUORUM, 2, 2, true, 0).hasDecision(RETHROW); + } + + @Test + public void should_process_write_timeouts() { + // retry count != 0 + assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).hasDecision(RETHROW); + // SIMPLE + assertOnWriteTimeout(QUORUM, SIMPLE, 2, 1, 0).hasDecision(IGNORE); + assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).hasDecision(RETHROW); + // BATCH + assertOnWriteTimeout(QUORUM, BATCH, 2, 1, 0).hasDecision(IGNORE); + assertOnWriteTimeout(QUORUM, BATCH, 2, 0, 0).hasDecision(RETHROW); + // UNLOGGED_BATCH + assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 4, 3, 0) + .hasDecision(RETRY_SAME) + .hasConsistency(THREE); + assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 3, 2, 0) + .hasDecision(RETRY_SAME) + .hasConsistency(TWO); + assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 2, 1, 0) + .hasDecision(RETRY_SAME) + .hasConsistency(ONE); + assertOnWriteTimeout(EACH_QUORUM, UNLOGGED_BATCH, 2, 0, 0) + .hasDecision(RETRY_SAME) + .hasConsistency(ONE); + assertOnWriteTimeout(QUORUM, UNLOGGED_BATCH, 2, 0, 0).hasDecision(RETHROW); + // BATCH_LOG + assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 1, 0).hasDecision(RETRY_SAME); + // others + assertOnWriteTimeout(QUORUM, COUNTER, 2, 1, 0).hasDecision(RETHROW); + assertOnWriteTimeout(QUORUM, CAS, 2, 1, 0).hasDecision(RETHROW); + assertOnWriteTimeout(QUORUM, VIEW, 2, 1, 0).hasDecision(RETHROW); + assertOnWriteTimeout(QUORUM, CDC, 2, 1, 0).hasDecision(RETHROW); + } + + @Test + public void should_process_unavailable() { + // retry count != 0 + assertOnUnavailable(QUORUM, 2, 1, 1).hasDecision(RETHROW); + // SERIAL + assertOnUnavailable(SERIAL, 2, 1, 0).hasDecision(RETRY_NEXT); + // downgrade + assertOnUnavailable(QUORUM, 4, 3, 0).hasDecision(RETRY_SAME).hasConsistency(THREE); + assertOnUnavailable(QUORUM, 3, 2, 0).hasDecision(RETRY_SAME).hasConsistency(TWO); + assertOnUnavailable(QUORUM, 2, 1, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); + assertOnUnavailable(EACH_QUORUM, 2, 0, 0).hasDecision(RETRY_SAME).hasConsistency(ONE); + assertOnUnavailable(QUORUM, 2, 0, 0).hasDecision(RETHROW); + } + + @Test + public void should_process_aborted_request() { + assertOnRequestAborted(ClosedConnectionException.class, 0).hasDecision(RETRY_NEXT); + assertOnRequestAborted(ClosedConnectionException.class, 1).hasDecision(RETRY_NEXT); + assertOnRequestAborted(HeartbeatException.class, 0).hasDecision(RETRY_NEXT); + assertOnRequestAborted(HeartbeatException.class, 1).hasDecision(RETRY_NEXT); + assertOnRequestAborted(Throwable.class, 0).hasDecision(RETHROW); + } + + @Test + public void should_process_error_response() { + assertOnErrorResponse(ReadFailureException.class, 0).hasDecision(RETHROW); + assertOnErrorResponse(ReadFailureException.class, 1).hasDecision(RETHROW); + assertOnErrorResponse(WriteFailureException.class, 0).hasDecision(RETHROW); + assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); + assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); + + assertOnErrorResponse(OverloadedException.class, 0).hasDecision(RETRY_NEXT); + assertOnErrorResponse(OverloadedException.class, 1).hasDecision(RETRY_NEXT); + assertOnErrorResponse(ServerError.class, 
0).hasDecision(RETRY_NEXT); + assertOnErrorResponse(ServerError.class, 1).hasDecision(RETRY_NEXT); + assertOnErrorResponse(TruncateException.class, 0).hasDecision(RETRY_NEXT); + assertOnErrorResponse(TruncateException.class, 1).hasDecision(RETRY_NEXT); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java index dac4dcafe20..e36ccff2b91 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/retry/DefaultRetryPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,48 +42,48 @@ public DefaultRetryPolicyTest() { @Test public void should_process_read_timeouts() { - assertOnReadTimeout(QUORUM, 2, 2, false, 0).isEqualTo(RETRY_SAME); - assertOnReadTimeout(QUORUM, 2, 2, false, 1).isEqualTo(RETHROW); - assertOnReadTimeout(QUORUM, 2, 2, true, 0).isEqualTo(RETHROW); - assertOnReadTimeout(QUORUM, 2, 1, true, 0).isEqualTo(RETHROW); - assertOnReadTimeout(QUORUM, 2, 1, false, 0).isEqualTo(RETHROW); + assertOnReadTimeout(QUORUM, 2, 2, false, 0).hasDecision(RETRY_SAME); + assertOnReadTimeout(QUORUM, 2, 2, false, 1).hasDecision(RETHROW); + assertOnReadTimeout(QUORUM, 2, 2, true, 0).hasDecision(RETHROW); + assertOnReadTimeout(QUORUM, 2, 1, true, 0).hasDecision(RETHROW); + assertOnReadTimeout(QUORUM, 2, 1, false, 0).hasDecision(RETHROW); } @Test public void should_process_write_timeouts() { - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 0).isEqualTo(RETRY_SAME); - assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).isEqualTo(RETHROW); - assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).isEqualTo(RETHROW); + assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 0).hasDecision(RETRY_SAME); + assertOnWriteTimeout(QUORUM, BATCH_LOG, 2, 0, 1).hasDecision(RETHROW); + assertOnWriteTimeout(QUORUM, SIMPLE, 2, 0, 0).hasDecision(RETHROW); } @Test public void should_process_unavailable() { - assertOnUnavailable(QUORUM, 2, 1, 0).isEqualTo(RETRY_NEXT); - assertOnUnavailable(QUORUM, 2, 1, 1).isEqualTo(RETHROW); + assertOnUnavailable(QUORUM, 2, 1, 0).hasDecision(RETRY_NEXT); + assertOnUnavailable(QUORUM, 2, 1, 1).hasDecision(RETHROW); } @Test public void should_process_aborted_request() { - assertOnRequestAborted(ClosedConnectionException.class, 0).isEqualTo(RETRY_NEXT); - assertOnRequestAborted(ClosedConnectionException.class, 1).isEqualTo(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 0).isEqualTo(RETRY_NEXT); - assertOnRequestAborted(HeartbeatException.class, 1).isEqualTo(RETRY_NEXT); - assertOnRequestAborted(Throwable.class, 
0).isEqualTo(RETHROW); + assertOnRequestAborted(ClosedConnectionException.class, 0).hasDecision(RETRY_NEXT); + assertOnRequestAborted(ClosedConnectionException.class, 1).hasDecision(RETRY_NEXT); + assertOnRequestAborted(HeartbeatException.class, 0).hasDecision(RETRY_NEXT); + assertOnRequestAborted(HeartbeatException.class, 1).hasDecision(RETRY_NEXT); + assertOnRequestAborted(Throwable.class, 0).hasDecision(RETHROW); } @Test public void should_process_error_response() { - assertOnErrorResponse(ReadFailureException.class, 0).isEqualTo(RETHROW); - assertOnErrorResponse(ReadFailureException.class, 1).isEqualTo(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 0).isEqualTo(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).isEqualTo(RETHROW); - assertOnErrorResponse(WriteFailureException.class, 1).isEqualTo(RETHROW); + assertOnErrorResponse(ReadFailureException.class, 0).hasDecision(RETHROW); + assertOnErrorResponse(ReadFailureException.class, 1).hasDecision(RETHROW); + assertOnErrorResponse(WriteFailureException.class, 0).hasDecision(RETHROW); + assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); + assertOnErrorResponse(WriteFailureException.class, 1).hasDecision(RETHROW); - assertOnErrorResponse(OverloadedException.class, 0).isEqualTo(RETRY_NEXT); - assertOnErrorResponse(OverloadedException.class, 1).isEqualTo(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 0).isEqualTo(RETRY_NEXT); - assertOnErrorResponse(ServerError.class, 1).isEqualTo(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 0).isEqualTo(RETRY_NEXT); - assertOnErrorResponse(TruncateException.class, 1).isEqualTo(RETRY_NEXT); + assertOnErrorResponse(OverloadedException.class, 0).hasDecision(RETRY_NEXT); + assertOnErrorResponse(OverloadedException.class, 1).hasDecision(RETRY_NEXT); + assertOnErrorResponse(ServerError.class, 0).hasDecision(RETRY_NEXT); + assertOnErrorResponse(ServerError.class, 1).hasDecision(RETRY_NEXT); + assertOnErrorResponse(TruncateException.class, 0).hasDecision(RETRY_NEXT); + assertOnErrorResponse(TruncateException.class, 1).hasDecision(RETRY_NEXT); } } diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java b/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java index 78c227816e9..a57f4ab352f 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/retry/RetryPolicyTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +24,8 @@ import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; import com.datastax.oss.driver.api.core.servererrors.WriteType; import com.datastax.oss.driver.api.core.session.Request; -import org.assertj.core.api.Assert; +import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryVerdict; +import org.assertj.core.api.AbstractAssert; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @@ -37,30 +40,52 @@ protected RetryPolicyTestBase(RetryPolicy policy) { this.policy = policy; } - protected Assert<?, RetryDecision> assertOnReadTimeout( + protected RetryVerdictAssert assertOnReadTimeout( ConsistencyLevel cl, int blockFor, int received, boolean dataPresent, int retryCount) { - return assertThat( - policy.onReadTimeout(request, cl, blockFor, received, dataPresent, retryCount)); + return new RetryVerdictAssert( + policy.onReadTimeoutVerdict(request, cl, blockFor, received, dataPresent, retryCount)); } - protected Assert<?, RetryDecision> assertOnWriteTimeout( + protected RetryVerdictAssert assertOnWriteTimeout( ConsistencyLevel cl, WriteType writeType, int blockFor, int received, int retryCount) { - return assertThat( - policy.onWriteTimeout(request, cl, writeType, blockFor, received, retryCount)); + return new RetryVerdictAssert( + policy.onWriteTimeoutVerdict(request, cl, writeType, blockFor, received, retryCount)); } - protected Assert<?, RetryDecision> assertOnUnavailable( + protected RetryVerdictAssert assertOnUnavailable( ConsistencyLevel cl, int required, int alive, int retryCount) { - return assertThat(policy.onUnavailable(request, cl, required, alive, retryCount)); + return new RetryVerdictAssert( + policy.onUnavailableVerdict(request, cl, required, alive, retryCount)); } - protected Assert<?, RetryDecision> assertOnRequestAborted( + protected RetryVerdictAssert assertOnRequestAborted( Class<? extends Throwable> errorClass, int retryCount) { - return assertThat(policy.onRequestAborted(request, mock(errorClass), retryCount)); + return new RetryVerdictAssert( + policy.onRequestAbortedVerdict(request, mock(errorClass), retryCount)); } - protected Assert<?, RetryDecision> assertOnErrorResponse( + protected RetryVerdictAssert assertOnErrorResponse( Class<? extends CoordinatorException> errorClass, int retryCount) { - return assertThat(policy.onErrorResponse(request, mock(errorClass), retryCount)); + return new RetryVerdictAssert( + policy.onErrorResponseVerdict(request, mock(errorClass), retryCount)); + } + + public static class RetryVerdictAssert extends AbstractAssert<RetryVerdictAssert, RetryVerdict> { + RetryVerdictAssert(RetryVerdict actual) { + super(actual, RetryVerdictAssert.class); + } + + public RetryVerdictAssert hasDecision(RetryDecision decision) { + assertThat(actual.getRetryDecision()).isEqualTo(decision); + return this; + } + + public RetryVerdictAssert hasConsistency(ConsistencyLevel cl) { + assertThat(actual) + .isInstanceOf(ConsistencyDowngradingRetryVerdict.class) + .extracting("consistencyLevel") + .isEqualTo(cl); + return this; + } } } diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java index 7a279dfd99d..efd804fa66e 100644 --- 
a/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/specex/ConstantSpeculativeExecutionPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java index 05416857057..9db93b37c91 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/type/UserDefinedTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java index ac66cfca01a..bddb8f92773 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/type/reflect/GenericTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java b/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java index da51e00f366..c547f95e67c 100644 --- a/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/api/core/uuid/UuidsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,25 +18,281 @@ package com.datastax.oss.driver.api.core.uuid; import static com.datastax.oss.driver.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.DefaultProtocolVersion; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.Arrays; import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.SplittableRandom; import java.util.UUID; import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.Supplier; import org.junit.Test; +import org.junit.runner.RunWith; +@RunWith(DataProviderRunner.class) public class UuidsTest { + @Test + public void should_generate_unique_random_uuids_Random() { + Set generated = serialGeneration(1_000_000, Uuids::random); + assertThat(generated).hasSize(1_000_000); + } + + @Test + public void should_generate_unique_random_uuids_shared_Random2() { + Random random = new Random(); + Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); + assertThat(generated).hasSize(1_000_000); + } + + @Test + public void should_generate_unique_random_uuids_across_threads_shared_Random() throws Exception { + Random random = new Random(); + Set generated = parallelGeneration(10, 10_000, () -> () -> Uuids.random(random)); + assertThat(generated).hasSize(10 * 10_000); + } + + @Test + public void should_generate_unique_random_uuids_shared_SecureRandom() { + SecureRandom random = new SecureRandom(); + Set generated = serialGeneration(1_000_000, () -> Uuids.random(random)); + 
assertThat(generated).hasSize(1_000_000); + } + + @Test + public void should_generate_unique_random_uuids_across_threads_shared_SecureRandom() + throws Exception { + SecureRandom random = new SecureRandom(); + Set<UUID> generated = parallelGeneration(10, 10_000, () -> () -> Uuids.random(random)); + assertThat(generated).hasSize(10 * 10_000); + } + + @Test + public void should_generate_unique_random_uuids_ThreadLocalRandom() { + ThreadLocalRandom random = ThreadLocalRandom.current(); + Set<UUID> generated = serialGeneration(1_000_000, () -> Uuids.random(random)); + assertThat(generated).hasSize(1_000_000); + } + + @Test + public void should_generate_unique_random_uuids_across_threads_ThreadLocalRandom() + throws Exception { + Set<UUID> generated = + parallelGeneration( + 10, + 10_000, + () -> { + ThreadLocalRandom random = ThreadLocalRandom.current(); + return () -> Uuids.random(random); + }); + assertThat(generated).hasSize(10 * 10_000); + } + + @Test + public void should_generate_unique_random_uuids_Netty_ThreadLocalRandom() { + io.netty.util.internal.ThreadLocalRandom random = + io.netty.util.internal.ThreadLocalRandom.current(); + Set<UUID> generated = serialGeneration(1_000_000, () -> Uuids.random(random)); + assertThat(generated).hasSize(1_000_000); + } + + @Test + public void should_generate_unique_random_uuids_across_threads_Netty_ThreadLocalRandom() + throws Exception { + Set<UUID> generated = + parallelGeneration( + 10, + 10_000, + () -> { + io.netty.util.internal.ThreadLocalRandom random = + io.netty.util.internal.ThreadLocalRandom.current(); + return () -> Uuids.random(random); + }); + assertThat(generated).hasSize(10 * 10_000); + } + + @Test + public void should_generate_unique_random_uuids_SplittableRandom() { + SplittableRandom random = new SplittableRandom(); + Set<UUID> generated = serialGeneration(1_000_000, () -> Uuids.random(random)); + assertThat(generated).hasSize(1_000_000); + } + + @Test + public void should_generate_unique_random_uuids_across_threads_SplittableRandom() + throws Exception { + Set<UUID> generated = + parallelGeneration( + 10, + 10_000, + () -> { + SplittableRandom random = new SplittableRandom(); + return () -> Uuids.random(random); + }); + assertThat(generated).hasSize(10 * 10_000); + } + + @Test + @UseDataProvider("byteArrayNames") + public void should_generate_name_based_uuid_from_namespace_and_byte_array( + UUID namespace, byte[] name) throws NoSuchAlgorithmException { + // when + UUID actual = Uuids.nameBased(namespace, name); + // then + assertThat(actual).isNotNull(); + assertThat(actual.version()).isEqualTo(3); + assertUuid(namespace, name, 3, actual); + } + + @DataProvider + public static Object[][] byteArrayNames() { + return new Object[][] { + {Uuids.NAMESPACE_DNS, new byte[] {}}, {Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}}, + }; + } + + @Test + @UseDataProvider("byteArrayNamesWithVersions") + public void should_generate_name_based_uuid_from_namespace_byte_array_and_version( + UUID namespace, byte[] name, int version) throws NoSuchAlgorithmException { + // when + UUID actual = Uuids.nameBased(namespace, name, version); + // then + assertThat(actual).isNotNull(); + assertThat(actual.version()).isEqualTo(version); + assertUuid(namespace, name, version, actual); + } + + @DataProvider + public static Object[][] byteArrayNamesWithVersions() { + return new Object[][] { + {Uuids.NAMESPACE_DNS, new byte[] {}, 3}, + {Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}, 3}, + {Uuids.NAMESPACE_OID, new byte[] {}, 5}, + {Uuids.NAMESPACE_X500, new byte[] {1, 2, 3, 4}, 5}, + }; + } + + @Test + 
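// name-based UUIDs hash the namespace and the name with MD5 (version 3) or SHA-1 (version 5), as defined by RFC 4122 + 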
@UseDataProvider("stringNames") + public void should_generate_name_based_uuid_from_namespace_and_string(UUID namespace, String name) + throws NoSuchAlgorithmException { + // when + UUID actual = Uuids.nameBased(namespace, name); + // then + assertThat(actual).isNotNull(); + assertThat(actual.version()).isEqualTo(3); + assertUuid(namespace, name, 3, actual); + } + + @DataProvider + public static Object[][] stringNames() { + return new Object[][] { + {Uuids.NAMESPACE_DNS, ""}, {Uuids.NAMESPACE_URL, "Hello world!"}, {Uuids.NAMESPACE_OID, "你好"}, + }; + } + + @Test + @UseDataProvider("stringNamesWithVersions") + public void should_generate_name_based_uuid_from_namespace_string_and_version( + UUID namespace, String name, int version) throws NoSuchAlgorithmException { + // when + UUID actual = Uuids.nameBased(namespace, name, version); + // then + assertThat(actual).isNotNull(); + assertThat(actual.version()).isEqualTo(version); + assertUuid(namespace, name, version, actual); + } + + @DataProvider + public static Object[][] stringNamesWithVersions() { + return new Object[][] { + {Uuids.NAMESPACE_DNS, "", 3}, + {Uuids.NAMESPACE_URL, "Hello world!", 3}, + {Uuids.NAMESPACE_OID, "你好", 3}, + {Uuids.NAMESPACE_DNS, "", 5}, + {Uuids.NAMESPACE_URL, "Hello world!", 5}, + {Uuids.NAMESPACE_OID, "你好", 5}, + }; + } + + @Test + @UseDataProvider("concatenatedData") + public void should_generate_name_based_uuid_from_concatenated_data(byte[] namespaceAndName) + throws NoSuchAlgorithmException { + // when + UUID actual = Uuids.nameBased(namespaceAndName); + // then + assertThat(actual).isNotNull(); + assertThat(actual.version()).isEqualTo(3); + assertUuid(namespaceAndName, 3, actual); + } + + @DataProvider + public static Object[][] concatenatedData() { + return new Object[][] { + {concat(Uuids.NAMESPACE_DNS, new byte[] {})}, + {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4})}, + }; + } + + @Test + @UseDataProvider("concatenatedDataWithVersions") + public void should_generate_name_based_uuid_from_concatenated_data_and_version( + byte[] namespaceAndName, int version) throws NoSuchAlgorithmException { + // when + UUID actual = Uuids.nameBased(namespaceAndName, version); + // then + assertThat(actual).isNotNull(); + assertThat(actual.version()).isEqualTo(version); + assertUuid(namespaceAndName, version, actual); + } + + @DataProvider + public static Object[][] concatenatedDataWithVersions() { + return new Object[][] { + {concat(Uuids.NAMESPACE_DNS, new byte[] {}), 3}, + {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}), 3}, + {concat(Uuids.NAMESPACE_DNS, new byte[] {}), 5}, + {concat(Uuids.NAMESPACE_URL, new byte[] {1, 2, 3, 4}), 5}, + }; + } + + @Test + public void should_throw_when_invalid_version() { + Throwable error = catchThrowable(() -> Uuids.nameBased(Uuids.NAMESPACE_URL, "irrelevant", 1)); + assertThat(error) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Invalid name-based UUID version, expecting 3 or 5, got: 1"); + } + + @Test + public void should_throw_when_invalid_data() { + Throwable error = catchThrowable(() -> Uuids.nameBased(new byte[] {1}, 3)); + assertThat(error) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("namespaceAndName must be at least 16 bytes long"); + } + @Test public void should_generate_timestamp_within_10_ms() { // The Uuids class does some computation at class initialization, which may screw up our // assumption below that Uuids.timeBased() takes less than 10ms, so force class loading now. 
- Uuids.random(); + Uuids.timeBased(); long start = System.currentTimeMillis(); UUID uuid = Uuids.timeBased(); @@ -51,34 +309,14 @@ public void should_generate_timestamp_within_10_ms() { @Test public void should_generate_unique_time_based_uuids() { - int count = 1_000_000; - Set<UUID> generated = new HashSet<>(count); - - for (int i = 0; i < count; ++i) { - generated.add(Uuids.timeBased()); - } - - assertThat(generated).hasSize(count); + Set<UUID> generated = serialGeneration(1_000_000, Uuids::timeBased); + assertThat(generated).hasSize(1_000_000); } @Test public void should_generate_unique_time_based_uuids_across_threads() throws Exception { - int threadCount = 10; - int uuidsPerThread = 10_000; - Set<UUID> generated = new ConcurrentSkipListSet<>(); - - UUIDGenerator[] generators = new UUIDGenerator[threadCount]; - for (int i = 0; i < threadCount; i++) { - generators[i] = new UUIDGenerator(uuidsPerThread, generated); - } - for (int i = 0; i < threadCount; i++) { - generators[i].start(); - } - for (int i = 0; i < threadCount; i++) { - generators[i].join(); - } - - assertThat(generated).hasSize(threadCount * uuidsPerThread); + Set<UUID> generated = parallelGeneration(10, 10_000, () -> Uuids::timeBased); + assertThat(generated).hasSize(10 * 10_000); } @Test @@ -101,7 +339,7 @@ public void should_generate_within_bounds_for_given_timestamp() { int uuidsPerTimestamp = 10; for (int i = 0; i < timestampsCount; i++) { - long timestamp = (long) random.nextInt(); + long timestamp = random.nextInt(); for (int j = 0; j < uuidsPerTimestamp; j++) { UUID uuid = new UUID(Uuids.makeMsb(Uuids.fromUnixTimestamp(timestamp)), random.nextLong()); assertBetween(uuid, Uuids.startOf(timestamp), Uuids.endOf(timestamp)); @@ -114,6 +352,9 @@ private static void assertBetween(UUID uuid, UUID lowerBound, UUID upperBound) { ByteBuffer uuidBytes = TypeCodecs.UUID.encode(uuid, DefaultProtocolVersion.V3); ByteBuffer lb = TypeCodecs.UUID.encode(lowerBound, DefaultProtocolVersion.V3); ByteBuffer ub = TypeCodecs.UUID.encode(upperBound, DefaultProtocolVersion.V3); + assertThat(uuidBytes).isNotNull(); + assertThat(lb).isNotNull(); + assertThat(ub).isNotNull(); assertThat(compareTimestampBytes(lb, uuidBytes)).isLessThanOrEqualTo(0); assertThat(compareTimestampBytes(ub, uuidBytes)).isGreaterThanOrEqualTo(0); } @@ -153,20 +394,102 @@ private static int compareTimestampBytes(ByteBuffer o1, ByteBuffer o2) { return (o1.get(o1Pos + 3) & 0xFF) - (o2.get(o2Pos + 3) & 0xFF); } - private static class UUIDGenerator extends Thread { + private static void assertUuid(UUID namespace, String name, int version, UUID actual) + throws NoSuchAlgorithmException { + assertUuid(namespace, name.getBytes(StandardCharsets.UTF_8), version, actual); + } + + private static void assertUuid(UUID namespace, byte[] name, int version, UUID actual) + throws NoSuchAlgorithmException { + byte[] data = digest(namespace, name, version); + assertThat(longToBytes(actual.getMostSignificantBits())) + .isEqualTo(Arrays.copyOfRange(data, 0, 8)); + assertThat(longToBytes(actual.getLeastSignificantBits())) + .isEqualTo(Arrays.copyOfRange(data, 8, 16)); + } + + private static void assertUuid(byte[] namespaceAndName, int version, UUID actual) + throws NoSuchAlgorithmException { + byte[] data = digest(namespaceAndName, version); + assertThat(longToBytes(actual.getMostSignificantBits())) + .isEqualTo(Arrays.copyOfRange(data, 0, 8)); + assertThat(longToBytes(actual.getLeastSignificantBits())) + .isEqualTo(Arrays.copyOfRange(data, 8, 16)); + } + + private static byte[] digest(UUID namespace, byte[] name, int version) + throws NoSuchAlgorithmException { + byte[] namespaceAndName = concat(namespace, name); + return digest(namespaceAndName, version); + } + + private static byte[] digest(byte[] namespaceAndName, int version) + throws NoSuchAlgorithmException { + MessageDigest result; + String algorithm = version == 3 ? "MD5" : "SHA-1"; + result = MessageDigest.getInstance(algorithm); + byte[] digest = result.digest(namespaceAndName); + // RFC 4122: the high nibble of byte 6 carries the UUID version (3 = MD5, 5 = SHA-1) + digest[6] &= (byte) 0x0f; + digest[6] |= (byte) (version << 4); + // RFC 4122: the two high bits of byte 8 carry the IETF variant (binary 10) + digest[8] &= (byte) 0x3f; + digest[8] |= (byte) 0x80; + return digest; + } + + private static byte[] concat(UUID namespace, byte[] name) { + return ByteBuffer.allocate(16 + name.length) + .putLong(namespace.getMostSignificantBits()) + .putLong(namespace.getLeastSignificantBits()) + .put(name) + .array(); + } + + private static byte[] longToBytes(long x) { + return ByteBuffer.allocate(Long.BYTES).putLong(x).array(); + } + + private Set<UUID> serialGeneration(int count, Supplier<UUID> uuidSupplier) { + Set<UUID> generated = new HashSet<>(count); + for (int i = 0; i < count; ++i) { + generated.add(uuidSupplier.get()); + } + return generated; + } + + public Set<UUID> parallelGeneration( + int threadCount, int uuidsPerThread, Supplier<Supplier<UUID>> uuidSupplier) + throws InterruptedException { + Set<UUID> generated = new ConcurrentSkipListSet<>(); + UuidGenerator[] generators = new UuidGenerator[threadCount]; + for (int i = 0; i < threadCount; i++) { + generators[i] = new UuidGenerator(uuidsPerThread, uuidSupplier, generated); + } + for (int i = 0; i < threadCount; i++) { + generators[i].start(); + } + for (int i = 0; i < threadCount; i++) { + generators[i].join(); + } + return generated; + } + + private static class UuidGenerator extends Thread { private final int toGenerate; private final Set<UUID> generated; + private final Supplier<Supplier<UUID>> uuidSupplier; - UUIDGenerator(int toGenerate, Set<UUID> generated) { + UuidGenerator(int toGenerate, Supplier<Supplier<UUID>> uuidSupplier, Set<UUID> generated) { this.toGenerate = toGenerate; this.generated = generated; + this.uuidSupplier = uuidSupplier; } @Override public void run() { + // Each thread gets its own inner supplier, so per-thread RNGs stay thread-local. + Supplier<UUID> uuidSupplier = this.uuidSupplier.get(); for (int i = 0; i < toGenerate; ++i) { - generated.add(Uuids.timeBased()); + generated.add(uuidSupplier.get()); } } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java index 0494aeaf539..4daf7e28eb6 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/SerializationHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
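
Aside: `Uuids.startOf` / `Uuids.endOf`, exercised by `should_generate_within_bounds_for_given_timestamp` above, are the usual way to build timeuuid bounds for range queries. A minimal sketch, assuming only those two driver methods (the table and column names in the comment are hypothetical):

```java
import com.datastax.oss.driver.api.core.uuid.Uuids;
import java.util.UUID;

public class TimeuuidRangeSketch {
  public static void main(String[] args) {
    long to = System.currentTimeMillis();
    long from = to - 60_000; // one minute ago
    UUID lower = Uuids.startOf(from); // smallest timeuuid for 'from'
    UUID upper = Uuids.endOf(to); // largest timeuuid for 'to'
    // e.g. SELECT * FROM events WHERE id > :lower AND id < :upper
    System.out.println(lower + " .. " + upper);
  }
}
```
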
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java index 554e23c7720..dff9877b62d 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/AsyncPagingIterableWrapperTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -127,7 +129,7 @@ public void should_share_iteration_progress_with_wrapped_result_set() { private ExecutionInfo mockExecutionInfo() { ExecutionInfo executionInfo = mock(ExecutionInfo.class); - when(executionInfo.getStatement()).thenAnswer(invocation -> statement); + when(executionInfo.getRequest()).thenAnswer(invocation -> statement); return executionInfo; } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistryHighestCommonTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistryHighestCommonTest.java deleted file mode 100644 index 19146e6c286..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistryHighestCommonTest.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.internal.core; - -import static com.datastax.oss.driver.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; -import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import java.util.Collection; -import java.util.Collections; -import org.junit.Test; - -/** - * Covers {@link CassandraProtocolVersionRegistry#highestCommon(Collection)} separately, because it - * relies explicitly on {@link DefaultProtocolVersion} as the version implementation. - */ -public class CassandraProtocolVersionRegistryHighestCommonTest { - - private CassandraProtocolVersionRegistry registry = new CassandraProtocolVersionRegistry("test"); - - @Test - public void should_pick_v3_when_at_least_one_node_is_2_1() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode("2.2.1"), mockNode("2.1.0"), mockNode("3.1.9")))) - .isEqualTo(DefaultProtocolVersion.V3); - } - - @Test - public void should_pick_v4_when_all_nodes_are_2_2_or_more() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode("2.2.0"), mockNode("2.2.1"), mockNode("3.1.9")))) - .isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - public void should_treat_rcs_as_next_stable_versions() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode("2.2.1"), mockNode("2.1.0-rc1"), mockNode("3.1.9")))) - .isEqualTo(DefaultProtocolVersion.V3); - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode("2.2.0-rc2"), mockNode("2.2.1"), mockNode("3.1.9")))) - .isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - public void should_skip_nodes_that_report_null_version() { - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode(null), mockNode("2.1.0"), mockNode("3.1.9")))) - .isEqualTo(DefaultProtocolVersion.V3); - - // Edge case: if all do, go with the latest version - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode(null), mockNode(null), mockNode(null)))) - .isEqualTo(DefaultProtocolVersion.V4); - } - - @Test - public void should_use_v4_for_future_cassandra_versions() { - // That might change in the future when some C* versions drop v4 support - assertThat( - registry.highestCommon( - ImmutableList.of(mockNode("3.0.0"), mockNode("12.1.5"), mockNode("98.7.22")))) - .isEqualTo(DefaultProtocolVersion.V4); - } - - @Test(expected = IllegalArgumentException.class) - public void should_fail_if_no_nodes() { - registry.highestCommon(Collections.emptyList()); - } - - private Node mockNode(String cassandraVersion) { - Node node = mock(Node.class); - if (cassandraVersion != null) { - when(node.getCassandraVersion()).thenReturn(Version.parse(cassandraVersion)); - } - return node; - } - - @Test(expected = UnsupportedProtocolVersionException.class) - public void should_fail_if_pre_2_1_node() { - registry.highestCommon(ImmutableList.of(mockNode("3.0.0"), mockNode("2.0.9"))); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistryTest.java deleted file mode 100644 index 0835e1c83ab..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/CassandraProtocolVersionRegistryTest.java +++ 
/dev/null @@ -1,121 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.Optional; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Covers the method that are agnostic to the actual {@link ProtocolVersion} implementation (using a - * mock implementation). - */ -public class CassandraProtocolVersionRegistryTest { - - private static ProtocolVersion V3 = new MockProtocolVersion(3, false); - private static ProtocolVersion V4 = new MockProtocolVersion(4, false); - private static ProtocolVersion V5 = new MockProtocolVersion(5, false); - private static ProtocolVersion V5_BETA = new MockProtocolVersion(5, true); - private static ProtocolVersion V10 = new MockProtocolVersion(10, false); - private static ProtocolVersion V11 = new MockProtocolVersion(11, false); - - @Rule public ExpectedException expectedException = ExpectedException.none(); - - @Test - public void should_fail_if_duplicate_version_code() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Duplicate version code: 5 in V5 and V5_BETA"); - new CassandraProtocolVersionRegistry("test", new ProtocolVersion[] {V5, V5_BETA}); - } - - @Test - public void should_find_version_by_name() { - ProtocolVersionRegistry versions = - new CassandraProtocolVersionRegistry("test", new ProtocolVersion[] {V3, V4}); - assertThat(versions.fromName("V3")).isEqualTo(V3); - assertThat(versions.fromName("V4")).isEqualTo(V4); - } - - @Test - public void should_downgrade_if_lower_version_available() { - ProtocolVersionRegistry versions = - new CassandraProtocolVersionRegistry("test", new ProtocolVersion[] {V3, V4}); - Optional downgraded = versions.downgrade(V4); - downgraded.map(version -> assertThat(version).isEqualTo(V3)).orElseThrow(AssertionError::new); - } - - @Test - public void should_not_downgrade_if_no_lower_version() { - ProtocolVersionRegistry versions = - new CassandraProtocolVersionRegistry("test", new ProtocolVersion[] {V3, V4}); - Optional downgraded = versions.downgrade(V3); - assertThat(downgraded.isPresent()).isFalse(); - } - - @Test - public void should_downgrade_across_version_range() { - ProtocolVersionRegistry versions = - new CassandraProtocolVersionRegistry( - "test", new ProtocolVersion[] {V3, V4}, new ProtocolVersion[] {V10, V11}); - Optional downgraded = versions.downgrade(V10); - downgraded.map(version -> assertThat(version).isEqualTo(V4)).orElseThrow(AssertionError::new); - } - - @Test - public void should_downgrade_skipping_beta_version() { - ProtocolVersionRegistry versions = - new CassandraProtocolVersionRegistry( - "test", new ProtocolVersion[] {V4, V5_BETA}, new ProtocolVersion[] {V10, V11}); - Optional downgraded = versions.downgrade(V10); - 
downgraded.map(version -> assertThat(version).isEqualTo(V4)).orElseThrow(AssertionError::new); - } - - private static class MockProtocolVersion implements ProtocolVersion { - private final int code; - private final boolean beta; - - MockProtocolVersion(int code, boolean beta) { - this.code = code; - this.beta = beta; - } - - @Override - public int getCode() { - return code; - } - - @NonNull - @Override - public String name() { - return "V" + code; - } - - @Override - public boolean isBeta() { - return beta; - } - - @Override - public String toString() { - return name() + (beta ? "_BETA" : ""); - } - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java index 0484386cd8f..6c0d78d62dd 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/CompletionStageAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java index 96b396d9e40..72b875b8602 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/ContactPointsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -119,7 +121,7 @@ public void should_ignore_malformed_host_and_port_and_warn() { ContactPoints.merge(Collections.emptySet(), ImmutableList.of("foobar"), true); assertThat(endPoints).isEmpty(); - assertLog(Level.WARN, "Ignoring invalid contact point foobar (expecting host:port)"); + assertLog(Level.WARN, "Ignoring invalid contact point foobar (expecting format host:port)"); } @Test @@ -130,7 +132,7 @@ public void should_ignore_malformed_port_and_warn() { assertThat(endPoints).isEmpty(); assertLog( Level.WARN, - "Ignoring invalid contact point 127.0.0.1:foobar (expecting a number, got foobar)"); + "Ignoring invalid contact point 127.0.0.1:foobar (expecting port to be a number, got foobar)"); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java new file mode 100644 index 00000000000..1d7cc65d1f2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/DefaultProtocolVersionRegistryTest.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core; + +import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V1; +import static com.datastax.dse.driver.api.core.DseProtocolVersion.DSE_V2; +import static com.datastax.oss.driver.api.core.ProtocolVersion.V3; +import static com.datastax.oss.driver.api.core.ProtocolVersion.V4; +import static com.datastax.oss.driver.api.core.ProtocolVersion.V5; +import static com.datastax.oss.driver.api.core.ProtocolVersion.V6; +import static com.datastax.oss.driver.internal.core.DefaultProtocolFeature.DATE_TYPE; +import static com.datastax.oss.driver.internal.core.DefaultProtocolFeature.SMALLINT_AND_TINYINT_TYPES; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.Optional; +import org.junit.Test; +import org.mockito.Mockito; + +/** + * Note: some tests in this class depend on the set of supported protocol versions; they will need + * to be updated as new versions are added or become non-beta. + */ +public class DefaultProtocolVersionRegistryTest { + + private DefaultProtocolVersionRegistry registry = new DefaultProtocolVersionRegistry("test"); + + @Test + public void should_find_version_by_name() { + assertThat(registry.fromName("V4")).isEqualTo(ProtocolVersion.V4); + assertThat(registry.fromName("DSE_V1")).isEqualTo(DseProtocolVersion.DSE_V1); + } + + @Test + public void should_fail_to_find_version_by_name_different_case() { + assertThatThrownBy(() -> registry.fromName("v4")).isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> registry.fromName("dse_v1")) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> registry.fromName("dDSE_v1")) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void should_downgrade_if_lower_version_available() { + Optional<ProtocolVersion> downgraded = registry.downgrade(V4); + downgraded.map(version -> assertThat(version).isEqualTo(V3)).orElseThrow(AssertionError::new); + } + + @Test + public void should_not_downgrade_if_no_lower_version() { + Optional<ProtocolVersion> downgraded = registry.downgrade(V3); + assertThat(downgraded.isPresent()).isFalse(); + } + + @Test + public void should_downgrade_from_dse_to_oss() { + assertThat(registry.downgrade(DseProtocolVersion.DSE_V1).get()).isEqualTo(ProtocolVersion.V5); + } + + @Test + public void should_pick_dse_v2_as_highest_common_when_all_nodes_are_dse_7() { + assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("7.0"), mockDseNode("7.1")))) + .isEqualTo(DseProtocolVersion.DSE_V2); + } + + @Test + public void should_pick_dse_v2_as_highest_common_when_all_nodes_are_dse_6() { + assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("6.0"), mockDseNode("6.1")))) + .isEqualTo(DseProtocolVersion.DSE_V2); + } + + @Test + public void should_pick_dse_v1_as_highest_common_when_all_nodes_are_dse_5_1_or_more() { + 
assertThat(registry.highestCommon(ImmutableList.of(mockDseNode("5.1"), mockDseNode("6.1")))) + .isEqualTo(DseProtocolVersion.DSE_V1); + } + + @Test + public void should_pick_oss_v4_as_highest_common_when_all_nodes_are_dse_5_or_more() { + assertThat( + registry.highestCommon( + ImmutableList.of(mockDseNode("5.0"), mockDseNode("5.1"), mockDseNode("6.1")))) + .isEqualTo(ProtocolVersion.V4); + } + + @Test + public void should_pick_oss_v3_as_highest_common_when_all_nodes_are_dse_4_7_or_more() { + assertThat( + registry.highestCommon( + ImmutableList.of(mockDseNode("4.7"), mockDseNode("5.1"), mockDseNode("6.1")))) + .isEqualTo(ProtocolVersion.V3); + } + + @Test(expected = UnsupportedProtocolVersionException.class) + public void should_fail_to_pick_highest_common_when_one_node_is_dse_4_6() { + registry.highestCommon( + ImmutableList.of(mockDseNode("4.6"), mockDseNode("5.1"), mockDseNode("6.1"))); + } + + @Test(expected = UnsupportedProtocolVersionException.class) + public void should_fail_to_pick_highest_common_when_one_node_is_2_0() { + registry.highestCommon( + ImmutableList.of(mockCassandraNode("3.0.0"), mockCassandraNode("2.0.9"))); + } + + @Test + public void should_pick_oss_v3_as_highest_common_when_one_node_is_cassandra_2_1() { + assertThat( + registry.highestCommon( + ImmutableList.of( + mockDseNode("5.1"), // oss v4 + mockDseNode("6.1"), // oss v4 + mockCassandraNode("2.1") // oss v3 + ))) + .isEqualTo(ProtocolVersion.V3); + } + + @Test + public void should_support_date_type_on_oss_v4_and_later() { + assertThat(registry.supports(V3, DATE_TYPE)).isFalse(); + assertThat(registry.supports(V4, DATE_TYPE)).isTrue(); + assertThat(registry.supports(V5, DATE_TYPE)).isTrue(); + assertThat(registry.supports(V6, DATE_TYPE)).isTrue(); + assertThat(registry.supports(DSE_V1, DATE_TYPE)).isTrue(); + assertThat(registry.supports(DSE_V2, DATE_TYPE)).isTrue(); + } + + @Test + public void should_support_smallint_and_tinyint_types_on_oss_v4_and_later() { + assertThat(registry.supports(V3, SMALLINT_AND_TINYINT_TYPES)).isFalse(); + assertThat(registry.supports(V4, SMALLINT_AND_TINYINT_TYPES)).isTrue(); + assertThat(registry.supports(V5, SMALLINT_AND_TINYINT_TYPES)).isTrue(); + assertThat(registry.supports(V6, SMALLINT_AND_TINYINT_TYPES)).isTrue(); + assertThat(registry.supports(DSE_V1, SMALLINT_AND_TINYINT_TYPES)).isTrue(); + assertThat(registry.supports(DSE_V2, SMALLINT_AND_TINYINT_TYPES)).isTrue(); + } + + private Node mockCassandraNode(String rawVersion) { + Node node = Mockito.mock(Node.class); + if (rawVersion != null) { + Mockito.when(node.getCassandraVersion()).thenReturn(Version.parse(rawVersion)); + } + return node; + } + + private Node mockDseNode(String rawDseVersion) { + Node node = Mockito.mock(Node.class); + Version dseVersion = Version.parse(rawDseVersion); + Mockito.when(node.getExtras()) + .thenReturn(ImmutableMap.of(DseNodeProperties.DSE_VERSION, dseVersion)); + + Version cassandraVersion; + if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_7_0_0) >= 0) { + cassandraVersion = Version.parse("5.0"); + } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_6_0_0) >= 0) { + cassandraVersion = Version.parse("4.0"); + } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_5_1_0) >= 0) { + cassandraVersion = Version.parse("3.11"); + } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_5_0_0) >= 0) { + cassandraVersion = Version.parse("3.0"); + } else if (dseVersion.compareTo(DefaultProtocolVersionRegistry.DSE_4_7_0) >= 0) { + cassandraVersion = 
Version.parse("2.1"); + } else { + cassandraVersion = Version.parse("2.0"); + } + Mockito.when(node.getCassandraVersion()).thenReturn(cassandraVersion); + + return node; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java index 8da0b24ecb2..adbe26159db 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/DriverConfigAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java b/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java new file mode 100644 index 00000000000..731c558a81f --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/MockAsyncPagingIterable.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core; + +import com.datastax.oss.driver.api.core.AsyncPagingIterable; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.ArrayDeque; +import java.util.Collections; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; + +/** Splits a fixed element list into pages of {@code fetchSize}, chained via {@code fetchNextPage()}. */ +public class MockAsyncPagingIterable<ElementT> + implements AsyncPagingIterable<ElementT, MockAsyncPagingIterable<ElementT>> { + + private final Queue<ElementT> currentPage; + private final MockAsyncPagingIterable<ElementT> nextPage; + + public MockAsyncPagingIterable(List<ElementT> elements, int fetchSize, boolean addEmptyLastPage) { + if (elements.size() <= fetchSize) { + currentPage = new ArrayDeque<>(elements); + nextPage = + addEmptyLastPage + ? new MockAsyncPagingIterable<>(Collections.emptyList(), fetchSize, false) + : null; + } else { + currentPage = new ArrayDeque<>(elements.subList(0, fetchSize)); + nextPage = + new MockAsyncPagingIterable<>( + elements.subList(fetchSize, elements.size()), fetchSize, addEmptyLastPage); + } + } + + @NonNull + @Override + public Iterable<ElementT> currentPage() { + return currentPage; + } + + @Override + public int remaining() { + return currentPage.size(); + } + + @Override + public boolean hasMorePages() { + return nextPage != null; + } + + @NonNull + @Override + public CompletionStage<MockAsyncPagingIterable<ElementT>> fetchNextPage() + throws IllegalStateException { + Preconditions.checkState(nextPage != null); + return CompletableFuture.completedFuture(nextPage); + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + throw new UnsupportedOperationException("irrelevant"); + } + + @NonNull + @Override + public ExecutionInfo getExecutionInfo() { + throw new UnsupportedOperationException("irrelevant"); + } + + @Override + public boolean wasApplied() { + throw new UnsupportedOperationException("irrelevant"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java b/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java new file mode 100644 index 00000000000..885983ee98e --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/MockPagingIterable.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
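
A quick usage sketch of `MockAsyncPagingIterable` above (values arbitrary; `join()` is safe here because the futures are pre-completed): six elements with a fetch size of two produce a chain of three pages.

```java
import java.util.Arrays;

public class MockAsyncPagingIterableSketch {
  public static void main(String[] args) {
    MockAsyncPagingIterable<Integer> page =
        new MockAsyncPagingIterable<>(Arrays.asList(1, 2, 3, 4, 5, 6), 2, false);
    System.out.println(page.currentPage()); // [1, 2]
    while (page.hasMorePages()) {
      page = page.fetchNextPage().toCompletableFuture().join();
      System.out.println(page.currentPage()); // [3, 4] then [5, 6]
    }
  }
}
```
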
+ */ +package com.datastax.oss.driver.internal.core; + +import com.datastax.oss.driver.api.core.PagingIterable; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Iterator; +import java.util.List; + +/** Adapts a plain {@code Iterator} to the synchronous {@code PagingIterable} contract. */ +public class MockPagingIterable<ElementT> implements PagingIterable<ElementT> { + + private final Iterator<ElementT> iterator; + + public MockPagingIterable(Iterator<ElementT> iterator) { + this.iterator = iterator; + } + + @NonNull + @Override + public Iterator<ElementT> iterator() { + return iterator; + } + + @Override + public boolean isFullyFetched() { + return !iterator.hasNext(); + } + + @NonNull + @Override + public ColumnDefinitions getColumnDefinitions() { + throw new UnsupportedOperationException("irrelevant"); + } + + @NonNull + @Override + public List<ExecutionInfo> getExecutionInfos() { + throw new UnsupportedOperationException("irrelevant"); + } + + @Override + public int getAvailableWithoutFetching() { + throw new UnsupportedOperationException("irrelevant"); + } + + @Override + public boolean wasApplied() { + throw new UnsupportedOperationException("irrelevant"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java index 0e572b89f93..15af3c61bff 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/NettyFutureAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
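
And a matching sketch for the synchronous `MockPagingIterable` above (this assumes `PagingIterable`'s inherited `one()` helper, which consumes one element from the iterator):

```java
import java.util.Arrays;

public class MockPagingIterableSketch {
  public static void main(String[] args) {
    MockPagingIterable<String> rows =
        new MockPagingIterable<>(Arrays.asList("a", "b").iterator());
    System.out.println(rows.one()); // "a"
    System.out.println(rows.isFullyFetched()); // false: "b" has not been consumed yet
  }
}
```
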
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java index 85718052928..1e7cc62f8ac 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/PagingIterableWrapperTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java b/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java index ecb84c0aced..ce028e66dbd 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/TestResponses.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +19,10 @@ import com.datastax.oss.driver.shaded.guava.common.base.Charsets; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.Lists; import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.response.Supported; import com.datastax.oss.protocol.internal.response.result.ColumnSpec; import com.datastax.oss.protocol.internal.response.result.DefaultRows; import com.datastax.oss.protocol.internal.response.result.RawType; @@ -26,6 +30,7 @@ import com.datastax.oss.protocol.internal.response.result.RowsMetadata; import java.nio.ByteBuffer; import java.util.List; +import java.util.Map; import java.util.Queue; public class TestResponses { @@ -43,4 +48,9 @@ public static Rows clusterNameResponse(String actualClusterName) { data.add(Lists.newArrayList(ByteBuffer.wrap(actualClusterName.getBytes(Charsets.UTF_8)))); return new DefaultRows(metadata, data); } + + public static Supported supportedResponse(String key, String value) { + Map<String, List<String>> options = ImmutableMap.of(key, ImmutableList.of(value)); + return new Supported(options); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java index 2b2f7cf5eb8..2b871b3e0cc 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslatorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java new file mode 100644 index 00000000000..3bb9c4bc291 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/FixedHostNameAddressTranslatorTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; +import java.net.InetSocketAddress; +import org.junit.Test; + +public class FixedHostNameAddressTranslatorTest { + + @Test + public void should_translate_address() { + DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); + when(defaultProfile.getString(ADDRESS_TRANSLATOR_ADVERTISED_HOSTNAME)).thenReturn("myaddress"); + DefaultDriverContext defaultDriverContext = + MockedDriverContextFactory.defaultDriverContext(defaultProfile); + + FixedHostNameAddressTranslator translator = + new FixedHostNameAddressTranslator(defaultDriverContext); + InetSocketAddress address = new InetSocketAddress("192.0.2.5", 6061); + + assertThat(translator.translate(address)).isEqualTo(new InetSocketAddress("myaddress", 6061)); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java new file mode 100644 index 00000000000..bd505f5dd44 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
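
The contract exercised by `FixedHostNameAddressTranslatorTest` above reduces to "replace the host, keep the port". A hand-rolled illustration of that behaviour (this is not the driver class, just a sketch of the expected mapping):

```java
import java.net.InetSocketAddress;

final class FixedHostNameSketch {
  private final String advertisedHostName;

  FixedHostNameSketch(String advertisedHostName) {
    this.advertisedHostName = advertisedHostName;
  }

  InetSocketAddress translate(InetSocketAddress address) {
    // The host is replaced with the configured name; the port is preserved.
    return new InetSocketAddress(advertisedHostName, address.getPort());
  }
}
```
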
+ */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import java.net.InetSocketAddress; +import org.junit.Test; + +public class SubnetAddressTest { + @Test + public void should_return_true_on_overlapping_with_another_subnet_address() { + // 100.64.0.0/15 spans 100.64.0.0-100.65.255.255, which contains 100.65.0.0/16. + SubnetAddress subnetAddress1 = + new SubnetAddress("100.64.0.0/15", mock(InetSocketAddress.class)); + SubnetAddress subnetAddress2 = + new SubnetAddress("100.65.0.0/16", mock(InetSocketAddress.class)); + assertThat(subnetAddress1.isOverlapping(subnetAddress2)).isTrue(); + } + + @Test + public void should_return_false_on_not_overlapping_with_another_subnet_address() { + // 100.66.0.0/15 starts at 100.66.0.0, just past the end of 100.64.0.0/15. + SubnetAddress subnetAddress1 = + new SubnetAddress("100.64.0.0/15", mock(InetSocketAddress.class)); + SubnetAddress subnetAddress2 = + new SubnetAddress("100.66.0.0/15", mock(InetSocketAddress.class)); + assertThat(subnetAddress1.isOverlapping(subnetAddress2)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java new file mode 100644 index 00000000000..420170654dc --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetAddressTranslatorTest.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_DEFAULT_ADDRESS; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.ADDRESS_TRANSLATOR_SUBNET_ADDRESSES; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; +import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.net.InetSocketAddress; +import java.util.Map; +import org.junit.Test; + +@SuppressWarnings("resource") +public class SubnetAddressTranslatorTest { + + @Test + public void should_translate_to_correct_subnet_address_ipv4() { + // Subnet keys are quoted (in full or in part) because '.' and '/' are special + // characters in configuration paths; both spellings resolve to the same CIDR. + Map<String, String> subnetAddresses = + ImmutableMap.of( + "\"100.64.0.0/15\"", "cassandra.datacenter1.com:19042", + "100.66.0.\"0/15\"", "cassandra.datacenter2.com:19042"); + DefaultDriverContext context = context(subnetAddresses); + SubnetAddressTranslator translator = new SubnetAddressTranslator(context); + InetSocketAddress address = new InetSocketAddress("100.64.0.1", 9042); + assertThat(translator.translate(address)) + .isEqualTo(InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)); + } + + @Test + public void should_translate_to_correct_subnet_address_ipv6() { + Map<String, String> subnetAddresses = + ImmutableMap.of( + "\"::ffff:6440:0/111\"", "cassandra.datacenter1.com:19042", + "\"::ffff:6442:0/111\"", "cassandra.datacenter2.com:19042"); + DefaultDriverContext context = context(subnetAddresses); + SubnetAddressTranslator translator = new SubnetAddressTranslator(context); + InetSocketAddress address = new InetSocketAddress("::ffff:6440:1", 9042); + assertThat(translator.translate(address)) + .isEqualTo(InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)); + } + + @Test + public void should_translate_to_default_address() { + DefaultDriverContext context = context(ImmutableMap.of()); + when(context + .getConfig() + .getDefaultProfile() + .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) + .thenReturn("cassandra.com:19042"); + SubnetAddressTranslator translator = new SubnetAddressTranslator(context); + InetSocketAddress address = new InetSocketAddress("100.68.0.1", 9042); + assertThat(translator.translate(address)) + .isEqualTo(InetSocketAddress.createUnresolved("cassandra.com", 19042)); + } + + @Test + public void should_pass_through_not_matched_address() { + DefaultDriverContext context = context(ImmutableMap.of()); + SubnetAddressTranslator translator = new SubnetAddressTranslator(context); + InetSocketAddress address = new InetSocketAddress("100.68.0.1", 9042); + assertThat(translator.translate(address)).isEqualTo(address); + } + + @Test + public void should_fail_on_intersecting_subnets_ipv4() { + Map<String, String> subnetAddresses = + ImmutableMap.of( + "\"100.64.0.0/15\"", "cassandra.datacenter1.com:19042", + "100.65.0.\"0/16\"", "cassandra.datacenter2.com:19042"); + DefaultDriverContext context = context(subnetAddresses); + assertThatIllegalArgumentException() + .isThrownBy(() -> new SubnetAddressTranslator(context)) + .withMessage( + "Configured subnets are overlapping: " + + String.format( + "SubnetAddress[subnet=[100, 64, 0, 0], address=%s], ", + InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)) + + String.format( + "SubnetAddress[subnet=[100, 65, 0, 0], address=%s]", + InetSocketAddress.createUnresolved("cassandra.datacenter2.com", 19042))); + } + + @Test + public void should_fail_on_intersecting_subnets_ipv6() { + Map<String, String> subnetAddresses = + ImmutableMap.of( + "\"::ffff:6440:0/111\"", "cassandra.datacenter1.com:19042", + "\"::ffff:6441:0/112\"", "cassandra.datacenter2.com:19042"); + DefaultDriverContext context = context(subnetAddresses); + assertThatIllegalArgumentException() + .isThrownBy(() -> new SubnetAddressTranslator(context)) + .withMessage( + "Configured subnets are overlapping: " + + String.format( + "SubnetAddress[subnet=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0], address=%s], ", + InetSocketAddress.createUnresolved("cassandra.datacenter1.com", 19042)) + + String.format( + "SubnetAddress[subnet=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 65, 0, 0], address=%s]", + InetSocketAddress.createUnresolved("cassandra.datacenter2.com", 19042))); + } + + @Test + public void should_fail_on_subnet_address_without_port() { + Map<String, String> subnetAddresses = + ImmutableMap.of("\"100.64.0.0/15\"", "cassandra.datacenter1.com"); + DefaultDriverContext context = context(subnetAddresses); + assertThatIllegalArgumentException() + .isThrownBy(() -> new SubnetAddressTranslator(context)) + .withMessage("Invalid address cassandra.datacenter1.com (expecting format host:port)"); + } + + @Test + public void should_fail_on_default_address_without_port() { + DefaultDriverContext context = context(ImmutableMap.of()); + when(context + .getConfig() + .getDefaultProfile() + .getString(ADDRESS_TRANSLATOR_DEFAULT_ADDRESS, null)) + .thenReturn("cassandra.com"); + assertThatIllegalArgumentException() + .isThrownBy(() -> new SubnetAddressTranslator(context)) + .withMessage("Invalid address cassandra.com (expecting format host:port)"); + } + + private static DefaultDriverContext context(Map<String, String> subnetAddresses) { + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + when(profile.getStringMap(ADDRESS_TRANSLATOR_SUBNET_ADDRESSES)).thenReturn(subnetAddresses); + return MockedDriverContextFactory.defaultDriverContext(profile); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java new file mode 100644 index 00000000000..f8ba8929e9e --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/addresstranslation/SubnetTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.internal.core.addresstranslation; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.assertj.core.api.Assertions.assertThatNoException; + +import java.net.UnknownHostException; +import org.junit.Test; + +public class SubnetTest { + @Test + public void should_parse_to_correct_ipv4_subnet() throws UnknownHostException { + Subnet subnet = Subnet.parse("100.64.0.0/15"); + assertThat(subnet.getSubnet()).containsExactly(100, 64, 0, 0); + assertThat(subnet.getNetworkMask()).containsExactly(255, 254, 0, 0); + assertThat(subnet.getUpper()).containsExactly(100, 65, 255, 255); + assertThat(subnet.getLower()).containsExactly(100, 64, 0, 0); + } + + @Test + public void should_parse_to_correct_ipv6_subnet() throws UnknownHostException { + Subnet subnet = Subnet.parse("2001:db8:85a3::8a2e:370:0/111"); + assertThat(subnet.getSubnet()) + .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 112, 0, 0); + assertThat(subnet.getNetworkMask()) + .containsExactly( + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 0, 0); + assertThat(subnet.getUpper()) + .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 113, 255, 255); + assertThat(subnet.getLower()) + .containsExactly(32, 1, 13, 184, 133, 163, 0, 0, 0, 0, 138, 46, 3, 112, 0, 0); + } + + @Test + public void should_parse_to_correct_ipv6_subnet_ipv4_convertible() throws UnknownHostException { + Subnet subnet = Subnet.parse("::ffff:6440:0/111"); + assertThat(subnet.getSubnet()) + .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0); + assertThat(subnet.getNetworkMask()) + .containsExactly( + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 0, 0); + assertThat(subnet.getUpper()) + .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 65, 255, 255); + assertThat(subnet.getLower()) + .containsExactly(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 100, 64, 0, 0); + } + + @Test + public void should_fail_on_invalid_cidr_format() { + assertThatIllegalArgumentException() + .isThrownBy(() -> Subnet.parse("invalid")) + .withMessage("Invalid subnet: invalid"); + } + + @Test + public void should_parse_bounding_prefix_lengths_correctly() { + assertThatNoException().isThrownBy(() -> Subnet.parse("0.0.0.0/0")); + assertThatNoException().isThrownBy(() -> Subnet.parse("100.64.0.0/32")); + } + + @Test + public void should_fail_on_invalid_prefix_length() { + assertThatIllegalArgumentException() + .isThrownBy(() -> Subnet.parse("100.64.0.0/-1")) + .withMessage("Prefix length -1 must be within [0; 32]"); + assertThatIllegalArgumentException() + .isThrownBy(() -> Subnet.parse("100.64.0.0/33")) + .withMessage("Prefix length 33 must be within [0; 32]"); + } + + @Test + public void should_fail_on_not_prefix_block_subnet_ipv4() { + assertThatIllegalArgumentException() + .isThrownBy(() -> Subnet.parse("100.65.0.0/15")) + .withMessage("Subnet 100.65.0.0/15 must be represented as a network prefix block"); + } + + @Test + public void should_fail_on_not_prefix_block_subnet_ipv6() { + assertThatIllegalArgumentException() + .isThrownBy(() -> Subnet.parse("::ffff:6441:0/111")) + .withMessage("Subnet ::ffff:6441:0/111 must be represented as a network prefix block"); + } + + @Test + public void should_return_true_on_containing_address() throws UnknownHostException { + Subnet subnet = Subnet.parse("100.64.0.0/15"); + assertThat(subnet.contains(new byte[] {100, 
64, 0, 0})).isTrue(); + assertThat(subnet.contains(new byte[] {100, 65, (byte) 255, (byte) 255})).isTrue(); + assertThat(subnet.contains(new byte[] {100, 65, 100, 100})).isTrue(); + } + + @Test + public void should_return_false_on_not_containing_address() throws UnknownHostException { + Subnet subnet = Subnet.parse("100.64.0.0/15"); + assertThat(subnet.contains(new byte[] {100, 63, (byte) 255, (byte) 255})).isFalse(); + assertThat(subnet.contains(new byte[] {100, 66, 0, 0})).isFalse(); + // IPv6 cannot be contained by IPv4 subnet. + assertThat(subnet.contains(new byte[16])).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java index 1f9ad10478a..a1eab41b998 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryAvailableIdsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -69,6 +71,7 @@ public void should_report_available_ids() { assertThat(channel.getAvailableIds()).isEqualTo(128); // Write a request, should decrease the count + assertThat(channel.preAcquireId()).isTrue(); Future writeFuture = channel.write(new Query("test"), false, Frame.NO_PAYLOAD, responseCallback); assertThat(writeFuture) diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java index f61b0501c61..d9793247c9c 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryClusterNameTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,12 +43,14 @@ public void should_set_cluster_name_from_first_connection() { factory.connect( SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); + writeInboundFrame( + readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); writeInboundFrame(readOutboundFrame(), new Ready()); writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); // Then assertThatStage(channelFuture).isSuccess(); - assertThat(factory.clusterName).isEqualTo("mockClusterName"); + assertThat(factory.getClusterName()).isEqualTo("mockClusterName"); } @Test @@ -61,6 +65,8 @@ public void should_check_cluster_name_for_next_connections() throws Throwable { factory.connect( SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); // open a first connection that will define the cluster name + writeInboundFrame( + readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); writeInboundFrame(readOutboundFrame(), new Ready()); writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); assertThatStage(channelFuture).isSuccess(); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java index 500c665cdd7..b9738a140c0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryProtocolNegotiationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,7 @@ import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; import com.datastax.oss.protocol.internal.Frame; import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.request.Options; import com.datastax.oss.protocol.internal.response.Error; import com.datastax.oss.protocol.internal.response.Ready; import com.tngtech.java.junit.dataprovider.DataProvider; @@ -72,6 +75,10 @@ public void should_fail_if_version_specified_and_not_supported_by_server(int err SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + requestFrame = readOutboundFrame(); assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); // Server does not support v4 writeInboundFrame( @@ -89,6 +96,44 @@ public void should_fail_if_version_specified_and_not_supported_by_server(int err }); } + @Test + public void should_fail_if_version_specified_and_considered_beta_by_server() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(true); + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn("V5"); + when(protocolVersionRegistry.fromName("V5")).thenReturn(DefaultProtocolVersion.V5); + ChannelFactory factory = newChannelFactory(); + + // When + CompletionStage channelFuture = + factory.connect( + SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); + + Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + requestFrame = readOutboundFrame(); + assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V5.getCode()); + // Server considers v5 beta, e.g. 
C* 3.10 or 3.11 + writeInboundFrame( + requestFrame, + new Error( + ProtocolConstants.ErrorCode.PROTOCOL_ERROR, + "Beta version of the protocol used (5/v5-beta), but USE_BETA flag is unset")); + + // Then + assertThatStage(channelFuture) + .isFailed( + e -> { + assertThat(e) + .isInstanceOf(UnsupportedProtocolVersionException.class) + .hasMessageContaining("Host does not support protocol version V5"); + assertThat(((UnsupportedProtocolVersionException) e).getAttemptedVersions()) + .containsExactly(DefaultProtocolVersion.V5); + }); + } + @Test public void should_succeed_if_version_not_specified_and_server_supports_latest_supported() { // Given @@ -102,6 +147,10 @@ public void should_succeed_if_version_not_specified_and_server_supports_latest_s SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + requestFrame = readOutboundFrame(); assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); writeInboundFrame(requestFrame, new Ready()); @@ -130,6 +179,10 @@ public void should_negotiate_if_version_not_specified_and_server_supports_legacy SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + requestFrame = readOutboundFrame(); assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); // Server does not support v4 writeInboundFrame( @@ -137,6 +190,10 @@ public void should_negotiate_if_version_not_specified_and_server_supports_legacy // Then // Factory should initialize a new connection, that retries with the lower version + requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + requestFrame = readOutboundFrame(); assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V3.getCode()); writeInboundFrame(requestFrame, new Ready()); @@ -165,12 +222,20 @@ public void should_fail_if_negotiation_finds_no_matching_version(int errorCode) SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + requestFrame = readOutboundFrame(); assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V4.getCode()); // Server does not support v4 writeInboundFrame( requestFrame, new Error(errorCode, "Invalid or unsupported protocol version")); // Client retries with v3 + requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + requestFrame = readOutboundFrame(); assertThat(requestFrame.protocolVersion).isEqualTo(DefaultProtocolVersion.V3.getCode()); // Server does not support v3 diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java new file mode 100644 index 00000000000..559e11e0bc2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactorySupportedOptionsTest.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.channel; + +import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.internal.core.TestResponses; +import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; +import com.datastax.oss.protocol.internal.response.Ready; +import java.util.concurrent.CompletionStage; +import org.junit.Test; + +public class ChannelFactorySupportedOptionsTest extends ChannelFactoryTestBase { + + @Test + public void should_query_supported_options_on_first_channel() throws Throwable { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.PROTOCOL_VERSION)).thenReturn(false); + when(protocolVersionRegistry.highestNonBeta()).thenReturn(DefaultProtocolVersion.V4); + ChannelFactory factory = newChannelFactory(); + + // When + CompletionStage channelFuture1 = + factory.connect( + SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); + writeInboundFrame( + readOutboundFrame(), TestResponses.supportedResponse("mock_key", "mock_value")); + writeInboundFrame(readOutboundFrame(), new Ready()); + writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); + + // Then + assertThatStage(channelFuture1).isSuccess(); + DriverChannel channel1 = channelFuture1.toCompletableFuture().get(); + assertThat(channel1.getOptions()).containsKey("mock_key"); + assertThat(channel1.getOptions().get("mock_key")).containsOnly("mock_value"); + + // When + CompletionStage channelFuture2 = + factory.connect( + SERVER_ADDRESS, DriverChannelOptions.DEFAULT, NoopNodeMetricUpdater.INSTANCE); + writeInboundFrame(readOutboundFrame(), new Ready()); + writeInboundFrame(readOutboundFrame(), TestResponses.clusterNameResponse("mockClusterName")); + + // Then + assertThatStage(channelFuture2).isSuccess(); + DriverChannel channel2 = channelFuture2.toCompletableFuture().get(); + assertThat(channel2.getOptions()).isNull(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java index afcb507bfad..b25a1e9ad71 100644 
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelFactoryTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.channel; import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; import static org.mockito.Mockito.when; @@ -35,6 +38,8 @@ import com.datastax.oss.protocol.internal.Frame; import com.datastax.oss.protocol.internal.FrameCodec; import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.request.Options; +import com.datastax.oss.protocol.internal.request.Startup; import com.datastax.oss.protocol.internal.response.Ready; import com.tngtech.java.junit.dataprovider.DataProviderRunner; import io.netty.bootstrap.ServerBootstrap; @@ -120,6 +125,8 @@ public void setup() throws InterruptedException { when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS)).thenReturn(1); when(defaultProfile.getDuration(DefaultDriverOption.HEARTBEAT_INTERVAL)) .thenReturn(Duration.ofSeconds(30)); + when(defaultProfile.getDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT)) + .thenReturn(Duration.ofSeconds(5)); when(context.getProtocolVersionRegistry()).thenReturn(protocolVersionRegistry); when(context.getNettyOptions()).thenReturn(nettyOptions); @@ -200,6 +207,11 @@ private void writeInboundFrame(Frame requestFrame, Message response, int protoco */ protected void completeSimpleChannelInit() { Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Startup.class); writeInboundFrame(requestFrame, new Ready()); requestFrame = readOutboundFrame(); @@ -252,8 +264,17 @@ protected void initChannel(Channel channel) throws Exception { HeartbeatHandler heartbeatHandler = new HeartbeatHandler(defaultProfile); ProtocolInitHandler initHandler = new ProtocolInitHandler( - context, protocolVersion, clusterName, endPoint, options, heartbeatHandler); - channel.pipeline().addLast("inflight", inFlightHandler).addLast("init", initHandler); + context, + protocolVersion, + getClusterName(), + endPoint, + options, + heartbeatHandler, + productType == null); + channel + .pipeline() + .addLast(ChannelFactory.INFLIGHT_HANDLER_NAME, inFlightHandler) + .addLast(ChannelFactory.INIT_HANDLER_NAME, initHandler); } catch 
(Throwable t) { resultFuture.completeExceptionally(t); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java index d0da5377c5f..5feb85a457b 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ChannelHandlerTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java index ba5fd28aeb5..6024ed26a5c 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ConnectInitHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java index 75ebcab9efa..e0660b9609e 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/DriverChannelTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -62,7 +64,7 @@ public void setup() { writeCoalescer = new MockWriteCoalescer(); driverChannel = new DriverChannel( - new EmbeddedEndPoint(channel), channel, writeCoalescer, DefaultProtocolVersion.V3); + new EmbeddedEndPoint(), channel, writeCoalescer, DefaultProtocolVersion.V3); } /** diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java index cb4b5071b18..5e463299a66 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/EmbeddedEndPoint.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,18 +19,11 @@ import com.datastax.oss.driver.api.core.metadata.EndPoint; import edu.umd.cs.findbugs.annotations.NonNull; -import io.netty.channel.embedded.EmbeddedChannel; import java.net.SocketAddress; /** Endpoint implementation for unit tests that use an embedded Netty channel. */ public class EmbeddedEndPoint implements EndPoint { - private final SocketAddress address; - - public EmbeddedEndPoint(EmbeddedChannel channel) { - this.address = channel.remoteAddress(); - } - @NonNull @Override public SocketAddress resolve() { diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java index 7b8c7f870ce..35049e99af1 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/InFlightHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,7 +39,9 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelPromise; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.concurrent.TimeUnit; import org.junit.Before; import org.junit.Test; @@ -57,6 +61,7 @@ public class InFlightHandlerTest extends ChannelHandlerTestBase { public void setup() { super.setup(); MockitoAnnotations.initMocks(this); + when(streamIds.preAcquire()).thenReturn(true); } @Test @@ -253,7 +258,7 @@ public void should_refuse_new_writes_during_graceful_close() { } @Test - public void should_close_gracefully_if_orphan_ids_above_max_and_pending_requests() { + public void should_close_gracefully_if_orphan_ids_above_max_and_pending_request() { // Given addToPipeline(); // Generate n orphan ids by writing and cancelling the requests: @@ -308,6 +313,65 @@ public void should_close_gracefully_if_orphan_ids_above_max_and_pending_requests assertThat(channel.closeFuture()).isSuccess(); } + @Test + public void should_close_gracefully_if_orphan_ids_above_max_and_multiple_pending_requests() { + // Given + addToPipeline(); + // Generate n orphan ids by writing and cancelling the requests. + for (int i = 0; i < MAX_ORPHAN_IDS; i++) { + when(streamIds.acquire()).thenReturn(i); + MockResponseCallback responseCallback = new MockResponseCallback(); + channel + .writeAndFlush( + new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) + .awaitUninterruptibly(); + channel.writeAndFlush(responseCallback).awaitUninterruptibly(); + } + // Generate 3 additional requests that are pending and not cancelled. + List pendingResponseCallbacks = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + when(streamIds.acquire()).thenReturn(MAX_ORPHAN_IDS + i); + MockResponseCallback responseCallback = new MockResponseCallback(); + channel + .writeAndFlush( + new DriverChannel.RequestMessage(QUERY, false, Frame.NO_PAYLOAD, responseCallback)) + .awaitUninterruptibly(); + pendingResponseCallbacks.add(responseCallback); + } + + // When + // Generate the n+1th orphan id that makes us go above the threshold by cancelling one of the + pending requests. + channel.writeAndFlush(pendingResponseCallbacks.remove(0)).awaitUninterruptibly(); + + // Then + // Channel should be closing gracefully but there's no way to observe that from the outside + // besides writing another request and checking that it's rejected.
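+ // (The close future itself only completes at the very end of this test, once every + // remaining pending request has finished.)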
+ assertThat(channel.closeFuture()).isNotDone(); + ChannelFuture otherWriteFuture = + channel.writeAndFlush( + new DriverChannel.RequestMessage( + QUERY, false, Frame.NO_PAYLOAD, new MockResponseCallback())); + assertThat(otherWriteFuture).isFailed(); + assertThat(otherWriteFuture.cause()) + .isInstanceOf(IllegalStateException.class) + .hasMessage("Channel is closing"); + + // When + // Cancel the remaining pending requests, generating more orphan ids above the threshold. + for (MockResponseCallback pendingResponseCallback : pendingResponseCallbacks) { + ChannelFuture future = channel.writeAndFlush(pendingResponseCallback).awaitUninterruptibly(); + + // Then + // The future should succeed even though the channel has started closing gracefully. + assertThat(future).isSuccess(); + } + + // Then + // The graceful shutdown completes. + assertThat(channel.closeFuture()).isSuccess(); + } + @Test public void should_close_immediately_if_orphan_ids_above_max_and_no_pending_requests() { // Given diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java index a82ddf1115a..c90731eece9 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/LocalEndPoint.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java index 26db7c768b4..6015203ed38 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockAuthenticator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java index 8379585ddf4..43768131108 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockChannelFactoryHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java index d8f5604ac72..8774ee3e298 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/MockResponseCallback.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java index 5b134f9bc26..2fd12fc9f94 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/ProtocolInitHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,10 +18,16 @@ package com.datastax.oss.driver.internal.core.channel; import static com.datastax.oss.driver.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.DefaultProtocolVersion; import com.datastax.oss.driver.api.core.InvalidKeyspaceException; @@ -29,7 +37,7 @@ import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.CassandraProtocolVersionRegistry; +import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.TestResponses; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; @@ -38,6 +46,7 @@ import com.datastax.oss.protocol.internal.Frame; import com.datastax.oss.protocol.internal.ProtocolConstants; import com.datastax.oss.protocol.internal.request.AuthResponse; +import com.datastax.oss.protocol.internal.request.Options; import com.datastax.oss.protocol.internal.request.Query; import com.datastax.oss.protocol.internal.request.Register; import com.datastax.oss.protocol.internal.request.Startup; @@ -49,15 +58,19 @@ import com.datastax.oss.protocol.internal.response.result.SetKeyspace; import com.datastax.oss.protocol.internal.util.Bytes; import io.netty.channel.ChannelFuture; +import java.io.IOException; import java.net.InetSocketAddress; import java.time.Duration; +import java.util.ConcurrentModificationException; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.slf4j.LoggerFactory; public class ProtocolInitHandlerTest extends ChannelHandlerTestBase { @@ -69,9 +82,10 @@ public class ProtocolInitHandlerTest extends ChannelHandlerTestBase { @Mock private InternalDriverContext internalDriverContext; @Mock private DriverConfig driverConfig; @Mock private DriverExecutionProfile defaultProfile; + @Mock private Appender appender; private ProtocolVersionRegistry protocolVersionRegistry = - new CassandraProtocolVersionRegistry("test"); + new DefaultProtocolVersionRegistry("test"); private HeartbeatHandler 
heartbeatHandler; @Before @@ -90,7 +104,7 @@ public void setup() { channel .pipeline() .addLast( - "inflight", + ChannelFactory.INFLIGHT_HANDLER_NAME, new InFlightHandler( DefaultProtocolVersion.V4, new StreamIdGenerator(100), @@ -108,14 +122,15 @@ public void should_initialize() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -136,6 +151,52 @@ public void should_initialize() { assertThat(connectFuture).isSuccess(); } + @Test + public void should_query_supported_options() { + channel + .pipeline() + .addLast( + ChannelFactory.INIT_HANDLER_NAME, + new ProtocolInitHandler( + internalDriverContext, + DefaultProtocolVersion.V4, + null, + END_POINT, + DriverChannelOptions.DEFAULT, + heartbeatHandler, + true)); + + ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); + + // It should send an OPTIONS message + Frame requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Options.class); + assertThat(connectFuture).isNotDone(); + + // Simulate the SUPPORTED response + writeInboundFrame(requestFrame, TestResponses.supportedResponse("mock_key", "mock_value")); + + Map> supportedOptions = channel.attr(DriverChannel.OPTIONS_KEY).get(); + assertThat(supportedOptions).containsKey("mock_key"); + assertThat(supportedOptions.get("mock_key")).containsOnly("mock_value"); + + // It should send a STARTUP message + requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Startup.class); + assertThat(connectFuture).isNotDone(); + + // Simulate a READY response + writeInboundFrame(buildInboundFrame(requestFrame, new Ready())); + + // Simulate the cluster name check + requestFrame = readOutboundFrame(); + assertThat(requestFrame.message).isInstanceOf(Query.class); + writeInboundFrame(requestFrame, TestResponses.clusterNameResponse("someClusterName")); + + // Init should complete + assertThat(connectFuture).isSuccess(); + } + @Test public void should_add_heartbeat_handler_to_pipeline_on_success() { ProtocolInitHandler protocolInitHandler = @@ -145,14 +206,15 @@ public void should_add_heartbeat_handler_to_pipeline_on_success() { null, END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler); + heartbeatHandler, + false); - channel.pipeline().addLast("init", protocolInitHandler); + channel.pipeline().addLast(ChannelFactory.INIT_HANDLER_NAME, protocolInitHandler); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); // heartbeat should initially not be in pipeline - assertThat(channel.pipeline().get("heartbeat")).isNull(); + assertThat(channel.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME)).isNull(); // It should send a STARTUP message Frame requestFrame = readOutboundFrame(); @@ -171,7 +233,8 @@ public void should_add_heartbeat_handler_to_pipeline_on_success() { assertThat(connectFuture).isSuccess(); // should have added heartbeat handler to pipeline. - assertThat(channel.pipeline().get("heartbeat")).isEqualTo(heartbeatHandler); + assertThat(channel.pipeline().get(ChannelFactory.HEARTBEAT_HANDLER_NAME)) + .isEqualTo(heartbeatHandler); // should have removed itself from pipeline. 
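+ // (ProtocolInitHandler is only needed during the handshake, so after a successful init it + // should no longer be the last handler in the pipeline.)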
assertThat(channel.pipeline().last()).isNotEqualTo(protocolInitHandler); } @@ -181,14 +244,15 @@ public void should_fail_to_initialize_if_init_query_times_out() throws Interrupt channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -206,14 +270,15 @@ public void should_initialize_with_authentication() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); String serverAuthenticator = "mockServerAuthenticator"; AuthProvider authProvider = mock(AuthProvider.class); @@ -270,14 +335,15 @@ public void should_invoke_auth_provider_when_server_does_not_send_challenge() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); AuthProvider authProvider = mock(AuthProvider.class); when(internalDriverContext.getAuthProvider()).thenReturn(Optional.of(authProvider)); @@ -303,14 +369,15 @@ public void should_fail_to_initialize_if_server_sends_auth_error() throws Throwa channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); String serverAuthenticator = "mockServerAuthenticator"; AuthProvider authProvider = mock(AuthProvider.class); @@ -340,7 +407,7 @@ public void should_fail_to_initialize_if_server_sends_auth_error() throws Throwa .isInstanceOf(AuthenticationException.class) .hasMessage( String.format( - "Authentication error on node %s: server replied 'mock error'", + "Authentication error on node %s: server replied with 'mock error' to AuthResponse request", END_POINT))); } @@ -349,14 +416,15 @@ public void should_check_cluster_name_if_provided() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, "expectedClusterName", END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -379,14 +447,15 @@ public void should_fail_to_initialize_if_cluster_name_does_not_match() throws Th channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, "expectedClusterName", END_POINT, DriverChannelOptions.DEFAULT, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -412,14 +481,15 @@ public void should_initialize_with_keyspace() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, options, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -443,14 
+513,15 @@ public void should_initialize_with_events() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, driverChannelOptions, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -477,14 +548,15 @@ public void should_initialize_with_keyspace_and_events() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, driverChannelOptions, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -511,14 +583,15 @@ public void should_fail_to_initialize_if_keyspace_is_invalid() { channel .pipeline() .addLast( - "init", + ChannelFactory.INIT_HANDLER_NAME, new ProtocolInitHandler( internalDriverContext, DefaultProtocolVersion.V4, null, END_POINT, driverChannelOptions, - heartbeatHandler)); + heartbeatHandler, + false)); ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); @@ -538,4 +611,42 @@ public void should_fail_to_initialize_if_keyspace_is_invalid() { .isInstanceOf(InvalidKeyspaceException.class) .hasMessage("invalid keyspace")); } + + /** + * This covers a corner case where {@code abortAllInFlight} was recursing into itself, causing a + * {@link ConcurrentModificationException}. This was recoverable but caused Netty to generate a + * warning log. + * + * @see JAVA-2838 + */ + @Test + public void should_fail_pending_requests_only_once_if_init_fails() { + Logger logger = + (Logger) LoggerFactory.getLogger("io.netty.channel.AbstractChannelHandlerContext"); + Level levelBefore = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(appender); + + channel + .pipeline() + .addLast( + "init", + new ProtocolInitHandler( + internalDriverContext, + DefaultProtocolVersion.V4, + null, + END_POINT, + DriverChannelOptions.DEFAULT, + heartbeatHandler, + false)); + + ChannelFuture connectFuture = channel.connect(new InetSocketAddress("localhost", 9042)); + channel.pipeline().fireExceptionCaught(new IOException("Mock I/O exception")); + assertThat(connectFuture).isFailed(); + + verify(appender, never()).doAppend(any(ILoggingEvent.class)); + + logger.detachAppender(appender); + logger.setLevel(levelBefore); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java index 7bbbf23c329..83802884c45 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/channel/StreamIdGeneratorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,6 +32,7 @@ public void should_have_all_available_upon_creation() { public void should_return_available_ids_in_sequence() { StreamIdGenerator generator = new StreamIdGenerator(8); for (int i = 0; i < 8; i++) { + assertThat(generator.preAcquire()).isTrue(); assertThat(generator.acquire()).isEqualTo(i); assertThat(generator.getAvailableIds()).isEqualTo(7 - i); } @@ -39,23 +42,28 @@ public void should_return_available_ids_in_sequence() { public void should_return_minus_one_when_no_id_available() { StreamIdGenerator generator = new StreamIdGenerator(8); for (int i = 0; i < 8; i++) { - generator.acquire(); + assertThat(generator.preAcquire()).isTrue(); + // also validating that ids are held as soon as preAcquire() is called, even if acquire() has + // not been invoked yet } assertThat(generator.getAvailableIds()).isEqualTo(0); - assertThat(generator.acquire()).isEqualTo(-1); + assertThat(generator.preAcquire()).isFalse(); } @Test public void should_return_previously_released_ids() { StreamIdGenerator generator = new StreamIdGenerator(8); for (int i = 0; i < 8; i++) { - generator.acquire(); + assertThat(generator.preAcquire()).isTrue(); + assertThat(generator.acquire()).isEqualTo(i); } generator.release(7); generator.release(2); assertThat(generator.getAvailableIds()).isEqualTo(2); + assertThat(generator.preAcquire()).isTrue(); assertThat(generator.acquire()).isEqualTo(2); + assertThat(generator.preAcquire()).isTrue(); assertThat(generator.acquire()).isEqualTo(7); - assertThat(generator.acquire()).isEqualTo(-1); + assertThat(generator.preAcquire()).isFalse(); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/MockOptions.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java similarity index 52% rename from core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/MockOptions.java rename to core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java index c6870f40802..cee57abbfdf 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/MockOptions.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,15 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.internal.core.config.typesafe; +package com.datastax.oss.driver.internal.core.config; import com.datastax.oss.driver.api.core.config.DriverOption; import edu.umd.cs.findbugs.annotations.NonNull; -enum MockOptions implements DriverOption { +public enum MockOptions implements DriverOption { INT1("int1"), INT2("int2"), AUTH_PROVIDER("auth_provider"), + SUBNET_ADDRESSES("subnet_addresses"), ; private final String path; diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java new file mode 100644 index 00000000000..ecad298aa37 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/MockTypedOptions.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.config; + +import com.datastax.oss.driver.api.core.config.TypedDriverOption; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; + +public class MockTypedOptions { + public static final TypedDriverOption INT1 = + new TypedDriverOption<>(MockOptions.INT1, GenericType.INTEGER); + public static final TypedDriverOption INT2 = + new TypedDriverOption<>(MockOptions.INT2, GenericType.INTEGER); +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java new file mode 100644 index 00000000000..a0db82d298e --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/cloud/CloudConfigFactoryTest.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.config.cloud; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.datastax.oss.driver.internal.core.ssl.SniSslEngineFactory; +import com.fasterxml.jackson.core.JsonParseException; +import com.github.tomakehurst.wiremock.common.JettySettings; +import com.github.tomakehurst.wiremock.core.Options; +import com.github.tomakehurst.wiremock.http.AdminRequestHandler; +import com.github.tomakehurst.wiremock.http.HttpServer; +import com.github.tomakehurst.wiremock.http.HttpServerFactory; +import com.github.tomakehurst.wiremock.http.StubRequestHandler; +import com.github.tomakehurst.wiremock.jetty9.JettyHttpServer; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.google.common.base.Joiner; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import org.eclipse.jetty.io.NetworkTrafficListener; +import org.eclipse.jetty.server.ConnectionFactory; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.SslConnectionFactory; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class CloudConfigFactoryTest { + + private static final String BUNDLE_PATH = "/config/cloud/creds.zip"; + + @Rule + public WireMockRule wireMockRule = + new WireMockRule( + wireMockConfig() + .httpsPort(30443) + .dynamicPort() + .httpServerFactory(new HttpsServerFactory()) + .needClientAuth(true) + .keystorePath(path("/config/cloud/identity.jks").toString()) + .keystorePassword("fakePasswordForTests") + .trustStorePath(path("/config/cloud/trustStore.jks").toString()) + .trustStorePassword("fakePasswordForTests2")); + + public CloudConfigFactoryTest() throws URISyntaxException {} + + @Test + public void should_load_config_from_local_filesystem() throws Exception { + // given + URL configFile = getClass().getResource(BUNDLE_PATH); + mockProxyMetadataService(jsonMetadata()); + // when + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile); + // then + assertCloudConfig(cloudConfig); + } + + @Test + public void should_load_config_from_external_location() throws Exception { + // given + mockHttpSecureBundle(secureBundle()); + mockProxyMetadataService(jsonMetadata()); + // when + URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + CloudConfig cloudConfig = cloudConfigFactory.createCloudConfig(configFile); + // then + assertCloudConfig(cloudConfig); + } + + @Test + public void 
should_throw_when_bundle_not_found() throws Exception { + // given + stubFor(any(urlEqualTo(BUNDLE_PATH)).willReturn(aResponse().withStatus(404))); + // when + URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); + assertThat(t) + .isInstanceOf(FileNotFoundException.class) + .hasMessageContaining(configFile.toExternalForm()); + } + + @Test + public void should_throw_when_bundle_not_readable() throws Exception { + // given + mockHttpSecureBundle("not a zip file".getBytes(StandardCharsets.UTF_8)); + // when + URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); + assertThat(t) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("Invalid bundle: missing file config.json"); + } + + @Test + public void should_throw_when_metadata_not_found() throws Exception { + // given + mockHttpSecureBundle(secureBundle()); + stubFor(any(urlPathEqualTo("/metadata")).willReturn(aResponse().withStatus(404))); + // when + URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); + assertThat(t).isInstanceOf(FileNotFoundException.class).hasMessageContaining("metadata"); + } + + @Test + public void should_throw_when_metadata_not_readable() throws Exception { + // given + mockHttpSecureBundle(secureBundle()); + mockProxyMetadataService("not a valid json payload"); + // when + URL configFile = new URL("http", "localhost", wireMockRule.port(), BUNDLE_PATH); + CloudConfigFactory cloudConfigFactory = new CloudConfigFactory(); + Throwable t = catchThrowable(() -> cloudConfigFactory.createCloudConfig(configFile)); + assertThat(t).isInstanceOf(JsonParseException.class).hasMessageContaining("Unrecognized token"); + } + + private void mockHttpSecureBundle(byte[] body) { + stubFor( + any(urlEqualTo(BUNDLE_PATH)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/octet-stream") + .withBody(body))); + } + + private void mockProxyMetadataService(String jsonMetadata) { + stubFor( + any(urlPathEqualTo("/metadata")) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/json") + .withBody(jsonMetadata))); + } + + private byte[] secureBundle() throws IOException, URISyntaxException { + return Files.readAllBytes(path(BUNDLE_PATH)); + } + + private String jsonMetadata() throws IOException, URISyntaxException { + return Joiner.on('\n') + .join(Files.readAllLines(path("/config/cloud/metadata.json"), StandardCharsets.UTF_8)); + } + + private Path path(String resource) throws URISyntaxException { + return Paths.get(getClass().getResource(resource).toURI()); + } + + private void assertCloudConfig(CloudConfig config) { + InetSocketAddress expectedProxyAddress = InetSocketAddress.createUnresolved("localhost", 30002); + assertThat(config.getLocalDatacenter()).isEqualTo("dc1"); + assertThat(config.getProxyAddress()).isEqualTo(expectedProxyAddress); + assertThat(config.getEndPoints()).extracting("proxyAddress").containsOnly(expectedProxyAddress); + assertThat(config.getEndPoints()) + .extracting("serverName") + 
.containsExactly( + "4ac06655-f861-49f9-881e-3fee22e69b94", + "2af7c253-3394-4a0d-bfac-f1ad81b5154d", + "b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"); + assertThat(config.getSslEngineFactory()).isNotNull().isInstanceOf(SniSslEngineFactory.class); + } + + static { + javax.net.ssl.HttpsURLConnection.setDefaultHostnameVerifier( + (hostname, sslSession) -> hostname.equals("localhost")); + } + + // see https://github.com/tomakehurst/wiremock/issues/874 + private static class HttpsServerFactory implements HttpServerFactory { + @Override + public HttpServer buildHttpServer( + Options options, + AdminRequestHandler adminRequestHandler, + StubRequestHandler stubRequestHandler) { + return new JettyHttpServer(options, adminRequestHandler, stubRequestHandler) { + @Override + protected ServerConnector createServerConnector( + String bindAddress, + JettySettings jettySettings, + int port, + NetworkTrafficListener listener, + ConnectionFactory... connectionFactories) { + if (port == options.httpsSettings().port()) { + SslConnectionFactory sslConnectionFactory = + (SslConnectionFactory) connectionFactories[0]; + SslContextFactory sslContextFactory = sslConnectionFactory.getSslContextFactory(); + sslContextFactory.setKeyStorePassword(options.httpsSettings().keyStorePassword()); + connectionFactories = + new ConnectionFactory[] {sslConnectionFactory, connectionFactories[1]}; + } + return super.createServerConnector( + bindAddress, jettySettings, port, listener, connectionFactories); + } + }; + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java new file mode 100644 index 00000000000..1d327a08101 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigReloadTest.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.config.composite; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class CompositeDriverConfigReloadTest { + + @Mock private DriverConfigLoader primaryLoader; + @Mock private DriverConfigLoader fallbackLoader; + private DriverConfigLoader compositeLoader; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + compositeLoader = DriverConfigLoader.compose(primaryLoader, fallbackLoader); + } + + @Test + @UseDataProvider("reloadabilities") + public void should_be_reloadable_if_either_child_is( + boolean primaryIsReloadable, + boolean fallbackIsReloadable, + boolean compositeShouldBeReloadable) { + when(primaryLoader.supportsReloading()).thenReturn(primaryIsReloadable); + when(fallbackLoader.supportsReloading()).thenReturn(fallbackIsReloadable); + assertThat(compositeLoader.supportsReloading()).isEqualTo(compositeShouldBeReloadable); + } + + @Test + @UseDataProvider("reloadabilities") + public void should_delegate_reloading_to_reloadable_children( + boolean primaryIsReloadable, + boolean fallbackIsReloadable, + boolean compositeShouldBeReloadable) { + when(primaryLoader.supportsReloading()).thenReturn(primaryIsReloadable); + when(primaryLoader.reload()) + .thenReturn( + primaryIsReloadable + ? CompletableFuture.completedFuture(true) + : CompletableFutures.failedFuture(new UnsupportedOperationException())); + + when(fallbackLoader.supportsReloading()).thenReturn(fallbackIsReloadable); + when(fallbackLoader.reload()) + .thenReturn( + fallbackIsReloadable + ? CompletableFuture.completedFuture(true) + : CompletableFutures.failedFuture(new UnsupportedOperationException())); + + CompletionStage reloadFuture = compositeLoader.reload(); + + if (compositeShouldBeReloadable) { + assertThat(reloadFuture).isCompletedWithValue(true); + } else { + assertThat(reloadFuture).isCompletedExceptionally(); + Throwable t = catchThrowable(() -> reloadFuture.toCompletableFuture().get()); + assertThat(t).hasRootCauseInstanceOf(UnsupportedOperationException.class); + } + verify(primaryLoader, primaryIsReloadable ? times(1) : never()).reload(); + verify(fallbackLoader, fallbackIsReloadable ? 
times(1) : never()).reload(); + } + + @DataProvider + public static Object[][] reloadabilities() { + return new Object[][] { + // primaryIsReloadable, fallbackIsReloadable, compositeShouldBeReloadable + new Object[] {true, true, true}, + new Object[] {true, false, true}, + new Object[] {false, true, true}, + new Object[] {false, false, false}, + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java new file mode 100644 index 00000000000..e5d5ffcdf83 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/composite/CompositeDriverConfigTest.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.config.composite; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.entry; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.api.core.config.TypedDriverOption; +import org.junit.Before; +import org.junit.Test; + +public class CompositeDriverConfigTest { + + private OptionsMap primaryMap; + private OptionsMap fallbackMap; + private DriverConfig compositeConfig; + private DriverExecutionProfile compositeDefaultProfile; + + @Before + public void setup() { + primaryMap = new OptionsMap(); + // We need at least one option so that the default profile exists. Do it now to avoid having to + // do it in every test. We use an option that we won't reuse in the tests so that there are no + // unwanted interactions. 
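+    // Composite semantics under test: for each option the primary loader wins, and the
+    // fallback is only consulted when the primary has no value. A hypothetical production
+    // equivalent (loader names are illustrative) would be:
+    //   DriverConfigLoader.compose(
+    //       DriverConfigLoader.fromMap(programmaticOverrides),  // primary
+    //       DriverConfigLoader.fromClasspath("application"));   // fallback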
+    primaryMap.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 1);
+
+    fallbackMap = new OptionsMap();
+    fallbackMap.put(TypedDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 1);
+
+    DriverConfigLoader compositeLoader =
+        DriverConfigLoader.compose(
+            DriverConfigLoader.fromMap(primaryMap), DriverConfigLoader.fromMap(fallbackMap));
+    compositeConfig = compositeLoader.getInitialConfig();
+    compositeDefaultProfile = compositeConfig.getDefaultProfile();
+  }
+
+  @Test
+  public void should_use_value_from_primary_config() {
+    primaryMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1);
+
+    assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isTrue();
+    assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isEqualTo(1);
+    assertThat(compositeDefaultProfile.entrySet())
+        .containsExactly(
+            entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1),
+            entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1));
+  }
+
+  @Test
+  public void should_ignore_value_from_fallback_config_if_defined_in_both() {
+    primaryMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1);
+    fallbackMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2);
+
+    assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isTrue();
+    assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isEqualTo(1);
+    assertThat(compositeDefaultProfile.entrySet())
+        .containsExactly(
+            entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1),
+            entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1));
+  }
+
+  @Test
+  public void should_use_value_from_fallback_config_if_not_defined_in_primary() {
+    fallbackMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1);
+
+    assertThat(compositeDefaultProfile.isDefined(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isTrue();
+    assertThat(compositeDefaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isEqualTo(1);
+    assertThat(compositeDefaultProfile.entrySet())
+        .containsExactly(
+            entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1),
+            entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1));
+  }
+
+  @Test
+  public void should_merge_profiles() {
+    primaryMap.put("onlyInPrimary", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 1);
+    primaryMap.put("inBoth", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2);
+    fallbackMap.put("inBoth", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 3);
+    fallbackMap.put("onlyInFallback", TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 4);
+
+    assertThat(compositeConfig.getProfiles())
+        .containsKeys(
+            DriverExecutionProfile.DEFAULT_NAME,
+            "onlyInPrimary",
+            "inBoth",
+            "onlyInFallback");
+
+    assertThat(
+            compositeConfig
+                .getProfile("onlyInPrimary")
+                .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isEqualTo(1);
+    assertThat(
+            compositeConfig
+                .getProfile("inBoth")
+                .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isEqualTo(2);
+    assertThat(
+            compositeConfig
+                .getProfile("onlyInFallback")
+                .getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE))
+        .isEqualTo(4);
+
+    assertThat(compositeConfig.getProfile("onlyInPrimary").entrySet())
+        .containsExactly(
+            entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 1),
+            entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1));
+
+    assertThat(compositeConfig.getProfile("inBoth").entrySet())
+        .containsExactly(
+
entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 2), + entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); + + assertThat(compositeConfig.getProfile("onlyInFallback").entrySet()) + .containsExactly( + entry(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(), 4), + entry(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES.getPath(), 1)); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java new file mode 100644 index 00000000000..93f6b274826 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigLoaderTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.config.map; + +import static com.typesafe.config.ConfigFactory.defaultReference; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.api.core.config.TypedDriverOption; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.config.MockOptions; +import com.datastax.oss.driver.internal.core.config.MockTypedOptions; +import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; +import com.typesafe.config.ConfigException; +import com.typesafe.config.ConfigFactory; +import java.util.Optional; +import org.junit.Test; + +public class MapBasedDriverConfigLoaderTest { + + @Test + public void should_reflect_changes_in_real_time() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 1); + + DriverConfigLoader loader = DriverConfigLoader.fromMap(source); + DriverConfig config = loader.getInitialConfig(); + assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(1); + + source.put(MockTypedOptions.INT1, 2); + assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(2); + } + + /** + * Checks that, if we ask to pre-fill the default profile, then we get the same set of options as + * the built-in reference.conf. 
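+   *
+   * <p>In other words, {@code OptionsMap.driverDefaults()} is expected to mirror every option
+   * defined in reference.conf, except the config reload interval, which only makes sense for a
+   * file-based config (see the size assertion below).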
+   */
+  @Test
+  public void should_fill_default_profile_like_reference_file() {
+    OptionsMap optionsMap = OptionsMap.driverDefaults();
+    DriverExecutionProfile mapBasedConfig =
+        DriverConfigLoader.fromMap(optionsMap).getInitialConfig().getDefaultProfile();
+    DriverExecutionProfile fileBasedConfig =
+        new DefaultDriverConfigLoader(
+                () -> {
+                  // Only load reference.conf since we are focusing on driver defaults
+                  ConfigFactory.invalidateCaches();
+                  return defaultReference().getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH);
+                })
+            .getInitialConfig()
+            .getDefaultProfile();
+
+    // Make sure we're not missing any options. -1 is for CONFIG_RELOAD_INTERVAL, which is not
+    // defined by OptionsMap because it is irrelevant for the map-based config.
+    assertThat(mapBasedConfig.entrySet()).hasSize(fileBasedConfig.entrySet().size() - 1);
+
+    for (TypedDriverOption<?> option : TypedDriverOption.builtInValues()) {
+      if (option.getRawOption() == DefaultDriverOption.CONFIG_RELOAD_INTERVAL) {
+        continue;
+      }
+      Optional<Object> fileBasedValue = get(fileBasedConfig, option);
+      Optional<Object> mapBasedValue = get(mapBasedConfig, option);
+      assertThat(mapBasedValue)
+          .as("Wrong value for %s in OptionsMap", option.getRawOption())
+          .isEqualTo(fileBasedValue);
+    }
+  }
+
+  private Optional<Object> get(DriverExecutionProfile config, TypedDriverOption<?> typedOption) {
+    DriverOption option = typedOption.getRawOption();
+    GenericType<?> type = typedOption.getExpectedType();
+    Object value = null;
+    if (config.isDefined(option)) {
+      // This is ugly, we have no other way than enumerating all possible types.
+      // This kind of bridging code between OptionsMap and DriverConfig is unlikely to exist
+      // anywhere outside of this test.
+      if (type.equals(GenericType.listOf(String.class))) {
+        value = config.getStringList(option);
+      } else if (type.equals(GenericType.STRING)) {
+        value = config.getString(option);
+      } else if (type.equals(GenericType.DURATION)) {
+        value = config.getDuration(option);
+      } else if (type.equals(GenericType.INTEGER)) {
+        value = config.getInt(option);
+      } else if (type.equals(GenericType.BOOLEAN)) {
+        value = config.getBoolean(option);
+      } else if (type.equals(GenericType.LONG)) {
+        try {
+          value = config.getLong(option);
+        } catch (ConfigException.WrongType e) {
+          value = config.getBytes(option);
+        }
+      } else if (type.equals(GenericType.mapOf(GenericType.STRING, GenericType.STRING))) {
+        value = config.getStringMap(option);
+      } else {
+        fail("Unexpected type " + type);
+      }
+    }
+    return Optional.ofNullable(value);
+  }
+}
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java
new file mode 100644
index 00000000000..1ebd5fb48ba
--- /dev/null
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/map/MapBasedDriverConfigTest.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.config.map; + +import static com.datastax.oss.driver.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.internal.core.config.MockOptions; +import com.datastax.oss.driver.internal.core.config.MockTypedOptions; +import org.junit.Test; + +public class MapBasedDriverConfigTest { + + @Test + public void should_load_minimal_config_with_no_profiles() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 42); + DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); + + assertThat(config).hasIntOption(MockOptions.INT1, 42); + } + + @Test + public void should_inherit_option_in_profile() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 42); + // need to add an unrelated option to create the profile + source.put("profile1", MockTypedOptions.INT2, 1); + DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); + + assertThat(config) + .hasIntOption(MockOptions.INT1, 42) + .hasIntOption("profile1", MockOptions.INT1, 42); + } + + @Test + public void should_override_option_in_profile() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 42); + source.put("profile1", MockTypedOptions.INT1, 43); + DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); + + assertThat(config) + .hasIntOption(MockOptions.INT1, 42) + .hasIntOption("profile1", MockOptions.INT1, 43); + } + + @Test + public void should_create_derived_profile_with_new_option() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 42); + DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); + DriverExecutionProfile base = config.getDefaultProfile(); + DriverExecutionProfile derived = base.withInt(MockOptions.INT2, 43); + + assertThat(base.isDefined(MockOptions.INT2)).isFalse(); + assertThat(derived.isDefined(MockOptions.INT2)).isTrue(); + assertThat(derived.getInt(MockOptions.INT2)).isEqualTo(43); + } + + @Test + public void should_create_derived_profile_overriding_option() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 42); + DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); + DriverExecutionProfile base = config.getDefaultProfile(); + DriverExecutionProfile derived = base.withInt(MockOptions.INT1, 43); + + assertThat(base.getInt(MockOptions.INT1)).isEqualTo(42); + assertThat(derived.getInt(MockOptions.INT1)).isEqualTo(43); + } + + @Test + public void should_create_derived_profile_unsetting_option() { + OptionsMap source = new OptionsMap(); + source.put(MockTypedOptions.INT1, 42); + source.put(MockTypedOptions.INT2, 43); + DriverConfig config = DriverConfigLoader.fromMap(source).getInitialConfig(); + DriverExecutionProfile base = config.getDefaultProfile(); + 
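    // `without` is copy-on-write, like the `with*` methods used in the previous tests: it
+    // returns a new derived profile and leaves the base profile untouched, as asserted below.
+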
DriverExecutionProfile derived = base.without(MockOptions.INT2); + + assertThat(base.getInt(MockOptions.INT2)).isEqualTo(43); + assertThat(derived.isDefined(MockOptions.INT2)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java index 5f63468f7fb..16b8f0b3aa6 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultDriverConfigLoaderTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,6 +30,7 @@ import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; +import com.datastax.oss.driver.internal.core.config.MockOptions; import com.datastax.oss.driver.internal.core.context.EventBus; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.context.NettyOptions; @@ -184,7 +187,7 @@ public void should_load_from_other_classpath_resource() { DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); // From customApplication.conf: assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(500)); + .isEqualTo(Duration.ofSeconds(5)); // From customApplication.json: assertThat(config.getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)).isEqualTo(2000); // From customApplication.properties: @@ -203,9 +206,109 @@ public void should_load_from_file() { DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); // From customApplication.conf: assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) - .isEqualTo(Duration.ofMillis(500)); + .isEqualTo(Duration.ofSeconds(5)); // From reference.conf: assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); } + + @Test + public void should_load_from_file_with_system_property() { + File file = new File("src/test/resources/config/customApplication.conf"); + assertThat(file).exists(); + System.setProperty("config.file", file.getAbsolutePath()); + try { + DriverConfigLoader loader = new DefaultDriverConfigLoader(); + DriverExecutionProfile config = loader.getInitialConfig().getDefaultProfile(); + // From customApplication.conf: + assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) + 
.isEqualTo(Duration.ofSeconds(5)); + // From reference.conf: + assertThat(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY)) + .isEqualTo(DefaultConsistencyLevel.SERIAL.name()); + } finally { + System.clearProperty("config.file"); + } + } + + @Test + public void should_return_failed_future_if_reloading_not_supported() { + DefaultDriverConfigLoader loader = + new DefaultDriverConfigLoader(() -> ConfigFactory.parseString(configSource.get()), false); + assertThat(loader.supportsReloading()).isFalse(); + CompletionStage stage = loader.reload(); + assertThatStage(stage) + .isFailed( + t -> + assertThat(t) + .isInstanceOf(UnsupportedOperationException.class) + .hasMessage( + "This instance of DefaultDriverConfigLoader does not support reloading")); + } + + /** Test for JAVA-2846. */ + @Test + public void should_load_setting_from_system_property_when_application_conf_is_also_provided() { + System.setProperty("datastax-java-driver.basic.request.timeout", "1 millisecond"); + try { + assertThat( + new DefaultDriverConfigLoader() + .getInitialConfig() + .getDefaultProfile() + .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) + .isEqualTo(Duration.ofMillis(1)); + } finally { + System.clearProperty("datastax-java-driver.basic.request.timeout"); + } + } + + /** Test for JAVA-2846. */ + @Test + public void + should_load_and_resolve_setting_from_system_property_when_application_conf_is_also_provided() { + System.setProperty( + "datastax-java-driver.advanced.connection.init-query-timeout", "1234 milliseconds"); + try { + assertThat( + new DefaultDriverConfigLoader() + .getInitialConfig() + .getDefaultProfile() + .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) + .isEqualTo(Duration.ofMillis(1234)); + } finally { + System.clearProperty("datastax-java-driver.advanced.connection.init-query-timeout"); + } + } + + /** Test for JAVA-2846. 
*/ + @Test + public void + should_load_setting_from_system_property_when_application_conf_is_also_provided_for_custom_classloader() { + System.setProperty("datastax-java-driver.basic.request.timeout", "1 millisecond"); + try { + assertThat( + new DefaultDriverConfigLoader(Thread.currentThread().getContextClassLoader()) + .getInitialConfig() + .getDefaultProfile() + .getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) + .isEqualTo(Duration.ofMillis(1)); + } finally { + System.clearProperty("datastax-java-driver.basic.request.timeout"); + } + } + + @Test + public void should_create_from_string() { + DriverExecutionProfile config = + DriverConfigLoader.fromString( + "datastax-java-driver.basic { session-name = my-app\nrequest.timeout = 1 millisecond }") + .getInitialConfig() + .getDefaultProfile(); + + assertThat(config.getString(DefaultDriverOption.SESSION_NAME)).isEqualTo("my-app"); + assertThat(config.getDuration(DefaultDriverOption.REQUEST_TIMEOUT)) + .isEqualTo(Duration.ofMillis(1)); + // Any option not in the string should be pulled from reference.conf + assertThat(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("LOCAL_ONE"); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java index c38e3b7ef29..4f2edf98246 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/DefaultProgrammaticDriverConfigLoaderBuilderTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +19,10 @@ import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.internal.core.config.MockOptions; import com.typesafe.config.ConfigFactory; import org.junit.Test; @@ -27,6 +31,22 @@ public class DefaultProgrammaticDriverConfigLoaderBuilderTest { private static final String FALLBACK_CONFIG = "int1 = 1\nint2 = 2\nprofiles.profile1 { int1 = 11 }"; + @Test + public void should_override_option() { + DriverConfigLoader loader = + new DefaultProgrammaticDriverConfigLoaderBuilder( + () -> ConfigFactory.parseString(FALLBACK_CONFIG), "") + .withInt(MockOptions.INT1, 2) + .withInt(MockOptions.INT1, 3) + .withInt(MockOptions.INT1, 4) + .withInt(MockOptions.INT2, 3) + .withInt(MockOptions.INT2, 4) + .build(); + DriverConfig config = loader.getInitialConfig(); + assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(4); + assertThat(config.getDefaultProfile().getInt(MockOptions.INT2)).isEqualTo(4); + } + @Test public void should_override_option_in_default_profile() { DriverConfigLoader loader = @@ -95,4 +115,25 @@ public void should_handle_multiple_programmatic_profiles() { assertThat(config.getProfile("profile2").getInt(MockOptions.INT1)).isEqualTo(3); assertThat(config.getProfile("profile3").getInt(MockOptions.INT1)).isEqualTo(4); } + + @Test + public void should_honor_root_path() { + String rootPath = "test-root"; + String propertyKey = rootPath + "." + DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE.getPath(); + try { + System.setProperty(propertyKey, "42"); + DriverConfigLoader loader = + new DefaultProgrammaticDriverConfigLoaderBuilder( + DefaultProgrammaticDriverConfigLoaderBuilder.DEFAULT_FALLBACK_SUPPLIER, rootPath) + .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 1234) + .build(); + DriverConfig config = loader.getInitialConfig(); + assertThat(config.getDefaultProfile().getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)) + .isEqualTo(42); + assertThat(config.getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)) + .isEqualTo(1234); + } finally { + System.clearProperty(propertyKey); + } + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java new file mode 100644 index 00000000000..2f2f0a9b3c1 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypeSafeDriverConfigOverrideDefaultsTest.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.config.typesafe;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.config.DriverOption;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.time.Duration;
+import java.util.Map;
+import org.junit.Test;
+
+/** Focuses on {@link TypesafeDriverConfig#overrideDefaults(Map)}. */
+public class TypeSafeDriverConfigOverrideDefaultsTest {
+
+  @Test
+  public void should_replace_if_value_comes_from_reference() {
+    // Given
+    TypesafeDriverConfig config = config("");
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_ONE");
+
+    // When
+    config.overrideDefaults(
+        ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM"));
+
+    // Then
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_QUORUM");
+  }
+
+  @Test
+  public void should_replace_multiple_times() {
+    // Given
+    TypesafeDriverConfig config = config("");
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_ONE");
+
+    // When
+    config.overrideDefaults(
+        ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM"));
+    config.overrideDefaults(ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "TWO"));
+
+    // Then
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("TWO");
+  }
+
+  @Test
+  public void should_not_replace_if_overridden_from_application() {
+    // Given
+    TypesafeDriverConfig config =
+        config("datastax-java-driver.basic.request.consistency = LOCAL_ONE");
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_ONE");
+
+    // When
+    config.overrideDefaults(
+        ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM"));
+
+    // Then
+    // not replaced because it was set explicitly in application.conf
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_ONE");
+  }
+
+  @Test
+  public void should_handle_reloads() {
+    // Given
+    TypesafeDriverConfig config = config("");
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_ONE");
+
+    // When
+    config.overrideDefaults(
+        ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM"));
+    reload(config, "");
+
+    // Then
+    assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY))
+        .isEqualTo("LOCAL_QUORUM");
+
+    // When
+    reload(config, "datastax-java-driver.basic.request.consistency = ONE");
+
+    // Then
+    // overridden default not used anymore if the reload detected a user change
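+    // (the effective precedence is: explicit value in application.conf > programmatic
+    // default override > built-in reference.conf default)
+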
assertThat(config.getDefaultProfile().getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("ONE"); + } + + @Test + public void should_ignore_non_existent_option() { + // Given + TypesafeDriverConfig config = config(""); + DriverOption nonExistent = () -> "non existent"; + + // When + config.overrideDefaults(ImmutableMap.of(nonExistent, "IRRELEVANT")); + + // Then + assertThat(config.getDefaultProfile().isDefined(nonExistent)).isFalse(); + } + + @Test + public void should_handle_profiles() { + // Given + TypesafeDriverConfig config = + config( + "datastax-java-driver.profiles.profile1.basic.request.consistency = TWO\n" + + "datastax-java-driver.profiles.profile2.basic.request.timeout = 5 seconds"); + DriverExecutionProfile profile1 = config.getProfile("profile1"); + DriverExecutionProfile profile2 = config.getProfile("profile2"); + DriverExecutionProfile derivedProfile21 = + profile2.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)); + DriverExecutionProfile derivedProfile22 = + profile2.withString(DefaultDriverOption.REQUEST_CONSISTENCY, "QUORUM"); + assertThat(profile1.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("TWO"); + assertThat(profile2.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("LOCAL_ONE"); // inherited from default profile in reference.conf + assertThat(derivedProfile21.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("LOCAL_ONE"); // inherited from default profile in reference.conf + assertThat(derivedProfile22.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("QUORUM"); // overridden programmatically + + // When + config.overrideDefaults( + ImmutableMap.of(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM")); + + // Then + // Unaffected because it was set manually in application.conf: + assertThat(profile1.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).isEqualTo("TWO"); + // Affected because it was using the default from reference.conf: + assertThat(profile2.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("LOCAL_QUORUM"); + // Same: + assertThat(derivedProfile21.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("LOCAL_QUORUM"); + // Unaffected because it was overridden programmatically: + assertThat(derivedProfile22.getString(DefaultDriverOption.REQUEST_CONSISTENCY)) + .isEqualTo("QUORUM"); + } + + // Builds a config based on reference.conf + the given application.conf overrides + private TypesafeDriverConfig config(String application) { + return new TypesafeDriverConfig(rawConfig(application)); + } + + private boolean reload(TypesafeDriverConfig config, String newApplication) { + return config.reload(rawConfig(newApplication)); + } + + private Config rawConfig(String application) { + ConfigFactory.invalidateCaches(); + return ConfigFactory.parseString(application) + .withFallback(ConfigFactory.defaultReference()) + .resolve() + .getConfig(DefaultDriverConfigLoader.DEFAULT_ROOT_PATH); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java index 32889e24afb..4a78c3ccb03 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/config/typesafe/TypesafeDriverConfigTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,18 +21,15 @@ import static org.assertj.core.api.Assertions.entry; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.config.MockOptions; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import java.util.HashMap; import java.util.Map; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class TypesafeDriverConfigTest { - @Rule public ExpectedException expectedException = ExpectedException.none(); - @Test public void should_load_minimal_config_with_no_profiles() { TypesafeDriverConfig config = parse("int1 = 42"); @@ -102,7 +101,6 @@ public void should_fetch_string_map() { parse( "int1 = 42 \n auth_provider { auth_thing_one= one \n auth_thing_two = two \n auth_thing_three = three}"); DriverExecutionProfile base = config.getDefaultProfile(); - base.getStringMap(MockOptions.AUTH_PROVIDER); Map map = base.getStringMap(MockOptions.AUTH_PROVIDER); assertThat(map.entrySet().size()).isEqualTo(3); assertThat(map.get("auth_thing_one")).isEqualTo("one"); @@ -110,6 +108,19 @@ public void should_fetch_string_map() { assertThat(map.get("auth_thing_three")).isEqualTo("three"); } + @Test + public void should_fetch_string_map_with_forward_slash_in_keys() { + TypesafeDriverConfig config = + parse( + "subnet_addresses { 100.64.0.0/15 = \"cassandra.datacenter1.com:9042\" \n \"100.66.0.0/15\" = \"cassandra.datacenter2.com\" \n \"::ffff:6440:0/111\" = \"cassandra.datacenter3.com:19042\" }"); + DriverExecutionProfile base = config.getDefaultProfile(); + Map map = base.getStringMap(MockOptions.SUBNET_ADDRESSES); + assertThat(map.entrySet().size()).isEqualTo(3); + assertThat(map.get("100.64.0.\"0/15\"")).isEqualTo("cassandra.datacenter1.com:9042"); + assertThat(map.get("\"100.66.0.0/15\"")).isEqualTo("cassandra.datacenter2.com"); + assertThat(map.get("\"::ffff:6440:0/111\"")).isEqualTo("cassandra.datacenter3.com:19042"); + } + @Test public void should_create_derived_profile_with_string_map() { TypesafeDriverConfig config = parse("int1 = 42"); @@ -174,6 +185,14 @@ public void should_enumerate_options() { entry("int1", 45)); } + @Test + public void should_update_default_profile_on_reload() { + TypesafeDriverConfig config = parse("int1 = 42\n profiles { profile1 { int1 = 43 } }"); + assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(42); + config.reload(ConfigFactory.parseString("int1 = 44\n profiles { profile1 { int1 = 45 } }")); + assertThat(config.getDefaultProfile().getInt(MockOptions.INT1)).isEqualTo(44); + } + private TypesafeDriverConfig parse(String configString) { Config config = 
ConfigFactory.parseString(configString); return new TypesafeDriverConfig(config); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java index 3f1a7c12bb5..9a973c1b0e4 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicyTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java new file mode 100644 index 00000000000..6d4585cb4d7 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/context/DefaultDriverContextTest.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.internal.core.context;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.internal.core.protocol.Lz4Compressor;
+import com.datastax.oss.driver.internal.core.protocol.SnappyCompressor;
+import com.datastax.oss.protocol.internal.Compressor;
+import com.datastax.oss.protocol.internal.NoopCompressor;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import io.netty.buffer.ByteBuf;
+import java.util.Optional;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class DefaultDriverContextTest {
+
+  private DefaultDriverContext buildMockedContext(Optional<String> compressionOption) {
+
+    DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class);
+    when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none"))
+        .thenReturn(compressionOption.orElse("none"));
+    return MockedDriverContextFactory.defaultDriverContext(defaultProfile);
+  }
+
+  private void doCreateCompressorTest(Optional<String> configVal, Class<?> expectedClz) {
+
+    DefaultDriverContext ctx = buildMockedContext(configVal);
+    Compressor<ByteBuf> compressor = ctx.getCompressor();
+    assertThat(compressor).isNotNull();
+    assertThat(compressor).isInstanceOf(expectedClz);
+  }
+
+  @Test
+  @DataProvider({"lz4", "lZ4", "Lz4", "LZ4"})
+  public void should_create_lz4_compressor(String name) {
+
+    doCreateCompressorTest(Optional.of(name), Lz4Compressor.class);
+  }
+
+  @Test
+  @DataProvider({"snappy", "SNAPPY", "sNaPpY", "SNapPy"})
+  public void should_create_snappy_compressor(String name) {
+
+    doCreateCompressorTest(Optional.of(name), SnappyCompressor.class);
+  }
+
+  @Test
+  public void should_create_noop_compressor_if_undefined() {
+
+    doCreateCompressorTest(Optional.empty(), NoopCompressor.class);
+  }
+
+  @Test
+  @DataProvider({"none", "NONE", "NoNe", "nONe"})
+  public void should_create_noop_compressor_if_defined_as_none(String name) {
+
+    doCreateCompressorTest(Optional.of(name), NoopCompressor.class);
+  }
+}
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java
new file mode 100644
index 00000000000..a8b25193f54
--- /dev/null
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/context/MockedDriverContextFactory.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.oss.driver.internal.core.context; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; +import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; +import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; +import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.Maps; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; + +public class MockedDriverContextFactory { + + public static DefaultDriverContext defaultDriverContext() { + return defaultDriverContext(MockedDriverContextFactory.defaultProfile("datacenter1")); + } + + public static DefaultDriverContext defaultDriverContext( + DriverExecutionProfile defaultProfile, DriverExecutionProfile... 
profiles) {
+
+    /* Setup machinery to connect the input DriverExecutionProfile to the config loader */
+    final DriverConfig driverConfig = mock(DriverConfig.class);
+    final DriverConfigLoader configLoader = mock(DriverConfigLoader.class);
+    when(configLoader.getInitialConfig()).thenReturn(driverConfig);
+    when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile);
+    when(driverConfig.getProfile(defaultProfile.getName())).thenReturn(defaultProfile);
+
+    for (DriverExecutionProfile profile : profiles) {
+      when(driverConfig.getProfile(profile.getName())).thenReturn(profile);
+    }
+
+    ProgrammaticArguments args =
+        ProgrammaticArguments.builder()
+            .withNodeStateListener(mock(NodeStateListener.class))
+            .withSchemaChangeListener(mock(SchemaChangeListener.class))
+            .withRequestTracker(mock(RequestTracker.class))
+            .withLocalDatacenters(Maps.newHashMap())
+            .withNodeDistanceEvaluators(Maps.newHashMap())
+            .build();
+
+    return new DefaultDriverContext(configLoader, args) {
+      @NonNull
+      @Override
+      public Map<String, LoadBalancingPolicy> getLoadBalancingPolicies() {
+        ImmutableMap.Builder<String, LoadBalancingPolicy> map = ImmutableMap.builder();
+        map.put(
+            defaultProfile.getName(),
+            mockLoadBalancingPolicy(
+                this,
+                defaultProfile.getName(),
+                defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)));
+        for (DriverExecutionProfile profile : profiles) {
+          map.put(
+              profile.getName(),
+              mockLoadBalancingPolicy(
+                  this,
+                  profile.getName(),
+                  profile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)));
+        }
+        return map.build();
+      }
+
+      @NonNull
+      @Override
+      public ConsistencyLevelRegistry getConsistencyLevelRegistry() {
+        return mock(ConsistencyLevelRegistry.class);
+      }
+    };
+  }
+
+  public static DriverExecutionProfile defaultProfile(String localDc) {
+    return createProfile(DriverExecutionProfile.DEFAULT_NAME, localDc);
+  }
+
+  public static DriverExecutionProfile createProfile(String name, String localDc) {
+    DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class);
+    when(defaultProfile.getName()).thenReturn(name);
+    when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none"))
+        .thenReturn("none");
+    when(defaultProfile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER))
+        .thenReturn(Duration.ofMinutes(5));
+    when(defaultProfile.isDefined(DefaultDriverOption.METRICS_FACTORY_CLASS)).thenReturn(true);
+    when(defaultProfile.getString(DefaultDriverOption.METRICS_FACTORY_CLASS))
+        .thenReturn("DefaultMetricsFactory");
+    when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER))
+        .thenReturn(localDc);
+    return defaultProfile;
+  }
+
+  public static void allowRemoteDcConnectivity(
+      DriverExecutionProfile profile,
+      int maxNodesPerRemoteDc,
+      boolean allowRemoteSatisfyLocalDc,
+      List<String> preferredRemoteDcs) {
+    when(profile.getInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC))
+        .thenReturn(maxNodesPerRemoteDc);
+    when(profile.getBoolean(
+            DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS))
+        .thenReturn(allowRemoteSatisfyLocalDc);
+    when(profile.getStringList(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS))
+        .thenReturn(preferredRemoteDcs);
+  }
+
+  private static LoadBalancingPolicy mockLoadBalancingPolicy(
+      DefaultDriverContext driverContext, String profile, String localDc) {
+    LoadBalancingPolicy loadBalancingPolicy =
+        new DefaultLoadBalancingPolicy(driverContext, profile) {
+          @NonNull
+          @Override
+          protected Optional<String> discoverLocalDc(@NonNull Map<UUID, Node> nodes) {
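+            // Short-circuit datacenter discovery: the test injects the local DC directly,
+            // so no contact points or cluster metadata are needed.
+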
return Optional.ofNullable(localDc); + } + + @NonNull + @Override + protected NodeDistanceEvaluator createNodeDistanceEvaluator( + @Nullable String localDc, @NonNull Map<UUID, Node> nodes) { + return mock(NodeDistanceEvaluator.class); + } + }; + loadBalancingPolicy.init( + Collections.emptyMap(), mock(LoadBalancingPolicy.DistanceReporter.class)); + return loadBalancingPolicy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java index 21eea2aa331..d12e50b7e8e 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/context/StartupOptionsBuilderTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + *     http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,67 +18,35 @@ package com.datastax.oss.driver.internal.core.context; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfig; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.shaded.guava.common.collect.Lists; -import com.datastax.oss.driver.shaded.guava.common.collect.Maps; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.protocol.internal.request.Startup; -import java.util.List; -import java.util.Map; -import java.util.function.Predicate; -import org.junit.Before; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; +import org.junit.runner.RunWith; +@RunWith(DataProviderRunner.class) public class StartupOptionsBuilderTest { - private DefaultDriverContext defaultDriverContext; - - // Mocks for instantiating the default driver context - @Mock private
DriverConfigLoader configLoader; - private List<TypeCodec<?>> typeCodecs = Lists.newArrayList(); - @Mock private NodeStateListener nodeStateListener; - @Mock private SchemaChangeListener schemaChangeListener; - @Mock private RequestTracker requestTracker; - private Map<String, String> localDatacenters = Maps.newHashMap(); - private Map<String, Predicate<Node>> nodeFilters = Maps.newHashMap(); - @Mock private ClassLoader classLoader; - @Mock private DriverConfig driverConfig; - @Mock private DriverExecutionProfile defaultProfile; - - @Before - public void before() { - MockitoAnnotations.initMocks(this); - when(configLoader.getInitialConfig()).thenReturn(driverConfig); - when(driverConfig.getDefaultProfile()).thenReturn(defaultProfile); - } + private DefaultDriverContext buildMockedContext(String compression) { - private void buildDriverContext() { - defaultDriverContext = - new DefaultDriverContext( - configLoader, - typeCodecs, - nodeStateListener, - schemaChangeListener, - requestTracker, - localDatacenters, - nodeFilters, - classLoader); + DriverExecutionProfile defaultProfile = mock(DriverExecutionProfile.class); + when(defaultProfile.getString(DefaultDriverOption.PROTOCOL_COMPRESSION, "none")) + .thenReturn(compression); + when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); + return MockedDriverContextFactory.defaultDriverContext(defaultProfile); } private void assertDefaultStartupOptions(Startup startup) { + assertThat(startup.options).containsEntry(Startup.CQL_VERSION_KEY, "3.0.0"); assertThat(startup.options) .containsEntry(
void should_include_all_local_dcs_in_startup_message() { + + DefaultDriverContext ctx = + MockedDriverContextFactory.defaultDriverContext( + MockedDriverContextFactory.defaultProfile("us-west-2"), + MockedDriverContextFactory.createProfile("oltp", "us-east-2"), + MockedDriverContextFactory.createProfile("olap", "eu-central-1")); + Startup startup = new Startup(ctx.getStartupOptions()); + assertThat(startup.options) + .containsEntry( + StartupOptionsBuilder.DRIVER_BAGGAGE, + "{\"default\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"us-west-2\"}}," + + "\"oltp\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"us-east-2\"}}," + + "\"olap\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"eu-central-1\"}}}"); + } + + @Test + public void should_include_all_lbp_details_in_startup_message() { + + DriverExecutionProfile defaultProfile = MockedDriverContextFactory.defaultProfile("dc1"); + DriverExecutionProfile oltpProfile = MockedDriverContextFactory.createProfile("oltp", "dc1"); + MockedDriverContextFactory.allowRemoteDcConnectivity( + oltpProfile, 2, true, ImmutableList.of("dc2", "dc3")); + DefaultDriverContext ctx = + MockedDriverContextFactory.defaultDriverContext(defaultProfile, oltpProfile); + + Startup startup = new Startup(ctx.getStartupOptions()); + + assertThat(startup.options) + .containsEntry( + StartupOptionsBuilder.DRIVER_BAGGAGE, + "{\"default\":{\"DefaultLoadBalancingPolicy\":{\"localDc\":\"dc1\"}}," + + "\"oltp\":{\"DefaultLoadBalancingPolicy\":{" + + "\"localDc\":\"dc1\"," + + "\"preferredRemoteDcs\":[\"dc2\",\"dc3\"]," + + "\"allowDcFailoverForLocalCl\":true," + + "\"maxNodesPerRemoteDc\":2}}}"); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java index 1bf71a7ce6c..61533a8e8e9 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/context/bus/EventBusTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java index 7aaebe73b68..cb83b523ebe 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionEventsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.control; import static com.datastax.oss.driver.Assertions.assertThat; +import static org.awaitility.Awaitility.await; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -46,16 +49,19 @@ public void should_register_for_all_events_if_topology_requested() { // When controlConnection.init(true, false, false); - waitForPendingAdminTasks(); - DriverChannelOptions channelOptions = optionsCaptor.getValue(); // Then - assertThat(channelOptions.eventTypes) - .containsExactly( - ProtocolConstants.EventType.SCHEMA_CHANGE, - ProtocolConstants.EventType.STATUS_CHANGE, - ProtocolConstants.EventType.TOPOLOGY_CHANGE); - assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); + await() + .untilAsserted( + () -> { + DriverChannelOptions channelOptions = optionsCaptor.getValue(); + assertThat(channelOptions.eventTypes) + .containsExactly( + ProtocolConstants.EventType.SCHEMA_CHANGE, + ProtocolConstants.EventType.STATUS_CHANGE, + ProtocolConstants.EventType.TOPOLOGY_CHANGE); + assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); + }); } @Test @@ -69,13 +75,16 @@ public void should_register_for_schema_events_only_if_topology_not_requested() { // When controlConnection.init(false, false, false); - waitForPendingAdminTasks(); - DriverChannelOptions channelOptions = optionsCaptor.getValue(); // Then - assertThat(channelOptions.eventTypes) - .containsExactly(ProtocolConstants.EventType.SCHEMA_CHANGE); - assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); + await() + .untilAsserted( + () -> { + DriverChannelOptions channelOptions = optionsCaptor.getValue(); + assertThat(channelOptions.eventTypes) + .containsExactly(ProtocolConstants.EventType.SCHEMA_CHANGE); + assertThat(channelOptions.eventCallback).isEqualTo(controlConnection); + }); } @Test @@ -87,7 +96,7 @@ public void should_process_status_change_events() { when(channelFactory.connect(eq(node1), optionsCaptor.capture())) .thenReturn(CompletableFuture.completedFuture(channel1)); controlConnection.init(true, false, false); - waitForPendingAdminTasks(); + await().until(() -> optionsCaptor.getValue() != null); EventCallback callback = optionsCaptor.getValue().eventCallback; StatusChangeEvent event = new StatusChangeEvent(ProtocolConstants.StatusChangeType.UP, ADDRESS1); @@ -108,7 +117,7 @@ public void should_process_topology_change_events() { when(channelFactory.connect(eq(node1), optionsCaptor.capture())) .thenReturn(CompletableFuture.completedFuture(channel1)); controlConnection.init(true, false, false); - waitForPendingAdminTasks(); + 
await().until(() -> optionsCaptor.getValue() != null); EventCallback callback = optionsCaptor.getValue().eventCallback; TopologyChangeEvent event = new TopologyChangeEvent(ProtocolConstants.TopologyChangeType.NEW_NODE, ADDRESS1); @@ -129,7 +138,7 @@ public void should_process_schema_change_events() { when(channelFactory.connect(eq(node1), optionsCaptor.capture())) .thenReturn(CompletableFuture.completedFuture(channel1)); controlConnection.init(false, false, false); - waitForPendingAdminTasks(); + await().until(() -> optionsCaptor.getValue() != null); EventCallback callback = optionsCaptor.getValue().eventCallback; SchemaChangeEvent event = new SchemaChangeEvent( diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java index 845c0435aa4..526efefa2fe 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + *     http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.awaitility.Awaitility.await; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; @@ -34,6 +37,7 @@ import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import java.util.concurrent.TimeUnit; import org.junit.Test; import org.junit.runner.RunWith; @@ -59,12 +63,11 @@ public void should_init_with_first_contact_point_if_reachable() { // When CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); // Then - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); factoryHelper.verifyNoMoreCalls(); } @@ -101,13 +104,12 @@ public void should_init_with_second_contact_point_if_first_one_fails() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); // Then assertThatStage(initFuture) .isSuccess(v ->
assertThat(controlConnection.channel()).isEqualTo(channel2)); - verify(eventBus).fire(ChannelEvent.controlConnectionFailed(node1)); - verify(eventBus).fire(ChannelEvent.channelOpened(node2)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); // each attempt tries all nodes, so there is no reconnection verify(reconnectionPolicy, never()).newNodeSchedule(any(Node.class)); @@ -127,12 +129,11 @@ public void should_fail_to_init_if_all_contact_points_fail() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); // Then assertThatStage(initFuture).isFailed(); - verify(eventBus).fire(ChannelEvent.controlConnectionFailed(node1)); - verify(eventBus).fire(ChannelEvent.controlConnectionFailed(node2)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.controlConnectionFailed(node2)); // no reconnections at init verify(reconnectionPolicy, never()).newNodeSchedule(any(Node.class)); @@ -155,26 +156,23 @@ public void should_reconnect_if_channel_goes_down() throws Exception { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // When channel1.close(); - waitForPendingAdminTasks(); // Then // a reconnection was started - verify(reconnectionSchedule).nextDelay(); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); factoryHelper.waitForCall(node1); factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); - assertThat(controlConnection.channel()).isEqualTo(channel2); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager).refreshNodes(); - verify(loadBalancingPolicyWrapper).init(); + await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); + verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); + verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); factoryHelper.verifyNoMoreCalls(); } @@ -194,26 +192,23 @@ public void should_reconnect_if_node_becomes_ignored() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // When mockQueryPlan(node2); eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node1)); - waitForPendingAdminTasks(); // Then // an immediate reconnection was started - verify(reconnectionSchedule, never()).nextDelay();
factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); - assertThat(controlConnection.channel()).isEqualTo(channel2); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager).refreshNodes(); - verify(loadBalancingPolicyWrapper).init(); + await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); + verify(reconnectionSchedule, never()).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); + verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); + verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); factoryHelper.verifyNoMoreCalls(); } @@ -242,26 +237,23 @@ private void should_reconnect_if_event(NodeStateEvent event) { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // When mockQueryPlan(node2); eventBus.fire(event); - waitForPendingAdminTasks(); // Then // an immediate reconnection was started - verify(reconnectionSchedule, never()).nextDelay(); factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); - assertThat(controlConnection.channel()).isEqualTo(channel2); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus).fire(ChannelEvent.channelOpened(node2)); - verify(metadataManager).refreshNodes(); - verify(loadBalancingPolicyWrapper).init(); + await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); + verify(reconnectionSchedule, never()).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); + verify(metadataManager, VERIFY_TIMEOUT).refreshNodes(); + verify(loadBalancingPolicyWrapper, VERIFY_TIMEOUT).init(); factoryHelper.verifyNoMoreCalls(); } @@ -286,17 +278,15 @@ public void should_reconnect_if_node_became_ignored_during_reconnection_attempt( CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); mockQueryPlan(node2, node1); // channel1 goes down, triggering a reconnection channel1.close(); - waitForPendingAdminTasks(); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); // the reconnection to node2 is in progress factoryHelper.waitForCall(node2); @@ -305,11 +295,10 @@ public void should_reconnect_if_node_became_ignored_during_reconnection_attempt( eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); // the
reconnection to node2 completes channel2Future.complete(channel2); - waitForPendingAdminTasks(); // Then // The channel should get closed and we should try the next node - verify(channel2).forceClose(); + verify(channel2, VERIFY_TIMEOUT).forceClose(); factoryHelper.waitForCall(node1); } @@ -343,17 +332,15 @@ private void should_reconnect_if_event_during_reconnection_attempt(NodeStateEven CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); mockQueryPlan(node2, node1); // channel1 goes down, triggering a reconnection channel1.close(); - waitForPendingAdminTasks(); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); // the reconnection to node2 is in progress factoryHelper.waitForCall(node2); @@ -362,11 +349,10 @@ private void should_reconnect_if_event_during_reconnection_attempt(NodeStateEven eventBus.fire(event); // the reconnection to node2 completes channel2Future.complete(channel2); - waitForPendingAdminTasks(); // Then // The channel should get closed and we should try the next node - verify(channel2).forceClose(); + verify(channel2, VERIFY_TIMEOUT).forceClose(); factoryHelper.waitForCall(node1); } @@ -386,26 +372,23 @@ public void should_force_reconnection_if_pending() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // the channel fails and a reconnection is scheduled for later channel1.close(); - waitForPendingAdminTasks(); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); // When controlConnection.reconnectNow(); factoryHelper.waitForCall(node1); factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); // Then - assertThat(controlConnection.channel()).isEqualTo(channel2); - verify(eventBus).fire(ChannelEvent.channelOpened(node2)); + await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); factoryHelper.verifyNoMoreCalls(); } @@ -424,10 +407,9 @@ public void should_force_reconnection_even_if_connected() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v ->
assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // When controlConnection.reconnectNow(); @@ -435,41 +417,39 @@ public void should_force_reconnection_even_if_connected() { // Then factoryHelper.waitForCall(node1); factoryHelper.waitForCall(node2); - waitForPendingAdminTasks(); - assertThat(controlConnection.channel()).isEqualTo(channel2); - verify(channel1).forceClose(); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(eventBus).fire(ChannelEvent.channelOpened(node2)); + await().untilAsserted(() -> assertThat(controlConnection.channel()).isEqualTo(channel2)); + verify(channel1, VERIFY_TIMEOUT).forceClose(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node2)); factoryHelper.verifyNoMoreCalls(); } @Test - public void should_not_force_reconnection_if_not_init() { + public void should_not_force_reconnection_if_not_init() throws InterruptedException { // When controlConnection.reconnectNow(); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(500); // Then verify(reconnectionSchedule, never()).nextDelay(); } @Test - public void should_not_force_reconnection_if_closed() { + public void should_not_force_reconnection_if_closed() throws InterruptedException { // Given DriverChannel channel1 = newMockDriverChannel(1); MockChannelFactoryHelper factoryHelper = MockChannelFactoryHelper.builder(channelFactory).success(node1, channel1).build(); CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); CompletionStage<Void> closeFuture = controlConnection.forceCloseAsync(); assertThatStage(closeFuture).isSuccess(); // When controlConnection.reconnectNow(); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(500); // Then verify(reconnectionSchedule, never()).nextDelay(); @@ -486,16 +466,14 @@ public void should_close_channel_when_closing() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); // When CompletionStage<Void> closeFuture = controlConnection.forceCloseAsync(); - waitForPendingAdminTasks(); // Then assertThatStage(closeFuture).isSuccess(); - verify(channel1).forceClose(); + verify(channel1, VERIFY_TIMEOUT).forceClose(); factoryHelper.verifyNoMoreCalls(); } @@ -517,29 +495,26 @@ public void should_close_channel_if_closed_during_reconnection() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // the channel fails and a reconnection is scheduled channel1.close(); - waitForPendingAdminTasks(); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); factoryHelper.waitForCall(node1); // channel2 starts initializing (but the future is
not completed yet) factoryHelper.waitForCall(node2); // When // the control connection gets closed before channel2 initialization is complete - controlConnection.forceCloseAsync(); - waitForPendingAdminTasks(); + CompletionStage<Void> closeFuture = controlConnection.forceCloseAsync(); + assertThatStage(closeFuture).isSuccess(); channel2Future.complete(channel2); - waitForPendingAdminTasks(); // Then - verify(channel2).forceClose(); + verify(channel2, VERIFY_TIMEOUT).forceClose(); // no event because the control connection never "owned" the channel verify(eventBus, never()).fire(ChannelEvent.channelOpened(node2)); verify(eventBus, never()).fire(ChannelEvent.channelClosed(node2)); @@ -564,24 +539,22 @@ public void should_handle_channel_failure_if_closed_during_reconnection() { CompletionStage<Void> initFuture = controlConnection.init(false, false, false); factoryHelper.waitForCall(node1); - waitForPendingAdminTasks(); - assertThatStage(initFuture).isSuccess(); - assertThat(controlConnection.channel()).isEqualTo(channel1); - verify(eventBus).fire(ChannelEvent.channelOpened(node1)); + assertThatStage(initFuture) + .isSuccess(v -> assertThat(controlConnection.channel()).isEqualTo(channel1)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node1)); // the channel fails and a reconnection is scheduled channel1.close(); - waitForPendingAdminTasks(); - verify(eventBus).fire(ChannelEvent.channelClosed(node1)); - verify(reconnectionSchedule).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node1)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); // channel1 starts initializing (but the future is not completed yet) factoryHelper.waitForCall(node1); // When // the control connection gets closed before channel1 initialization fails - controlConnection.forceCloseAsync(); + CompletionStage<Void> closeFuture = controlConnection.forceCloseAsync(); + assertThatStage(closeFuture).isSuccess(); channel1Future.completeExceptionally(new Exception("mock failure")); - waitForPendingAdminTasks(); // Then // should never try channel2 because the reconnection has detected that it can stop after the diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java index a25b7c97f52..c52199465a8 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/control/ControlConnectionTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +17,12 @@ */ package com.datastax.oss.driver.internal.core.control; -import static org.assertj.core.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.when; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -38,28 +42,27 @@ import com.datastax.oss.driver.internal.core.metadata.MetadataManager; import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; import io.netty.channel.Channel; import io.netty.channel.DefaultChannelPromise; import io.netty.channel.DefaultEventLoopGroup; import io.netty.channel.EventLoop; -import io.netty.util.concurrent.Future; import java.net.InetSocketAddress; import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.Exchanger; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import org.junit.After; import org.junit.Before; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.mockito.verification.VerificationWithTimeout; abstract class ControlConnectionTestBase { protected static final InetSocketAddress ADDRESS1 = new InetSocketAddress("127.0.0.1", 9042); - protected static final InetSocketAddress ADDRESS2 = new InetSocketAddress("127.0.0.2", 9042); + + /** How long we wait when verifying mocks for async invocations */ + protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(500); @Mock protected InternalDriverContext context; @Mock protected DriverConfig config; @@ -120,6 +123,8 @@ public void setup() { mockQueryPlan(node1, node2); when(metadataManager.refreshNodes()).thenReturn(CompletableFuture.completedFuture(null)); + when(metadataManager.refreshSchema(anyString(), anyBoolean(), anyBoolean())) + .thenReturn(CompletableFuture.completedFuture(null)); when(context.getMetadataManager()).thenReturn(metadataManager); when(context.getConfig()).thenReturn(config); @@ -170,17 +175,4 @@ protected DriverChannel newMockDriverChannel(int id) { .thenReturn(new DefaultEndPoint(new InetSocketAddress("127.0.0." + id, 9042))); return driverChannel; } - - // Wait for all the tasks on the admin executor to complete. 
- protected void waitForPendingAdminTasks() { - // This works because the event loop group is single-threaded - Future<?> f = adminEventLoopGroup.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 100, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java new file mode 100644 index 00000000000..954cf0e14a0 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ConversionsTest.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.cql; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.cql.ColumnDefinition; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import java.util.List; +import org.junit.Test; + +public class ConversionsTest { + @Test + public void should_find_pk_indices_if_all_bound() { + assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk"))).containsExactly(0); + assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "c"))) + .containsExactly(0); + assertThat(Conversions.findIndices(partitionKey("pk"), variables("c", "pk"))) + .containsExactly(1); + assertThat( + Conversions.findIndices( + partitionKey("pk1", "pk2", "pk3"), + variables("c1", "pk2", "pk3", "c2", "pk1", "c3"))) + .containsExactly(4, 1, 2); + } + + @Test + public void should_use_first_pk_index_if_bound_multiple_times() { + assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "pk"))) + .containsExactly(0); + assertThat(Conversions.findIndices(partitionKey("pk"), variables("pk", "c1", "pk", "c2"))) + .containsExactly(0); + assertThat( + Conversions.findIndices( + partitionKey("pk1", "pk2", "pk3"), + variables("c1", "pk2", "pk3", "c2", "pk1", "c3", "pk1", "pk2"))) + .containsExactly(4, 1, 2); + } + + @Test + public void should_return_empty_pk_indices_if_at_least_one_component_not_bound() { + assertThat(Conversions.findIndices(partitionKey("pk"), variables("c1", "c2"))).isEmpty(); + assertThat( + Conversions.findIndices( + partitionKey("pk1", "pk2", "pk3"), variables("c1", "pk2", "c2", "pk1", "c3")))
.isEmpty(); + } + + private List<ColumnMetadata> partitionKey(String... columnNames) { + ImmutableList.Builder<ColumnMetadata> columns = + ImmutableList.builderWithExpectedSize(columnNames.length); + for (String columnName : columnNames) { + ColumnMetadata column = mock(ColumnMetadata.class); + when(column.getName()).thenReturn(CqlIdentifier.fromInternal(columnName)); + columns.add(column); + } + return columns.build(); + } + + private ColumnDefinitions variables(String... columnNames) { + ImmutableList.Builder<ColumnDefinition> columns = + ImmutableList.builderWithExpectedSize(columnNames.length); + for (String columnName : columnNames) { + ColumnDefinition column = mock(ColumnDefinition.class); + when(column.getName()).thenReturn(CqlIdentifier.fromInternal(columnName)); + columns.add(column); + } + return DefaultColumnDefinitions.valueOf(columns.build()); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java index ced7d095ee1..1924ef5a9af 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlPrepareHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + *     http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; +import static com.datastax.oss.driver.internal.core.cql.CqlRequestHandlerTestBase.defaultFrameOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; @@ -24,20 +27,20 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.NodeUnavailableException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; import com.datastax.oss.driver.api.core.cql.PreparedStatement; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.retry.RetryDecision; import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; import com.datastax.oss.driver.api.core.servererrors.OverloadedException; import com.datastax.oss.driver.internal.core.channel.ResponseCallback; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.protocol.internal.Frame; import com.datastax.oss.protocol.internal.Message; import com.datastax.oss.protocol.internal.ProtocolConstants; import com.datastax.oss.protocol.internal.request.Prepare; @@ -48,7 +51,7 @@ import com.datastax.oss.protocol.internal.response.result.RowsMetadata; import com.datastax.oss.protocol.internal.util.Bytes; import java.nio.ByteBuffer; -import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.CompletionStage; import org.junit.Before; @@ -179,8 +182,8 @@ public void should_retry_initial_prepare_if_recoverable_error() { when(harness .getContext() .getRetryPolicy(anyString()) - .onErrorResponse(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryDecision.RETRY_NEXT); + .onErrorResponseVerdict(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) + .thenReturn(RetryVerdict.RETRY_NEXT); CompletionStage<PreparedStatement> prepareFuture = new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") @@ -212,8 +215,8 @@ public void should_not_retry_initial_prepare_if_unrecoverable_error() { when(harness .getContext() .getRetryPolicy(anyString()) - .onErrorResponse(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryDecision.RETHROW); + .onErrorResponseVerdict(eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) + .thenReturn(RetryVerdict.RETHROW); CompletionStage<PreparedStatement> prepareFuture = new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") @@ -230,6 +233,39 @@ public void should_not_retry_initial_prepare_if_unrecoverable_error() { } } + @Test + public
void should_fail_if_nodes_unavailable() { + RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); + try (RequestHandlerTestHarness harness = + harnessBuilder.withEmptyPool(node1).withEmptyPool(node2).build()) { + CompletionStage<PreparedStatement> prepareFuture = + new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") + .handle(); + assertThatStage(prepareFuture) + .isFailed( + error -> { + assertThat(error).isInstanceOf(AllNodesFailedException.class); + Map<Node, List<Throwable>> allErrors = + ((AllNodesFailedException) error).getAllErrors(); + assertThat(allErrors).hasSize(2); + assertThat(allErrors) + .hasEntrySatisfying( + node1, + nodeErrors -> + assertThat(nodeErrors) + .singleElement() + .isInstanceOf(NodeUnavailableException.class)); + assertThat(allErrors) + .hasEntrySatisfying( + node2, + nodeErrors -> + assertThat(nodeErrors) + .singleElement() + .isInstanceOf(NodeUnavailableException.class)); + }); + } + } + @Test public void should_fail_if_retry_policy_ignores_error() { RequestHandlerTestHarness.Builder harnessBuilder = @@ -245,9 +281,9 @@ public void should_fail_if_retry_policy_ignores_error() { // Make node1's error unrecoverable, will rethrow RetryPolicy mockRetryPolicy = harness.getContext().getRetryPolicy(DriverExecutionProfile.DEFAULT_NAME); - when(mockRetryPolicy.onErrorResponse( + when(mockRetryPolicy.onErrorResponseVerdict( eq(PREPARE_REQUEST), any(OverloadedException.class), eq(0))) - .thenReturn(RetryDecision.IGNORE); + .thenReturn(RetryVerdict.IGNORE); CompletionStage<PreparedStatement> prepareFuture = new CqlPrepareHandler(PREPARE_REQUEST, harness.getSession(), harness.getContext(), "test") @@ -320,16 +356,6 @@ public void should_propagate_custom_payload_on_all_nodes() { } } - private static Frame defaultFrameOf(Message responseMessage) { - return Frame.forResponse( - DefaultProtocolVersion.V4.getCode(), - 0, - null, - Frame.NO_PAYLOAD, - Collections.emptyList(), - responseMessage); - } - private static Message simplePrepared() { RowsMetadata variablesMetadata = new RowsMetadata( diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java index 0e503a134c8..ccac873c616 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerRetryTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + *     http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,11 +36,11 @@ import com.datastax.oss.driver.api.core.cql.AsyncResultSet; import com.datastax.oss.driver.api.core.cql.ExecutionInfo; import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.retry.RetryDecision; import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; @@ -46,6 +48,8 @@ import com.datastax.oss.driver.api.core.servererrors.ServerError; import com.datastax.oss.driver.api.core.servererrors.UnavailableException; import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; import com.datastax.oss.protocol.internal.ProtocolConstants; import com.datastax.oss.protocol.internal.response.Error; import com.datastax.oss.protocol.internal.response.error.ReadTimeout; @@ -53,9 +57,13 @@ import com.datastax.oss.protocol.internal.response.error.WriteTimeout; import com.tngtech.java.junit.dataprovider.DataProvider; import com.tngtech.java.junit.dataprovider.UseDataProvider; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.Test; public class CqlRequestHandlerRetryTest extends CqlRequestHandlerTestBase { @@ -63,7 +71,7 @@ public class CqlRequestHandlerRetryTest extends CqlRequestHandlerTestBase { @Test @UseDataProvider("allIdempotenceConfigs") public void should_always_try_next_node_if_bootstrapping( - boolean defaultIdempotence, SimpleStatement statement) { + boolean defaultIdempotence, Statement<?> statement) { try (RequestHandlerTestHarness harness = RequestHandlerTestHarness.builder() .withDefaultIdempotence(defaultIdempotence) @@ -105,7 +113,7 @@ public void should_always_try_next_node_if_bootstrapping( @Test @UseDataProvider("allIdempotenceConfigs") public void should_always_rethrow_query_validation_error( - boolean defaultIdempotence, SimpleStatement statement) { + boolean defaultIdempotence, Statement<?> statement) { try (RequestHandlerTestHarness harness = RequestHandlerTestHarness.builder() .withDefaultIdempotence(defaultIdempotence) @@ -145,15 +153,15 @@ public void should_always_rethrow_query_validation_error( @Test @UseDataProvider("failureAndIdempotent") public void should_try_next_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, SimpleStatement statement) { + FailureScenario failureScenario, boolean defaultIdempotence, Statement<?> statement) { RequestHandlerTestHarness.Builder harnessBuilder =
RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); failureScenario.mockRequestError(harnessBuilder, node1); harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyDecision( - harness.getContext().getRetryPolicy(anyString()), RetryDecision.RETRY_NEXT); + failureScenario.mockRetryPolicyVerdict( + harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); CompletionStage<AsyncResultSet> resultSetFuture = new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") @@ -196,15 +204,15 @@ public void should_try_next_node_if_idempotent_and_retry_policy_decides_so( @Test @UseDataProvider("failureAndIdempotent") public void should_try_same_node_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, SimpleStatement statement) { + FailureScenario failureScenario, boolean defaultIdempotence, Statement<?> statement) { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); failureScenario.mockRequestError(harnessBuilder, node1); harnessBuilder.withResponse(node1, defaultFrameOf(singleRow())); try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyDecision( - harness.getContext().getRetryPolicy(anyString()), RetryDecision.RETRY_SAME); + failureScenario.mockRetryPolicyVerdict( + harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_SAME); CompletionStage<AsyncResultSet> resultSetFuture = new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") @@ -247,14 +255,14 @@ public void should_try_same_node_if_idempotent_and_retry_policy_decides_so( @Test @UseDataProvider("failureAndIdempotent") public void should_ignore_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, SimpleStatement statement) { + FailureScenario failureScenario, boolean defaultIdempotence, Statement<?> statement) { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); failureScenario.mockRequestError(harnessBuilder, node1); try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyDecision( - harness.getContext().getRetryPolicy(anyString()), RetryDecision.IGNORE); + failureScenario.mockRetryPolicyVerdict( + harness.getContext().getRetryPolicy(anyString()), RetryVerdict.IGNORE); CompletionStage<AsyncResultSet> resultSetFuture = new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") @@ -295,15 +303,15 @@ public void should_ignore_error_if_idempotent_and_retry_policy_decides_so( @Test @UseDataProvider("failureAndIdempotent") public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so( - FailureScenario failureScenario, boolean defaultIdempotence, SimpleStatement statement) { + FailureScenario failureScenario, boolean defaultIdempotence, Statement<?> statement) { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); failureScenario.mockRequestError(harnessBuilder, node1); try (RequestHandlerTestHarness harness = harnessBuilder.build()) { - failureScenario.mockRetryPolicyDecision( - harness.getContext().getRetryPolicy(anyString()), RetryDecision.RETHROW); + failureScenario.mockRetryPolicyVerdict(
harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); CompletionStage<AsyncResultSet> resultSetFuture = new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") .handle(); @@ -333,7 +341,7 @@ public void should_rethrow_error_if_idempotent_and_retry_policy_decides_so( @Test @UseDataProvider("failureAndNotIdempotent") public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_rethrows( - FailureScenario failureScenario, boolean defaultIdempotence, SimpleStatement statement) { + FailureScenario failureScenario, boolean defaultIdempotence, Statement<?> statement) { // For two of the possible exceptions, the retry policy is called even if the statement is not // idempotent @@ -349,8 +357,8 @@ public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_re try (RequestHandlerTestHarness harness = harnessBuilder.build()) { if (shouldCallRetryPolicy) { - failureScenario.mockRetryPolicyDecision( - harness.getContext().getRetryPolicy(anyString()), RetryDecision.RETHROW); + failureScenario.mockRetryPolicyVerdict( + harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETHROW); } CompletionStage<AsyncResultSet> resultSetFuture = @@ -382,6 +390,63 @@ public void should_rethrow_error_if_not_idempotent_and_error_unsafe_or_policy_re } } + @Test + @UseDataProvider("failureAndIdempotent") + public void should_not_fail_with_duplicate_key_when_retrying_with_request_id_generator( + FailureScenario failureScenario, boolean defaultIdempotence, Statement<?> statement) { + + // Create a RequestIdGenerator that uses the same key as the statement's custom payload + RequestIdGenerator requestIdGenerator = + new RequestIdGenerator() { + private AtomicInteger counter = new AtomicInteger(0); + + @Override + public String getSessionRequestId() { + return "session-123"; + } + + @Override + public String getNodeRequestId(@NonNull Request request, @NonNull String parentId) { + return parentId + "-" + counter.getAndIncrement(); + } + }; + + RequestHandlerTestHarness.Builder harnessBuilder = + RequestHandlerTestHarness.builder() + .withDefaultIdempotence(defaultIdempotence) + .withRequestIdGenerator(requestIdGenerator); + failureScenario.mockRequestError(harnessBuilder, node1); + harnessBuilder.withResponse(node2, defaultFrameOf(singleRow())); + + try (RequestHandlerTestHarness harness = harnessBuilder.build()) { + failureScenario.mockRetryPolicyVerdict( + harness.getContext().getRetryPolicy(anyString()), RetryVerdict.RETRY_NEXT); + + CompletionStage<AsyncResultSet> resultSetFuture = + new CqlRequestHandler(statement, harness.getSession(), harness.getContext(), "test") + .handle(); + + // The test should succeed without throwing a duplicate key exception + assertThatStage(resultSetFuture) + .isSuccess( + resultSet -> { + Iterator<Row> rows = resultSet.currentPage().iterator(); + assertThat(rows.hasNext()).isTrue(); + assertThat(rows.next().getString("message")).isEqualTo("hello, world"); + + ExecutionInfo executionInfo = resultSet.getExecutionInfo(); + assertThat(executionInfo.getCoordinator()).isEqualTo(node2); + assertThat(executionInfo.getErrors()).hasSize(1); + assertThat(executionInfo.getErrors().get(0).getKey()).isEqualTo(node1); + + // Verify that the custom payload still contains the request ID key + // (either the original value or the generated one, depending on implementation) + assertThat(executionInfo.getRequest().getCustomPayload().get("request-id")) + .isEqualTo(ByteBuffer.wrap("session-123-1".getBytes(StandardCharsets.UTF_8))); + }); + } + } + /** * Sets up the mocks to simulate an error
from a node, and make the retry policy return a given * decision for that error. @@ -405,7 +470,7 @@ protected FailureScenario( abstract void mockRequestError(RequestHandlerTestHarness.Builder builder, Node node); - abstract void mockRetryPolicyDecision(RetryPolicy policy, RetryDecision decision); + abstract void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict); } @DataProvider @@ -426,15 +491,15 @@ public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node nod } @Override - public void mockRetryPolicyDecision(RetryPolicy policy, RetryDecision decision) { - when(policy.onReadTimeout( - any(SimpleStatement.class), + public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { + when(policy.onReadTimeoutVerdict( + any(Statement.class), eq(DefaultConsistencyLevel.LOCAL_ONE), eq(2), eq(1), eq(true), eq(0))) - .thenReturn(decision); + .thenReturn(verdict); } }, new FailureScenario( @@ -456,15 +521,15 @@ public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node nod } @Override - public void mockRetryPolicyDecision(RetryPolicy policy, RetryDecision decision) { - when(policy.onWriteTimeout( - any(SimpleStatement.class), + public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { + when(policy.onWriteTimeoutVerdict( + any(Statement.class), eq(DefaultConsistencyLevel.LOCAL_ONE), eq(DefaultWriteType.SIMPLE), eq(2), eq(1), eq(0))) - .thenReturn(decision); + .thenReturn(verdict); } }, new FailureScenario( @@ -482,14 +547,14 @@ public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node nod } @Override - public void mockRetryPolicyDecision(RetryPolicy policy, RetryDecision decision) { - when(policy.onUnavailable( - any(SimpleStatement.class), + public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { + when(policy.onUnavailableVerdict( + any(Statement.class), eq(DefaultConsistencyLevel.LOCAL_ONE), eq(2), eq(1), eq(0))) - .thenReturn(decision); + .thenReturn(verdict); } }, new FailureScenario( @@ -506,9 +571,9 @@ public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node nod } @Override - public void mockRetryPolicyDecision(RetryPolicy policy, RetryDecision decision) { - when(policy.onErrorResponse(any(SimpleStatement.class), any(ServerError.class), eq(0))) - .thenReturn(decision); + public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { + when(policy.onErrorResponseVerdict(any(Statement.class), any(ServerError.class), eq(0))) + .thenReturn(verdict); } }, new FailureScenario( @@ -522,10 +587,10 @@ public void mockRequestError(RequestHandlerTestHarness.Builder builder, Node nod } @Override - public void mockRetryPolicyDecision(RetryPolicy policy, RetryDecision decision) { - when(policy.onRequestAborted( - any(SimpleStatement.class), any(HeartbeatException.class), eq(0))) - .thenReturn(decision); + public void mockRetryPolicyVerdict(RetryPolicy policy, RetryVerdict verdict) { + when(policy.onRequestAbortedVerdict( + any(Statement.class), any(HeartbeatException.class), eq(0))) + .thenReturn(verdict); } }); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java index 2eca70f1dc2..a09a9eb3d5a 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java +++ 
b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerSpeculativeExecutionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,7 +30,7 @@ import com.datastax.oss.driver.api.core.NoNodeAvailableException; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; import com.datastax.oss.driver.api.core.servererrors.BootstrappingException; @@ -37,6 +39,7 @@ import com.datastax.oss.protocol.internal.ProtocolConstants; import com.datastax.oss.protocol.internal.response.Error; import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.List; import java.util.Map; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; @@ -47,7 +50,7 @@ public class CqlRequestHandlerSpeculativeExecutionTest extends CqlRequestHandler @Test @UseDataProvider("nonIdempotentConfig") public void should_not_schedule_speculative_executions_if_not_idempotent( - boolean defaultIdempotence, SimpleStatement statement) { + boolean defaultIdempotence, Statement statement) { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); @@ -71,7 +74,7 @@ public void should_not_schedule_speculative_executions_if_not_idempotent( @Test @UseDataProvider("idempotentConfig") public void should_schedule_speculative_executions( - boolean defaultIdempotence, SimpleStatement statement) throws Exception { + boolean defaultIdempotence, Statement statement) throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); @@ -133,7 +136,7 @@ public void should_schedule_speculative_executions( @Test @UseDataProvider("idempotentConfig") public void should_not_start_execution_if_result_complete( - boolean defaultIdempotence, SimpleStatement statement) throws Exception { + boolean defaultIdempotence, Statement statement) throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); @@ -191,7 +194,7 @@ public void should_not_start_execution_if_result_complete( @Test 
@UseDataProvider("idempotentConfig") - public void should_fail_if_no_nodes(boolean defaultIdempotence, SimpleStatement statement) { + public void should_fail_if_no_nodes(boolean defaultIdempotence, Statement statement) { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); // No configured behaviors => will yield an empty query plan @@ -218,7 +221,7 @@ public void should_fail_if_no_nodes(boolean defaultIdempotence, SimpleStatement @Test @UseDataProvider("idempotentConfig") public void should_fail_if_no_more_nodes_and_initial_execution_is_last( - boolean defaultIdempotence, SimpleStatement statement) throws Exception { + boolean defaultIdempotence, Statement statement) throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); @@ -261,10 +264,11 @@ public void should_fail_if_no_more_nodes_and_initial_execution_is_last( .isFailed( error -> { assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map nodeErrors = ((AllNodesFailedException) error).getErrors(); + Map> nodeErrors = + ((AllNodesFailedException) error).getAllErrors(); assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2)).isInstanceOf(BootstrappingException.class); + assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); + assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); }); } } @@ -272,7 +276,7 @@ public void should_fail_if_no_more_nodes_and_initial_execution_is_last( @Test @UseDataProvider("idempotentConfig") public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( - boolean defaultIdempotence, SimpleStatement statement) throws Exception { + boolean defaultIdempotence, Statement statement) throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); @@ -315,10 +319,11 @@ public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( .isFailed( error -> { assertThat(error).isInstanceOf(AllNodesFailedException.class); - Map nodeErrors = ((AllNodesFailedException) error).getErrors(); + Map> nodeErrors = + ((AllNodesFailedException) error).getAllErrors(); assertThat(nodeErrors).containsOnlyKeys(node1, node2); - assertThat(nodeErrors.get(node1)).isInstanceOf(BootstrappingException.class); - assertThat(nodeErrors.get(node2)).isInstanceOf(BootstrappingException.class); + assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class); + assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class); }); } } @@ -326,7 +331,7 @@ public void should_fail_if_no_more_nodes_and_speculative_execution_is_last( @Test @UseDataProvider("idempotentConfig") public void should_retry_in_speculative_executions( - boolean defaultIdempotence, SimpleStatement statement) throws Exception { + boolean defaultIdempotence, Statement statement) throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); @@ -375,7 +380,7 @@ public void 
should_retry_in_speculative_executions( @Test @UseDataProvider("idempotentConfig") public void should_stop_retrying_other_executions_if_result_complete( - boolean defaultIdempotence, SimpleStatement statement) throws Exception { + boolean defaultIdempotence, Statement statement) throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence); PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java index 78542f4adb5..c1a2765eef0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,9 +23,11 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.datastax.oss.driver.api.core.AllNodesFailedException; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.DriverTimeoutException; import com.datastax.oss.driver.api.core.NoNodeAvailableException; +import com.datastax.oss.driver.api.core.NodeUnavailableException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; import com.datastax.oss.driver.api.core.cql.BoundStatement; @@ -31,6 +35,8 @@ import com.datastax.oss.driver.api.core.cql.ExecutionInfo; import com.datastax.oss.driver.api.core.cql.PreparedStatement; import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.session.RepreparePayload; import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; import com.datastax.oss.protocol.internal.request.Prepare; @@ -42,6 +48,8 @@ import java.time.Duration; import java.util.Collections; import java.util.Iterator; +import java.util.List; +import java.util.Map; import java.util.concurrent.CompletionStage; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -104,6 +112,43 @@ public void should_fail_if_no_node_available() { } } + @Test + public void should_fail_if_nodes_unavailable() { + RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); + try (RequestHandlerTestHarness harness = + 
harnessBuilder.withEmptyPool(node1).withEmptyPool(node2).build()) { + CompletionStage<AsyncResultSet> resultSetFuture = + new CqlRequestHandler( + UNDEFINED_IDEMPOTENCE_STATEMENT, + harness.getSession(), + harness.getContext(), + "test") + .handle(); + assertThatStage(resultSetFuture) + .isFailed( + error -> { + assertThat(error).isInstanceOf(AllNodesFailedException.class); + Map<Node, List<Throwable>> allErrors = + ((AllNodesFailedException) error).getAllErrors(); + assertThat(allErrors).hasSize(2); + assertThat(allErrors) + .hasEntrySatisfying( + node1, + nodeErrors -> + assertThat(nodeErrors) + .singleElement() + .isInstanceOf(NodeUnavailableException.class)); + assertThat(allErrors) + .hasEntrySatisfying( + node2, + nodeErrors -> + assertThat(nodeErrors) + .singleElement() + .isInstanceOf(NodeUnavailableException.class)); + }); + } + } + @Test public void should_time_out_if_first_node_takes_too_long_to_respond() throws Exception { RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); @@ -172,6 +217,7 @@ public void should_reprepare_on_the_fly_if_not_prepared() throws InterruptedExce BoundStatement boundStatement = mock(BoundStatement.class); when(boundStatement.getPreparedStatement()).thenReturn(preparedStatement); when(boundStatement.getValues()).thenReturn(Collections.emptyList()); + when(boundStatement.getNowInSeconds()).thenReturn(Statement.NO_NOW_IN_SECONDS); RequestHandlerTestHarness.Builder harnessBuilder = RequestHandlerTestHarness.builder(); // For the first attempt that gets the UNPREPARED response @@ -194,11 +240,11 @@ public void should_reprepare_on_the_fly_if_not_prepared() throws InterruptedExce // Before we proceed, mock the PREPARE exchange that will occur as soon as we complete the // first response. node1Behavior.mockFollowupRequest( - Prepare.class, defaultFrameOf(new Prepared(mockId.array(), null, null, null))); + Prepare.class, defaultFrameOf(new Prepared(Bytes.getArray(mockId), null, null, null))); node1Behavior.setWriteSuccess(); node1Behavior.setResponseSuccess( - defaultFrameOf(new Unprepared("mock message", mockId.array()))); + defaultFrameOf(new Unprepared("mock message", Bytes.getArray(mockId)))); // Should now re-prepare, re-execute and succeed. assertThatStage(resultSetFuture).isSuccess(); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java index 54fb1e3a7b3..9bd3b6fa28c 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
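// A minimal sketch (not part of the patch; `session`, `statement` and an SLF4J
// `logger` are assumed) of how caller code consumes the Map<Node, List<Throwable>>
// shape asserted above: AllNodesFailedException.getAllErrors() records every failed
// attempt per node, whereas the legacy getErrors() kept only the last error per node.
try {
  session.execute(statement);
} catch (AllNodesFailedException e) {
  for (Map.Entry<Node, List<Throwable>> entry : e.getAllErrors().entrySet()) {
    for (Throwable attempt : entry.getValue()) {
      // SLF4J consumes the trailing Throwable as the logged exception
      logger.warn("Attempt on {} failed", entry.getKey(), attempt);
    }
  }
}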
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,6 +23,8 @@ import com.datastax.oss.driver.TestDataProviders; import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchType; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.metrics.NodeMetric; import com.datastax.oss.driver.internal.core.metadata.DefaultNode; @@ -36,7 +40,6 @@ import com.datastax.oss.protocol.internal.util.Bytes; import com.tngtech.java.junit.dataprovider.DataProvider; import com.tngtech.java.junit.dataprovider.DataProviderRunner; -import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.ArrayDeque; import java.util.Collections; @@ -56,9 +59,12 @@ public abstract class CqlRequestHandlerTestBase { SimpleStatement.builder("mock query").setIdempotence(true).build(); protected static final SimpleStatement NON_IDEMPOTENT_STATEMENT = SimpleStatement.builder("mock query").setIdempotence(false).build(); - protected static final InetSocketAddress ADDRESS1 = new InetSocketAddress("127.0.0.1", 9042); - protected static final InetSocketAddress ADDRESS2 = new InetSocketAddress("127.0.0.2", 9042); - protected static final InetSocketAddress ADDRESS3 = new InetSocketAddress("127.0.0.3", 9042); + protected static final BatchStatement UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT = + BatchStatement.newInstance(BatchType.LOGGED, UNDEFINED_IDEMPOTENCE_STATEMENT); + protected static final BatchStatement IDEMPOTENT_BATCH_STATEMENT = + BatchStatement.newInstance(BatchType.LOGGED, IDEMPOTENT_STATEMENT).setIdempotent(true); + protected static final BatchStatement NON_IDEMPOTENT_BATCH_STATEMENT = + BatchStatement.newInstance(BatchType.LOGGED, NON_IDEMPOTENT_STATEMENT).setIdempotent(false); @Mock protected DefaultNode node1; @Mock protected DefaultNode node2; @@ -118,6 +124,9 @@ public static Object[][] idempotentConfig() { new Object[] {true, UNDEFINED_IDEMPOTENCE_STATEMENT}, new Object[] {false, IDEMPOTENT_STATEMENT}, new Object[] {true, IDEMPOTENT_STATEMENT}, + new Object[] {true, UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT}, + new Object[] {false, IDEMPOTENT_BATCH_STATEMENT}, + new Object[] {true, IDEMPOTENT_BATCH_STATEMENT}, }; } @@ -131,6 +140,9 @@ public static Object[][] nonIdempotentConfig() { new Object[] {false, UNDEFINED_IDEMPOTENCE_STATEMENT}, new Object[] {true, NON_IDEMPOTENT_STATEMENT}, new Object[] {false, NON_IDEMPOTENT_STATEMENT}, + new Object[] {false, UNDEFINED_IDEMPOTENCE_BATCH_STATEMENT}, + new Object[] {true, NON_IDEMPOTENT_BATCH_STATEMENT}, + new Object[] {false, NON_IDEMPOTENT_BATCH_STATEMENT}, }; } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java index a6b4ebca2a2..ecc087fb8ac 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/CqlRequestHandlerTrackerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java index ebd9a6d0f0d..8ed509caeb7 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/DefaultAsyncResultSetTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -58,7 +60,7 @@ public class DefaultAsyncResultSetTest { public void setup() { MockitoAnnotations.initMocks(this); - when(executionInfo.getStatement()).thenAnswer(invocation -> statement); + when(executionInfo.getRequest()).thenAnswer(invocation -> statement); when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); when(context.getProtocolVersion()).thenReturn(DefaultProtocolVersion.DEFAULT); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java index a7d7e66ec81..d6787cc018e 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PagingIterableSpliteratorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
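// A minimal sketch (assumed caller code, not part of the patch) of the API change
// mocked above: ExecutionInfo.getStatement() is generalized to getRequest(), which
// returns the broader Request type, so callers that need statement-specific
// accessors now downcast explicitly.
ExecutionInfo info = resultSet.getExecutionInfo();
Request request = info.getRequest();
if (request instanceof Statement) {
  Statement<?> stmt = (Statement<?>) request;
  // statement-only accessors remain available after the cast:
  boolean idempotent = Boolean.TRUE.equals(stmt.isIdempotent());
}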
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,18 +20,13 @@ import static java.util.stream.StreamSupport.stream; import static org.assertj.core.api.Assertions.assertThat; -import com.datastax.oss.driver.api.core.PagingIterable; -import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; -import com.datastax.oss.driver.api.core.cql.ExecutionInfo; -import com.datastax.oss.driver.internal.core.cql.PagingIterableSpliterator.Builder; +import com.datastax.oss.driver.internal.core.MockPagingIterable; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.Lists; import com.tngtech.java.junit.dataprovider.DataProvider; import com.tngtech.java.junit.dataprovider.DataProviderRunner; import com.tngtech.java.junit.dataprovider.UseDataProvider; -import edu.umd.cs.findbugs.annotations.NonNull; import java.util.ArrayList; -import java.util.Iterator; import java.util.List; import java.util.Spliterator; import java.util.function.Consumer; @@ -46,7 +43,7 @@ public class PagingIterableSpliteratorTest { public void should_split_with_estimated_size( int size, int chunkSize, List<Integer> expectedLeft, List<Integer> expectedRight) { // given - Builder<Integer> builder = + PagingIterableSpliterator.Builder<Integer> builder = PagingIterableSpliterator.builder(iterableOfSize(size)) .withEstimatedSize(size) .withChunkSize(chunkSize); @@ -87,6 +84,7 @@ public void should_split_with_estimated_size( @DataProvider public static Iterable splitsWithEstimatedSize() { List<List<Object>> arguments = new ArrayList<>(); + arguments.add(Lists.newArrayList(0, 1, ImmutableList.of(), ImmutableList.of())); arguments.add(Lists.newArrayList(1, 1, ImmutableList.of(), ImmutableList.of(0))); arguments.add(Lists.newArrayList(1, 2, ImmutableList.of(), ImmutableList.of(0))); arguments.add(Lists.newArrayList(2, 1, ImmutableList.of(0), ImmutableList.of(1))); @@ -110,7 +108,7 @@ public static Iterable splitsWithEstimatedSize() { public void should_split_with_unknown_size( int size, int chunkSize, List<Integer> expectedLeft, List<Integer> expectedRight) { // given - Builder<Integer> builder = + PagingIterableSpliterator.Builder<Integer> builder = PagingIterableSpliterator.builder(iterableOfSize(size)).withChunkSize(chunkSize); // when PagingIterableSpliterator<Integer> right = builder.build(); @@ -145,6 +143,7 @@ public void should_split_with_unknown_size( @DataProvider public static Iterable splitsWithUnknownSize() { List<List<Object>> arguments = new ArrayList<>(); + arguments.add(Lists.newArrayList(0, 1, ImmutableList.of(), ImmutableList.of())); arguments.add(Lists.newArrayList(1, 1, ImmutableList.of(0), ImmutableList.of())); arguments.add(Lists.newArrayList(1, 2, ImmutableList.of(0), ImmutableList.of())); arguments.add(Lists.newArrayList(2, 1, ImmutableList.of(0), ImmutableList.of(1))); @@ -223,48 +222,6 @@ private static MockPagingIterable<Integer> iterableOfSize(int size) { IntStream.range(0, size).boxed().collect(Collectors.toList()).iterator()); } - private static class MockPagingIterable implements PagingIterable { - - private final Iterator iterator; - - private MockPagingIterable(Iterator iterator) { - this.iterator = iterator; - } - - @NonNull - @Override - public Iterator iterator() { - return iterator; - } - - @Override - public boolean isFullyFetched() { - return !iterator.hasNext(); - } - - @NonNull - @Override - public
ColumnDefinitions getColumnDefinitions() { - throw new UnsupportedOperationException("irrelevant"); - } - - @NonNull - @Override - public List getExecutionInfos() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public int getAvailableWithoutFetching() { - throw new UnsupportedOperationException("irrelevant"); - } - - @Override - public boolean wasApplied() { - throw new UnsupportedOperationException("irrelevant"); - } - } - private static class TestConsumer implements Consumer { private final List items = new ArrayList<>(); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java index 55594f46aed..9b018f17531 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/PoolBehavior.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +34,7 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.EventLoop; import io.netty.channel.socket.DefaultSocketChannelConfig; -import io.netty.util.concurrent.GlobalEventExecutor; +import io.netty.util.concurrent.ImmediateEventExecutor; import io.netty.util.concurrent.Promise; import java.util.concurrent.CompletableFuture; @@ -59,7 +61,8 @@ public PoolBehavior(Node node, boolean createChannel) { this.channel = mock(DriverChannel.class); EventLoop eventLoop = mock(EventLoop.class); ChannelConfig config = mock(DefaultSocketChannelConfig.class); - this.writePromise = GlobalEventExecutor.INSTANCE.newPromise(); + this.writePromise = ImmediateEventExecutor.INSTANCE.newPromise(); + when(channel.preAcquireId()).thenReturn(true); when(channel.write(any(Message.class), anyBoolean(), anyMap(), any(ResponseCallback.class))) .thenAnswer( invocation -> { @@ -110,7 +113,7 @@ public DriverChannel getChannel() { /** Mocks a follow-up request on the same channel. 
*/ public void mockFollowupRequest(Class expectedMessage, Frame responseFrame) { - Promise writePromise2 = GlobalEventExecutor.INSTANCE.newPromise(); + Promise writePromise2 = ImmediateEventExecutor.INSTANCE.newPromise(); CompletableFuture callbackFuture2 = new CompletableFuture<>(); when(channel.write(any(expectedMessage), anyBoolean(), anyMap(), any(ResponseCallback.class))) .thenAnswer( diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java index f84abfe39f7..dc238775bc1 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/QueryTraceFetcherTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,6 +32,7 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; import com.datastax.oss.driver.api.core.cql.ExecutionInfo; import com.datastax.oss.driver.api.core.cql.QueryTrace; import com.datastax.oss.driver.api.core.cql.Row; @@ -67,6 +70,7 @@ public class QueryTraceFetcherTest { private static final UUID TRACING_ID = UUID.randomUUID(); private static final ByteBuffer PAGING_STATE = Bytes.fromHexString("0xdeadbeef"); + private static final int PORT = 7000; @Mock private CqlSession session; @Mock private InternalDriverContext context; @@ -75,7 +79,7 @@ public class QueryTraceFetcherTest { @Mock private NettyOptions nettyOptions; @Mock private EventExecutorGroup adminEventExecutorGroup; @Mock private EventExecutor eventExecutor; - @Mock private InetAddress address; + private InetAddress address = InetAddress.getLoopbackAddress(); @Captor private ArgumentCaptor statementCaptor; @@ -134,7 +138,8 @@ public void should_succeed_when_both_queries_succeed_immediately() { assertThat(trace.getTracingId()).isEqualTo(TRACING_ID); assertThat(trace.getRequestType()).isEqualTo("mock request"); assertThat(trace.getDurationMicros()).isEqualTo(42); - assertThat(trace.getCoordinator()).isEqualTo(address); + assertThat(trace.getCoordinatorAddress().getAddress()).isEqualTo(address); + assertThat(trace.getCoordinatorAddress().getPort()).isEqualTo(PORT); assertThat(trace.getParameters()) .hasSize(2) .containsEntry("key1", "value1") @@ -147,7 +152,9 @@ public void should_succeed_when_both_queries_succeed_immediately() { TraceEvent event = events.get(i); assertThat(event.getActivity()).isEqualTo("mock activity " + i); 
assertThat(event.getTimestamp()).isEqualTo(i); - assertThat(event.getSource()).isEqualTo(address); + assertThat(event.getSourceAddress()).isNotNull(); + assertThat(event.getSourceAddress().getAddress()).isEqualTo(address); + assertThat(event.getSourceAddress().getPort()).isEqualTo(PORT); assertThat(event.getSourceElapsedMicros()).isEqualTo(i); assertThat(event.getThreadName()).isEqualTo("mock thread " + i); } @@ -214,7 +221,8 @@ public void should_retry_when_session_row_is_incomplete() { assertThat(trace.getTracingId()).isEqualTo(TRACING_ID); assertThat(trace.getRequestType()).isEqualTo("mock request"); assertThat(trace.getDurationMicros()).isEqualTo(42); - assertThat(trace.getCoordinator()).isEqualTo(address); + assertThat(trace.getCoordinatorAddress().getAddress()).isEqualTo(address); + assertThat(trace.getCoordinatorAddress().getPort()).isEqualTo(PORT); assertThat(trace.getParameters()) .hasSize(2) .containsEntry("key1", "value1") @@ -227,7 +235,9 @@ public void should_retry_when_session_row_is_incomplete() { TraceEvent event = events.get(i); assertThat(event.getActivity()).isEqualTo("mock activity " + i); assertThat(event.getTimestamp()).isEqualTo(i); - assertThat(event.getSource()).isEqualTo(address); + assertThat(event.getSourceAddress()).isNotNull(); + assertThat(event.getSourceAddress().getAddress()).isEqualTo(address); + assertThat(event.getSourceAddress().getPort()).isEqualTo(PORT); assertThat(event.getSourceElapsedMicros()).isEqualTo(i); assertThat(event.getThreadName()).isEqualTo("mock thread " + i); } @@ -294,6 +304,8 @@ private CompletionStage incompleteSessionRow() { private CompletionStage sessionRow(Integer duration) { Row row = mock(Row.class); + ColumnDefinitions definitions = mock(ColumnDefinitions.class); + when(row.getColumnDefinitions()).thenReturn(definitions); when(row.getString("request")).thenReturn("mock request"); if (duration == null) { when(row.isNull("duration")).thenReturn(true); @@ -301,6 +313,8 @@ private CompletionStage sessionRow(Integer duration) { when(row.getInt("duration")).thenReturn(duration); } when(row.getInetAddress("coordinator")).thenReturn(address); + when(definitions.contains("coordinator_port")).thenReturn(true); + when(row.getInt("coordinator_port")).thenReturn(PORT); when(row.getMap("parameters", String.class, String.class)) .thenReturn(ImmutableMap.of("key1", "value1", "key2", "value2")); when(row.isNull("started_at")).thenReturn(false); @@ -355,9 +369,13 @@ private CompletionStage multiPageEventRows2() { private Row eventRow(int i) { Row row = mock(Row.class); + ColumnDefinitions definitions = mock(ColumnDefinitions.class); + when(row.getColumnDefinitions()).thenReturn(definitions); when(row.getString("activity")).thenReturn("mock activity " + i); when(row.getUuid("event_id")).thenReturn(Uuids.startOf(i)); when(row.getInetAddress("source")).thenReturn(address); + when(definitions.contains("source_port")).thenReturn(true); + when(row.getInt("source_port")).thenReturn(PORT); when(row.getInt("source_elapsed")).thenReturn(i); when(row.getString("thread")).thenReturn("mock thread " + i); return row; diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java index 11ba0cd5f55..6a7657d5809 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/RequestHandlerTestHarness.java @@ -1,11 +1,13 
@@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,6 +29,7 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metrics.SessionMetric; import com.datastax.oss.driver.api.core.retry.RetryPolicy; @@ -34,6 +37,8 @@ import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; import com.datastax.oss.driver.api.core.time.TimestampGenerator; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; import com.datastax.oss.driver.internal.core.ProtocolFeature; import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; @@ -48,17 +53,16 @@ import com.datastax.oss.driver.internal.core.session.DefaultSession; import com.datastax.oss.driver.internal.core.session.throttling.PassThroughRequestThrottler; import com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer; import com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout; import com.datastax.oss.protocol.internal.Frame; import io.netty.channel.EventLoopGroup; -import io.netty.util.TimerTask; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Queue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; @@ -129,9 +133,9 @@ protected RequestHandlerTestHarness(Builder builder) { .thenReturn(-1L); when(context.getSpeculativeExecutionPolicy(anyString())).thenReturn(speculativeExecutionPolicy); - when(context.getCodecRegistry()).thenReturn(new DefaultCodecRegistry("test")); + when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); - when(timestampGenerator.next()).thenReturn(Long.MIN_VALUE); + when(timestampGenerator.next()).thenReturn(Statement.NO_DEFAULT_TIMESTAMP); when(context.getTimestampGenerator()).thenReturn(timestampGenerator); pools = builder.buildMockPools(); @@ -166,6 +170,9 @@ protected RequestHandlerTestHarness(Builder builder) { when(context.getRequestThrottler()).thenReturn(new 
PassThroughRequestThrottler(context)); when(context.getRequestTracker()).thenReturn(new NoopRequestTracker(context)); + + when(context.getRequestIdGenerator()) + .thenReturn(Optional.ofNullable(builder.requestIdGenerator)); } public DefaultSession getSession() { @@ -189,10 +196,6 @@ public CapturedTimeout nextScheduledTimeout() { return timer.getNextTimeout(); } - public void runNextTask() { - TimerTask task = timer.getNextTimeout().task(); - } - @Override public void close() { timer.stop(); @@ -202,6 +205,7 @@ public static class Builder { private final List poolBehaviors = new ArrayList<>(); private boolean defaultIdempotence; private ProtocolVersion protocolVersion; + private RequestIdGenerator requestIdGenerator; /** * Sets the given node as the next one in the query plan; an empty pool will be simulated when @@ -257,6 +261,11 @@ public Builder withProtocolVersion(ProtocolVersion protocolVersion) { return this; } + public Builder withRequestIdGenerator(RequestIdGenerator requestIdGenerator) { + this.requestIdGenerator = requestIdGenerator; + return this; + } + /** * Sets the given node as the next one in the query plan; the test code is responsible of * calling the methods on the returned object to complete the write and the query. diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java index 5ac3c8531d5..54b215458fe 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java index 3d52fc3d22e..0b5860f7e95 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/ResultSetsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java index 59ca780136f..dc3ab0702f7 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/cql/StatementSizeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +34,7 @@ import com.datastax.oss.driver.api.core.detach.AttachmentPoint; import com.datastax.oss.driver.api.core.time.TimestampGenerator; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; -import com.datastax.oss.driver.internal.core.CassandraProtocolVersionRegistry; +import com.datastax.oss.driver.internal.core.DefaultProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.shaded.guava.common.base.Charsets; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; @@ -84,7 +86,7 @@ public void setup() { when(driverContext.getProtocolVersion()).thenReturn(DefaultProtocolVersion.V5); when(driverContext.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); when(driverContext.getProtocolVersionRegistry()) - .thenReturn(new CassandraProtocolVersionRegistry(null)); + .thenReturn(new DefaultProtocolVersionRegistry(null)); when(config.getDefaultProfile()).thenReturn(defaultProfile); when(driverContext.getConfig()).thenReturn(config); when(driverContext.getTimestampGenerator()).thenReturn(timestampGenerator); @@ -170,7 +172,7 @@ public void should_measure_size_of_bound_statement() { + (2 + PREPARED_ID.length) + (2 + RESULT_METADATA_ID.length) + 2 // size of value list - + 2 * (4) // two null values (size = -1) + + 2 * 4 // two null values (size = -1) + 4 // fetch size ; assertThat(v5SizeOf(statement)).isEqualTo(expectedSize); @@ -284,6 +286,7 @@ private BoundStatement newBoundStatement( null, CodecRegistry.DEFAULT, DefaultProtocolVersion.V5, - null); + null, + Statement.NO_NOW_IN_SECONDS); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java index ad3ee2f199e..c27b55e3f25 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIdTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -468,15 +470,15 @@ public void should_get_with_explicit_codec_by_name() { assertThat(s).isEqualTo("1"); } - @SuppressWarnings("UnusedAssignment") @Test(expected = IllegalArgumentException.class) + @SuppressWarnings("CheckReturnValue") public void should_fail_when_id_does_not_exists() { final CqlIdentifier invalidField = CqlIdentifier.fromInternal("invalidField"); // Given T t = newInstance(ImmutableList.of(DataTypes.INT), attachmentPoint); // When - t = t.setInt(invalidField, 1); + t.setInt(invalidField, 1); // Then the method will throw IllegalArgumentException up to the client. } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java index 3239a655ece..94da926f2bc 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/data/AccessibleByIndexTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
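// A minimal sketch (the UDT shape is assumed) of the contract pinned down by
// should_fail_when_id_does_not_exists above: addressing a field through an unknown
// name or CqlIdentifier fails fast with IllegalArgumentException instead of being
// silently ignored.
UdtValue value = addressType.newValue(); // addressType: a UserDefinedType with one "street" field
value.setString("street", "1 Main St"); // ok
value.setInt(CqlIdentifier.fromInternal("invalidField"), 1); // throws IllegalArgumentException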
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java index 07c1dc42a89..aed357cb1cd 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultTupleValueTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java index c097528e46d..6a9f2886783 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/data/DefaultUdtValueTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -123,7 +125,7 @@ public void should_format_to_string() { UdtValue udt = type.newValue().setString("t", "foobar").setDouble("d", 3.14); - assertThat(udt.toString()).isEqualTo("{t:'foobar',i:NULL,d:3.14}"); + assertThat(udt.getFormattedContents()).isEqualTo("{t:'foobar',i:NULL,d:3.14}"); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java index 504b5a17740..697a32fb029 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/data/IdentifierIndexTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,15 +19,22 @@ import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.oss.driver.TestDataProviders; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Locale; import org.junit.Test; +import org.junit.runner.RunWith; +@RunWith(DataProviderRunner.class) public class IdentifierIndexTest { private static final CqlIdentifier Foo = CqlIdentifier.fromInternal("Foo"); private static final CqlIdentifier foo = CqlIdentifier.fromInternal("foo"); private static final CqlIdentifier fOO = CqlIdentifier.fromInternal("fOO"); - private IdentifierIndex index = new IdentifierIndex(ImmutableList.of(Foo, foo, fOO)); + private IdentifierIndex index = + new IdentifierIndex(ImmutableList.of(Foo, foo, fOO, Foo, foo, fOO)); @Test public void should_find_first_index_of_existing_identifier() { @@ -40,13 +49,31 @@ public void should_not_find_index_of_nonexistent_identifier() { } @Test - public void should_find_first_index_of_case_insensitive_name() { - assertThat(index.firstIndexOf("foo")).isEqualTo(0); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_find_first_index_of_case_insensitive_name(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(index.firstIndexOf("foo")).isEqualTo(0); + assertThat(index.firstIndexOf("FOO")).isEqualTo(0); + assertThat(index.firstIndexOf("fOO")).isEqualTo(0); + } finally { + 
Locale.setDefault(def); + } } @Test - public void should_not_find_first_index_of_nonexistent_case_insensitive_name() { - assertThat(index.firstIndexOf("bar")).isEqualTo(-1); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_not_find_first_index_of_nonexistent_case_insensitive_name(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(index.firstIndexOf("bar")).isEqualTo(-1); + assertThat(index.firstIndexOf("BAR")).isEqualTo(-1); + assertThat(index.firstIndexOf("bAR")).isEqualTo(-1); + } finally { + Locale.setDefault(def); + } } @Test @@ -60,4 +87,56 @@ public void should_find_first_index_of_case_sensitive_name() { public void should_not_find_index_of_nonexistent_case_sensitive_name() { assertThat(index.firstIndexOf("\"FOO\"")).isEqualTo(-1); } + + @Test + public void should_find_all_indices_of_existing_identifier() { + assertThat(index.allIndicesOf(Foo)).containsExactly(0, 3); + assertThat(index.allIndicesOf(foo)).containsExactly(1, 4); + assertThat(index.allIndicesOf(fOO)).containsExactly(2, 5); + } + + @Test + public void should_not_find_indices_of_nonexistent_identifier() { + assertThat(index.allIndicesOf(CqlIdentifier.fromInternal("FOO"))).isEmpty(); + } + + @Test + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_find_all_indices_of_case_insensitive_name(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(index.allIndicesOf("foo")).containsExactly(0, 1, 2, 3, 4, 5); + assertThat(index.allIndicesOf("FOO")).containsExactly(0, 1, 2, 3, 4, 5); + assertThat(index.allIndicesOf("fOO")).containsExactly(0, 1, 2, 3, 4, 5); + } finally { + Locale.setDefault(def); + } + } + + @Test + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_not_find_indices_of_nonexistent_case_insensitive_name(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(index.allIndicesOf("bar")).isEmpty(); + assertThat(index.allIndicesOf("BAR")).isEmpty(); + assertThat(index.allIndicesOf("bAR")).isEmpty(); + } finally { + Locale.setDefault(def); + } + } + + @Test + public void should_find_all_indices_of_case_sensitive_name() { + assertThat(index.allIndicesOf("\"Foo\"")).containsExactly(0, 3); + assertThat(index.allIndicesOf("\"foo\"")).containsExactly(1, 4); + assertThat(index.allIndicesOf("\"fOO\"")).containsExactly(2, 5); + } + + @Test + public void should_not_find_indices_of_nonexistent_case_sensitive_name() { + assertThat(index.allIndicesOf("\"FOO\"")).isEmpty(); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java new file mode 100644 index 00000000000..3c832812662 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcAgnosticTest.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.Optional; +import org.junit.Before; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class BasicLoadBalancingPolicyDcAgnosticTest extends BasicLoadBalancingPolicyQueryPlanTest { + + @Before + @Override + public void setup() { + super.setup(); + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + when(metadataManager.getMetadata()).thenReturn(metadata); + when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); + + // since there is no local datacenter defined, the policy should behave with DC awareness + // disabled and pick nodes regardless of their datacenters; we therefore expect all tests of + // BasicLoadBalancingPolicyQueryPlanTest to pass even with the below DC distribution. + when(node1.getDatacenter()).thenReturn("dc1"); + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + when(node4.getDatacenter()).thenReturn("dc4"); + when(node5.getDatacenter()).thenReturn(null); + + policy = createAndInitPolicy(); + + assertThat(policy.getLiveNodes().dc(null)).containsExactly(node1, node2, node3, node4, node5); + assertThat(policy.getLiveNodes().dcs()).isEmpty(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java new file mode 100644 index 00000000000..dc955c6e5de --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDcFailoverTest.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.Map; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class BasicLoadBalancingPolicyDcFailoverTest extends BasicLoadBalancingPolicyQueryPlanTest { + + @Mock protected DefaultNode node6; + @Mock protected DefaultNode node7; + @Mock protected DefaultNode node8; + @Mock protected DefaultNode node9; + + @Test + @Override + public void should_prioritize_single_replica() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); + + // node3 always first, round-robin on the rest, then remote nodes + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node1, node2, node4, node5, node7, node8); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node2, node1, node4, node5, node7, node8); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node1, node2, node4, node5, node7, node8); + + // Should not shuffle replicas since there is only one + verify(policy, never()).shuffleHead(any(), eq(1)); + // But should shuffle remote nodes + verify(policy, times(3)).shuffleHead(any(), eq(4)); + } + + @Test + @Override + public void should_prioritize_and_shuffle_replicas() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .thenReturn(ImmutableSet.of(node2, node3, node5, node8)); + + // node 5 and 8 being in a remote DC, they don't get a boost for being a replica + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node2, node3, node1, node4, node5, node7, node8); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node2, node3, node1, node4, node5, node7, node8); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node2, node3, node1, node4, node5, node7, node8); + + // should shuffle replicas + verify(policy, times(3)).shuffleHead(any(), eq(2)); + // should shuffle remote nodes + verify(policy, times(3)).shuffleHead(any(), eq(4)); + // No power of two choices with only two replicas + verify(session, never()).getPools(); + } + + @Override + protected void assertRoundRobinQueryPlans() { + // nodes 4 to 9 being in a remote DC, they always appear after nodes 1, 2, 3 + for 
(int i = 0; i < 3; i++) { + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node1, node2, node3, node4, node5, node7, node8); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node2, node3, node1, node4, node5, node7, node8); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node1, node2, node4, node5, node7, node8); + } + // should shuffle remote nodes + verify(policy, atLeast(1)).shuffleHead(any(), eq(4)); + } + + @Override + protected BasicLoadBalancingPolicy createAndInitPolicy() { + when(node4.getDatacenter()).thenReturn("dc2"); + when(node5.getDatacenter()).thenReturn("dc2"); + when(node6.getDatacenter()).thenReturn("dc2"); + when(node7.getDatacenter()).thenReturn("dc3"); + when(node8.getDatacenter()).thenReturn("dc3"); + when(node9.getDatacenter()).thenReturn("dc3"); + // Accept 2 nodes per remote DC + when(defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .thenReturn(2); + when(defaultProfile.getBoolean( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) + .thenReturn(false); + // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was + // called (makes tests easier) + BasicLoadBalancingPolicy policy = + spy( + new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] currentNodes, int headLength) { + // nothing (keep in same order) + } + }); + Map<UUID, Node> nodes = + ImmutableMap.<UUID, Node>builder() + .put(UUID.randomUUID(), node1) + .put(UUID.randomUUID(), node2) + .put(UUID.randomUUID(), node3) + .put(UUID.randomUUID(), node4) + .put(UUID.randomUUID(), node5) + .put(UUID.randomUUID(), node6) + .put(UUID.randomUUID(), node7) + .put(UUID.randomUUID(), node8) + .put(UUID.randomUUID(), node9) + .build(); + policy.init(nodes, distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed + assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java new file mode 100644 index 00000000000..5b2b6bf864d --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyDistanceTest.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class BasicLoadBalancingPolicyDistanceTest extends LoadBalancingPolicyTestBase { + + @Mock private NodeDistanceEvaluator nodeDistanceEvaluator; + + private ImmutableMap<UUID, Node> nodes; + + @Before + @Override + public void setup() { + super.setup(); + when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) + .thenReturn(nodeDistanceEvaluator); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); + nodes = + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3); + } + + @Test + public void should_report_distance_reported_by_user_distance_reporter() { + // Given + given(node2.getDatacenter()).willReturn("dc2"); + given(nodeDistanceEvaluator.evaluateDistance(node1, "dc1")).willReturn(NodeDistance.LOCAL); + given(nodeDistanceEvaluator.evaluateDistance(node2, "dc1")).willReturn(NodeDistance.REMOTE); + given(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).willReturn(NodeDistance.IGNORED); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); + } + + @Test + public void should_report_LOCAL_when_dc_agnostic() { + // Given + given(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .willReturn(false); + given(node1.getDatacenter()).willReturn(null); + given(node2.getDatacenter()).willReturn("dc1"); + given(node3.getDatacenter()).willReturn("dc2"); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + assertThat(policy.getLiveNodes().dc(null)).containsExactly(node1, node2, node3); + } + + @Test + public void should_report_LOCAL_when_node_in_local_dc() { + // Given + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); +
verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); + } + + @Test + public void should_report_IGNORED_when_node_not_in_local_dc() { + // Given + given(node1.getDatacenter()).willReturn(null); + given(node2.getDatacenter()).willReturn("dc2"); + given(node3.getDatacenter()).willReturn("dc3"); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + // Then + // Note: driver 3 would have reported LOCAL for node1 since its datacenter is null + verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc(null)).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc3")).isEmpty(); + } + + @Test + public void should_report_REMOTE_when_node_not_in_local_dc_and_dc_failover_enabled() { + // Given + given(node1.getDatacenter()).willReturn("dc2"); + given(node2.getDatacenter()).willReturn("dc3"); + given(node3.getDatacenter()).willReturn("dc4"); + given( + defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .willReturn(1); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); + assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1); + assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node2); + assertThat(policy.getLiveNodes().dc("dc4")).containsExactly(node3); + } + + @Test + public void should_report_IGNORED_when_node_not_in_local_dc_and_too_many_nodes_for_dc_failover() { + // Given + given(node1.getDatacenter()).willReturn("dc2"); + given(node2.getDatacenter()).willReturn("dc2"); + given(node3.getDatacenter()).willReturn("dc2"); + given( + defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .willReturn(2); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2); + } + + @Test + public void should_report_REMOTE_when_remote_node_up_and_dc_failover() { + // Given + given(node1.getDatacenter()).willReturn("dc2"); + given(node2.getDatacenter()).willReturn("dc2"); + given(node3.getDatacenter()).willReturn("dc2"); + given(node4.getDatacenter()).willReturn("dc2"); + given( + defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .willReturn(4); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + policy.onUp(node4); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); + 
verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node4, NodeDistance.REMOTE); + assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2, node3, node4); + } + + @Test + public void should_report_IGNORED_when_remote_node_up_and_too_many_nodes_for_dc_failover() { + // Given + given(node1.getDatacenter()).willReturn("dc2"); + given(node2.getDatacenter()).willReturn("dc2"); + given(node3.getDatacenter()).willReturn("dc2"); + given(node4.getDatacenter()).willReturn("dc2"); + given( + defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .willReturn(3); + BasicLoadBalancingPolicy policy = createPolicy(); + // When + policy.init(nodes, distanceReporter); + policy.onUp(node4); + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node2, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node3, NodeDistance.REMOTE); + verify(distanceReporter).setDistance(node4, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node1, node2, node3); + } + + @NonNull + protected BasicLoadBalancingPolicy createPolicy() { + return new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java new file mode 100644 index 00000000000..9959ddbd1bc --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyEventsTest.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class BasicLoadBalancingPolicyEventsTest extends LoadBalancingPolicyTestBase { + + @Mock private NodeDistanceEvaluator nodeDistanceEvaluator; + + private BasicLoadBalancingPolicy policy; + + @Before + @Override + public void setup() { + super.setup(); + when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) + .thenReturn(nodeDistanceEvaluator); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + policy = createAndInitPolicy(); + reset(distanceReporter); + } + + @Test + public void should_remove_down_node_from_live_set() { + // When + policy.onDown(node2); + + // Then + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); + verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); + // should have been called only once, during initialization, but not during onDown + verify(nodeDistanceEvaluator).evaluateDistance(node2, "dc1"); + } + + @Test + public void should_remove_removed_node_from_live_set() { + // When + policy.onRemove(node2); + + // Then + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); + verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); + // should have been called only once, during initialization, but not during onRemove + verify(nodeDistanceEvaluator).evaluateDistance(node2, "dc1"); + } + + @Test + public void should_set_added_node_to_local() { + // When + policy.onAdd(node3); + + // Then + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); + // Not added to the live set yet, we're waiting for the pool to open + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); + } + + @Test + public void should_ignore_added_node_when_filtered() { + // Given + when(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).thenReturn(NodeDistance.IGNORED); + + // When + policy.onAdd(node3); + + // Then + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); + } + + @Test + public void should_ignore_added_node_when_remote_dc() { + // Given + when(node3.getDatacenter()).thenReturn("dc2"); + + // When + policy.onAdd(node3); + + // Then + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + 
assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); + assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); + } + + @Test + public void should_add_up_node_to_live_set() { + // When + policy.onUp(node3); + + // Then + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); + } + + @Test + public void should_ignore_up_node_when_filtered() { + // Given + when(nodeDistanceEvaluator.evaluateDistance(node3, "dc1")).thenReturn(NodeDistance.IGNORED); + + // When + policy.onUp(node3); + + // Then + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + verify(nodeDistanceEvaluator).evaluateDistance(node3, "dc1"); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); + } + + @Test + public void should_ignore_up_node_when_remote_dc() { + // Given + when(node3.getDatacenter()).thenReturn("dc2"); + + // When + policy.onUp(node3); + + // Then + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); + assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); + } + + @NonNull + protected BasicLoadBalancingPolicy createAndInitPolicy() { + BasicLoadBalancingPolicy policy = + new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + policy.init( + ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2); + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java new file mode 100644 index 00000000000..1863e7357e1 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyInitTest.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.filter; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.spi.ILoggingEvent; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class BasicLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { + + @Test + public void should_use_local_dc_if_provided_via_config() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(true); + when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn("dc1"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + // the parent class sets the config option to "dc1" + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); + + // Then + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); + } + + @Test + public void should_use_local_dc_if_provided_via_context() { + // Given + when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + // note: programmatic takes priority, the config won't even be inspected so no need to stub the + // option to null + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); + + // Then + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); + verify(defaultProfile, never()) + .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); + } + + @Test + public void should_not_infer_local_dc_if_not_provided() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); + when(node1.getDatacenter()).thenReturn("dc1"); + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + BasicLoadBalancingPolicy policy = + new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) {}; + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + assertThat(policy.getLocalDatacenter()).isNull(); + // should not warn about contact points not being in the same DC + verify(appender, never()).doAppend(loggingEventCaptor.capture()); + } + + @Test + public void should_warn_if_contact_points_not_in_local_dc() { + // Given + 
when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); + Iterable<ILoggingEvent> warnLogs = + filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); + assertThat(warnLogs).hasSize(1); + assertThat(warnLogs.iterator().next().getFormattedMessage()) + .contains( + "You specified dc1 as the local DC, but some contact points are from a different DC") + .contains("node2=dc2") + .contains("node3=dc3"); + } + + @Test + public void should_include_nodes_from_local_dc_if_local_dc_set() { + // Given + when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); + when(node1.getState()).thenReturn(NodeState.UP); + when(node2.getState()).thenReturn(NodeState.DOWN); + when(node3.getState()).thenReturn(NodeState.UNKNOWN); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + // Set distance for all nodes in the local DC + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + // But only include UP or UNKNOWN nodes in the live set + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); + } + + @Test + public void should_ignore_nodes_from_remote_dcs_if_local_dc_set() { + // Given + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); + } + + @Test + public void should_not_ignore_nodes_from_remote_dcs_if_local_dc_not_set() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); + } + + @Test + public void 
should_ignore_nodes_excluded_by_distance_reporter() { + // Given + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) + .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); + + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); + } + + @NonNull + protected BasicLoadBalancingPolicy createPolicy() { + return new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java new file mode 100644 index 00000000000..cefdfd31189 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyPreferredRemoteDcsTest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.Map; +import java.util.UUID; +import org.junit.Test; +import org.mockito.Mock; + +public class BasicLoadBalancingPolicyPreferredRemoteDcsTest + extends BasicLoadBalancingPolicyDcFailoverTest { + @Mock protected DefaultNode node10; + @Mock protected DefaultNode node11; + @Mock protected DefaultNode node12; + @Mock protected DefaultNode node13; + @Mock protected DefaultNode node14; + + @Override + @Test + public void should_prioritize_single_replica() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); + + // node3 always first, round-robin on the rest + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node3, node1, node2, node4, node5, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node3, node2, node4, node5, node1, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node3, node4, node5, node1, node2, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node3, node5, node1, node2, node4, node9, node10, node6, node7, node12, node13); + + // Should not shuffle replicas since there is only one + verify(policy, never()).shuffleHead(any(), eq(1)); + // But should shuffle remote nodes + verify(policy, times(12)).shuffleHead(any(), eq(2)); + } + + @Override + @Test + public void should_prioritize_and_shuffle_replicas() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .thenReturn(ImmutableSet.of(node1, node2, node3, node6, node9)); + + // node 6 and 9 being in a remote DC, they don't get a boost for being a replica + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node1, node2, node3, node4, node5, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node1, node2, node3, node5, node4, node9, node10, node6, node7, node12, node13); + + // should shuffle replicas + verify(policy, times(2)).shuffleHead(any(), eq(3)); + // should shuffle remote nodes + verify(policy, times(6)).shuffleHead(any(), eq(2)); + // No power of two choices with only two replicas + verify(session, never()).getPools(); + } + + @Override + protected void 
assertRoundRobinQueryPlans() { + for (int i = 0; i < 3; i++) { + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node1, node2, node3, node4, node5, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node2, node3, node4, node5, node1, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node3, node4, node5, node1, node2, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node4, node5, node1, node2, node3, node9, node10, node6, node7, node12, node13); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly( + node5, node1, node2, node3, node4, node9, node10, node6, node7, node12, node13); + } + + verify(policy, atLeast(15)).shuffleHead(any(), eq(2)); + } + + @Override + protected BasicLoadBalancingPolicy createAndInitPolicy() { + when(node4.getDatacenter()).thenReturn("dc1"); + when(node5.getDatacenter()).thenReturn("dc1"); + when(node6.getDatacenter()).thenReturn("dc2"); + when(node7.getDatacenter()).thenReturn("dc2"); + when(node8.getDatacenter()).thenReturn("dc2"); + when(node9.getDatacenter()).thenReturn("dc3"); + when(node10.getDatacenter()).thenReturn("dc3"); + when(node11.getDatacenter()).thenReturn("dc3"); + when(node12.getDatacenter()).thenReturn("dc4"); + when(node13.getDatacenter()).thenReturn("dc4"); + when(node14.getDatacenter()).thenReturn("dc4"); + + // Accept 2 nodes per remote DC + when(defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .thenReturn(2); + when(defaultProfile.getBoolean( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) + .thenReturn(false); + + when(defaultProfile.getStringList( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_PREFERRED_REMOTE_DCS)) + .thenReturn(ImmutableList.of("dc3", "dc2")); + + // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was + // called (makes tests easier) + BasicLoadBalancingPolicy policy = + spy( + new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] currentNodes, int headLength) { + // nothing (keep in same order) + } + }); + Map<UUID, Node> nodes = + ImmutableMap.<UUID, Node>builder() + .put(UUID.randomUUID(), node1) + .put(UUID.randomUUID(), node2) + .put(UUID.randomUUID(), node3) + .put(UUID.randomUUID(), node4) + .put(UUID.randomUUID(), node5) + .put(UUID.randomUUID(), node6) + .put(UUID.randomUUID(), node7) + .put(UUID.randomUUID(), node8) + .put(UUID.randomUUID(), node9) + .put(UUID.randomUUID(), node10) + .put(UUID.randomUUID(), node11) + .put(UUID.randomUUID(), node12) + .put(UUID.randomUUID(), node13) + .put(UUID.randomUUID(), node14) + .build(); + policy.init(nodes, distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3, node4, node5); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node6, node7); // only 2 allowed + assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node9, node10); // only 2 allowed + assertThat(policy.getLiveNodes().dc("dc4")).containsExactly(node12, node13); // only 2 allowed + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java new file mode 100644 index 00000000000..50670ab317a --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicyQueryPlanTest.java @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static java.util.Collections.emptySet; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.TokenMap; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import com.datastax.oss.protocol.internal.util.Bytes; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Optional; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class BasicLoadBalancingPolicyQueryPlanTest extends LoadBalancingPolicyTestBase { + + protected static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); + protected static final ByteBuffer ROUTING_KEY = Bytes.fromHexString("0xdeadbeef"); + + @Mock protected Request request; + @Mock protected DefaultSession session; + @Mock protected Metadata metadata; + @Mock protected TokenMap tokenMap; + @Mock protected Token routingToken; + + protected BasicLoadBalancingPolicy policy; + + @Before + @Override + public void setup() { + super.setup(); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + 
when(metadataManager.getMetadata()).thenReturn(metadata); + when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); + + policy = createAndInitPolicy(); + } + + @Test + public void should_use_round_robin_when_no_request() { + // Given + request = null; + + // When + assertRoundRobinQueryPlans(); + + // Then + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); + } + + @Test + public void should_use_round_robin_when_no_session() { + // Given + session = null; + + // When + assertRoundRobinQueryPlans(); + + // Then + then(request).should(never()).getRoutingKey(); + then(request).should(never()).getRoutingToken(); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); + } + + @Test + public void should_use_round_robin_when_request_has_no_routing_keyspace() { + // By default from Mockito: + assertThat(request.getKeyspace()).isNull(); + assertThat(request.getRoutingKeyspace()).isNull(); + + assertRoundRobinQueryPlans(); + + then(request).should(never()).getRoutingKey(); + then(request).should(never()).getRoutingToken(); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); + } + + @Test + public void should_use_round_robin_when_request_has_no_routing_key_or_token() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + assertThat(request.getRoutingKey()).isNull(); + assertThat(request.getRoutingToken()).isNull(); + + assertRoundRobinQueryPlans(); + + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); + } + + @Test + public void should_use_round_robin_when_token_map_absent() { + when(metadata.getTokenMap()).thenReturn(Optional.empty()); + + assertRoundRobinQueryPlans(); + + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(Token.class)); + then(tokenMap).should(never()).getReplicas(any(CqlIdentifier.class), any(ByteBuffer.class)); + } + + @Test + public void + should_use_round_robin_when_token_map_returns_no_replicas_using_request_keyspace_and_routing_key() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(Collections.emptySet()); + + assertRoundRobinQueryPlans(); + + then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); + } + + @Test + public void + should_use_round_robin_when_token_map_returns_no_replicas_using_session_keyspace_and_routing_key() { + // Given + given(request.getKeyspace()).willReturn(null); + given(request.getRoutingKeyspace()).willReturn(null); + given(session.getKeyspace()).willReturn(Optional.of(KEYSPACE)); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).willReturn(emptySet()); + // When + assertRoundRobinQueryPlans(); + // Then + then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); + } + + @Test + public void + should_use_round_robin_when_token_map_returns_no_replicas_using_request_keyspace_and_routing_token() { + // Given + given(request.getKeyspace()).willReturn(null); + 
given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingToken()).willReturn(routingToken); + given(tokenMap.getReplicas(KEYSPACE, routingToken)).willReturn(emptySet()); + // When + assertRoundRobinQueryPlans(); + // Then + then(tokenMap).should(atLeast(1)).getReplicas(KEYSPACE, routingToken); + } + + @Test + public void should_use_round_robin_and_log_error_when_request_throws() { + // Given + given(request.getKeyspace()).willThrow(new NullPointerException()); + // When + policy.newQueryPlan(request, session); + // Then + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getValue().getFormattedMessage()) + .contains("Unexpected error while trying to compute query plan"); + } + + protected void assertRoundRobinQueryPlans() { + for (int i = 0; i < 3; i++) { + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node1, node2, node3, node4, node5); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node2, node3, node4, node5, node1); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node4, node5, node1, node2); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node4, node5, node1, node2, node3); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node5, node1, node2, node3, node4); + } + } + + @Test + public void should_prioritize_single_replica() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); + + // node3 always first, round-robin on the rest + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node1, node2, node4, node5); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node2, node4, node5, node1); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node4, node5, node1, node2); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node5, node1, node2, node4); + + // Should not shuffle replicas since there is only one + verify(policy, never()).shuffleHead(any(), anyInt()); + } + + @Test + public void should_prioritize_and_shuffle_replicas() { + when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); + when(request.getRoutingKey()).thenReturn(ROUTING_KEY); + when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3, node5)); + + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node5, node1, node2, node4); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node5, node2, node4, node1); + assertThat(policy.newQueryPlan(request, session)) + .containsExactly(node3, node5, node4, node1, node2); + + verify(policy, times(3)).shuffleHead(any(), eq(2)); + // No power of two choices with only two replicas + verify(session, never()).getPools(); + } + + protected BasicLoadBalancingPolicy createAndInitPolicy() { + // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was + // called (makes tests easier) + BasicLoadBalancingPolicy policy = + spy( + new BasicLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] currentNodes, int headLength) { + // nothing (keep in same order) + } + }); + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, + UUID.randomUUID(), node2, + UUID.randomUUID(), node3, + 
UUID.randomUUID(), node4, + UUID.randomUUID(), node5), + distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3, node4, node5); + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java new file mode 100644 index 00000000000..dd9b74158f1 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDcFailoverTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.Map; +import java.util.UUID; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class DcInferringLoadBalancingPolicyDcFailoverTest + extends BasicLoadBalancingPolicyDcFailoverTest { + + @Override + protected DcInferringLoadBalancingPolicy createAndInitPolicy() { + when(node4.getDatacenter()).thenReturn("dc2"); + when(node5.getDatacenter()).thenReturn("dc2"); + when(node6.getDatacenter()).thenReturn("dc2"); + when(node7.getDatacenter()).thenReturn("dc3"); + when(node8.getDatacenter()).thenReturn("dc3"); + when(node9.getDatacenter()).thenReturn("dc3"); + // Accept 2 nodes per remote DC + when(defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .thenReturn(2); + when(defaultProfile.getBoolean( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) + .thenReturn(false); + // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was + // called (makes tests easier) + DcInferringLoadBalancingPolicy policy = + spy( + new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] array, int n) {} + }); + Map<UUID, Node> nodes = + ImmutableMap.<UUID, Node>builder() + .put(UUID.randomUUID(), node1) + .put(UUID.randomUUID(), node2) + .put(UUID.randomUUID(), node3) + .put(UUID.randomUUID(), node4) + .put(UUID.randomUUID(), node5) +
.put(UUID.randomUUID(), node6) + .put(UUID.randomUUID(), node7) + .put(UUID.randomUUID(), node8) + .put(UUID.randomUUID(), node9) + .build(); + policy.init(nodes, distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed + assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java new file mode 100644 index 00000000000..80c414aa8f2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyDistanceTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class DcInferringLoadBalancingPolicyDistanceTest + extends BasicLoadBalancingPolicyDistanceTest { + + @Override + public void should_report_LOCAL_when_dc_agnostic() { + // This policy cannot operate when contact points are from different DCs + Throwable error = catchThrowable(super::should_report_LOCAL_when_dc_agnostic); + assertThat(error) + .isInstanceOfSatisfying( + IllegalStateException.class, + ise -> + assertThat(ise) + .hasMessageContaining( + "No local DC was provided, but the contact points are from different DCs") + .hasMessageContaining("node1=null") + .hasMessageContaining("node2=dc1") + .hasMessageContaining("node3=dc2")); + } + + @NonNull + @Override + protected BasicLoadBalancingPolicy createPolicy() { + return new DcInferringLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java new file mode 100644 index 00000000000..218d6338df9 --- /dev/null +++ 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyEventsTest.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; +import static org.mockito.Mockito.reset; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.UUID; + +public class DcInferringLoadBalancingPolicyEventsTest extends BasicLoadBalancingPolicyEventsTest { + + @Override + @NonNull + protected BasicLoadBalancingPolicy createAndInitPolicy() { + DcInferringLoadBalancingPolicy policy = + new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME); + policy.init( + ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsOnly(node1, node2); + reset(distanceReporter); + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java new file mode 100644 index 00000000000..20de3afe9c3 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyInitTest.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.assertj.core.api.Assertions.filter; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.spi.ILoggingEvent; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.UUID; +import org.junit.Test; + +public class DcInferringLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { + + @Test + public void should_use_local_dc_if_provided_via_config() { + // Given + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + // the parent class sets the config option to "dc1" + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); + + // Then + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); + } + + @Test + public void should_use_local_dc_if_provided_via_context() { + // Given + when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + // note: programmatic takes priority, the config won't even be inspected so no need to stub the + // option to null + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); + + // Then + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); + verify(defaultProfile, never()) + .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); + } + + @Test + public void should_infer_local_dc_from_contact_points() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); + + // Then + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); + } + + @Test + public void should_require_local_dc_if_contact_points_from_different_dcs() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + when(node2.getDatacenter()).thenReturn("dc2"); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + Throwable t = + catchThrowable( + () -> + policy.init( + ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), + distanceReporter)); + + // Then + assertThat(t) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "No local DC was provided, but the contact points are from different DCs: node1=dc1, node2=dc2"); + } + + @Test + public void 
should_require_local_dc_if_contact_points_have_null_dcs() { + // Given + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + when(node1.getDatacenter()).thenReturn(null); + when(node2.getDatacenter()).thenReturn(null); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + Throwable t = + catchThrowable( + () -> + policy.init( + ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), + distanceReporter)); + + // Then + assertThat(t) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "The local DC could not be inferred from contact points, please set it explicitly"); + } + + @Test + public void should_warn_if_contact_points_not_in_local_dc() { + // Given + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); + Iterable<ILoggingEvent> warnLogs = + filter(loggingEventCaptor.getAllValues()).with("level", Level.WARN).get(); + assertThat(warnLogs).hasSize(1); + assertThat(warnLogs.iterator().next().getFormattedMessage()) + .contains( + "You specified dc1 as the local DC, but some contact points are from a different DC") + .contains("node2=dc2") + .contains("node3=dc3"); + } + + @Test + public void should_include_nodes_from_local_dc() { + // Given + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + when(node1.getState()).thenReturn(NodeState.UP); + when(node2.getState()).thenReturn(NodeState.DOWN); + when(node3.getState()).thenReturn(NodeState.UNKNOWN); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + // Set distance for all nodes in the local DC + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + // But only include UP or UNKNOWN nodes in the live set + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); + } + + @Test + public void should_ignore_nodes_from_remote_dcs() { + // Given + when(node2.getDatacenter()).thenReturn("dc2"); + when(node3.getDatacenter()).thenReturn("dc3"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); + } + + @Test + public void should_ignore_nodes_excluded_by_distance_reporter() { + // Given + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); +
when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) + .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); + + BasicLoadBalancingPolicy policy = createPolicy(); + + // When + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, UUID.randomUUID(), node2, UUID.randomUUID(), node3), + distanceReporter); + + // Then + verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); + } + + @NonNull + protected DcInferringLoadBalancingPolicy createPolicy() { + return new DcInferringLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java new file mode 100644 index 00000000000..23d4636a615 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DcInferringLoadBalancingPolicyQueryPlanTest.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; +import static org.mockito.Mockito.spy; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.UUID; + +public class DcInferringLoadBalancingPolicyQueryPlanTest + extends DefaultLoadBalancingPolicyQueryPlanTest { + + @Override + protected DcInferringLoadBalancingPolicy createAndInitPolicy() { + DcInferringLoadBalancingPolicy policy = + spy( + new DcInferringLoadBalancingPolicy(context, DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] array, int n) {} + + @Override + protected long nanoTime() { + return nanoTime; + } + + @Override + protected int diceRoll1d4() { + return diceRoll; + } + }); + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, + UUID.randomUUID(), node2, + UUID.randomUUID(), node3, + UUID.randomUUID(), node4, + UUID.randomUUID(), node5), + distanceReporter); + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java new file mode 100644 index 00000000000..f2e741fd756 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDcFailoverTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.Map; +import java.util.UUID; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class DefaultLoadBalancingPolicyDcFailoverTest + extends BasicLoadBalancingPolicyDcFailoverTest { + + @Override + protected DefaultLoadBalancingPolicy createAndInitPolicy() { + when(node4.getDatacenter()).thenReturn("dc2"); + when(node5.getDatacenter()).thenReturn("dc2"); + when(node6.getDatacenter()).thenReturn("dc2"); + when(node7.getDatacenter()).thenReturn("dc3"); + when(node8.getDatacenter()).thenReturn("dc3"); + when(node9.getDatacenter()).thenReturn("dc3"); + // Accept 2 nodes per remote DC + when(defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .thenReturn(2); + when(defaultProfile.getBoolean( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) + .thenReturn(false); + // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was + // called (makes tests easier) + DefaultLoadBalancingPolicy policy = + spy( + new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] array, int n) {} + }); + Map<UUID, Node> nodes = + ImmutableMap.<UUID, Node>builder() + .put(UUID.randomUUID(), node1) + .put(UUID.randomUUID(), node2) + .put(UUID.randomUUID(), node3) + .put(UUID.randomUUID(), node4) + .put(UUID.randomUUID(), node5) + .put(UUID.randomUUID(), node6) + .put(UUID.randomUUID(), node7) + .put(UUID.randomUUID(), node8) + .put(UUID.randomUUID(), node9) + .build(); + policy.init(nodes, distanceReporter); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node2, node3); + assertThat(policy.getLiveNodes().dc("dc2")).containsExactly(node4, node5); // only 2 allowed + assertThat(policy.getLiveNodes().dc("dc3")).containsExactly(node7, node8); // only 2 allowed + return policy; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java new file mode 100644 index 00000000000..9cf30e048e9 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyDistanceTest.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +// TODO fix unnecessary stubbing of config option in parent class (and stop using "silent" runner) +@RunWith(MockitoJUnitRunner.Silent.class) +public class DefaultLoadBalancingPolicyDistanceTest extends BasicLoadBalancingPolicyDistanceTest { + + @Override + public void should_report_LOCAL_when_dc_agnostic() { + // This policy cannot operate in dc-agnostic mode + Throwable error = catchThrowable(super::should_report_LOCAL_when_dc_agnostic); + assertThat(error) + .isInstanceOfSatisfying( + IllegalStateException.class, + ise -> + assertThat(ise) + .hasMessageContaining("the local DC must be explicitly set") + .hasMessageContaining("node1=null") + .hasMessageContaining("node2=dc1") + .hasMessageContaining("node3=dc2") + .hasMessageContaining("Current DCs in this cluster are: dc1, dc2")); + } + + @NonNull + @Override + protected BasicLoadBalancingPolicy createPolicy() { + return new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java index a1bec905103..17e926a29e0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyEventsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,149 +17,24 @@ */ package com.datastax.oss.driver.internal.core.loadbalancing; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.never; +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; -import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; import java.util.UUID; -import java.util.function.Predicate; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class DefaultLoadBalancingPolicyEventsTest extends DefaultLoadBalancingPolicyTestBase { - - @Mock private Predicate<Node> filter; - private DefaultLoadBalancingPolicy policy; +public class DefaultLoadBalancingPolicyEventsTest extends BasicLoadBalancingPolicyEventsTest { - @Before @Override - public void setup() { - super.setup(); - - when(filter.test(any(Node.class))).thenReturn(true); - when(context.getNodeFilter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(filter); - - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - - policy = new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + @NonNull + protected DefaultLoadBalancingPolicy createAndInitPolicy() { + DefaultLoadBalancingPolicy policy = new DefaultLoadBalancingPolicy(context, DEFAULT_NAME); policy.init( ImmutableMap.of(UUID.randomUUID(), node1, UUID.randomUUID(), node2), distanceReporter); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2); - + assertThat(policy.getLiveNodes().dc("dc1")).containsOnly(node1, node2); reset(distanceReporter); - } - - @Test - public void should_remove_down_node_from_live_set() { - // When - policy.onDown(node2); - - // Then - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1); - verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); - // should have been called only once, during initialization, but not during onDown - verify(filter).test(node2); - } - - @Test - public void should_remove_removed_node_from_live_set() { - // When - policy.onRemove(node2); - - // Then - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1); - verify(distanceReporter, never()).setDistance(eq(node2), any(NodeDistance.class)); - // should have been called only once, during initialization, but not during onRemove - verify(filter).test(node2); - } - - @Test - public void should_set_added_node_to_local() { - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - 
verify(filter).test(node3); - // Not added to the live set yet, we're waiting for the pool to open - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2); - } - - @Test - public void should_ignore_added_node_when_filtered() { - // Given - when(filter.test(node3)).thenReturn(false); - - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2); - } - - @Test - public void should_ignore_added_node_when_remote_dc() { - // Given - when(node3.getDatacenter()).thenReturn("dc2"); - - // When - policy.onAdd(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2); - } - - @Test - public void should_add_up_node_to_live_set() { - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); - verify(filter).test(node3); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2, node3); - } - - @Test - public void should_ignore_up_node_when_filtered() { - // Given - when(filter.test(node3)).thenReturn(false); - - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - verify(filter).test(node3); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2); - } - - @Test - public void should_ignore_up_node_when_remote_dc() { - // Given - when(node3.getDatacenter()).thenReturn("dc2"); - - // When - policy.onUp(node3); - - // Then - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node2); + return policy; } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java index f1d2c68fa43..7b875209743 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyInitTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.loadbalancing; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.filter; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.never; @@ -30,37 +33,40 @@ import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; import java.util.UUID; import org.junit.Test; -public class DefaultLoadBalancingPolicyInitTest extends DefaultLoadBalancingPolicyTestBase { +public class DefaultLoadBalancingPolicyInitTest extends LoadBalancingPolicyTestBase { @Test public void should_use_local_dc_if_provided_via_config() { // Given + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); // the parent class sets the config option to "dc1" + DefaultLoadBalancingPolicy policy = createPolicy(); // When - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); // Then - assertThat(policy.localDc).isEqualTo("dc1"); + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); } @Test public void should_use_local_dc_if_provided_via_context() { // Given when(context.getLocalDatacenter(DriverExecutionProfile.DEFAULT_NAME)).thenReturn("dc1"); + when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); // note: programmatic takes priority, the config won't even be inspected so no need to stub the // option to null + DefaultLoadBalancingPolicy policy = createPolicy(); // When - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); // Then - assertThat(policy.localDc).isEqualTo("dc1"); + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); verify(defaultProfile, never()) .getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null); } @@ -68,35 +74,33 @@ public void should_use_local_dc_if_provided_via_context() { @Test public void should_infer_local_dc_if_no_explicit_contact_points() { // Given - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null)) - .thenReturn(null); + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); when(metadataManager.wasImplicitContactPoint()).thenReturn(true); - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + DefaultLoadBalancingPolicy policy = createPolicy(); // When policy.init(ImmutableMap.of(UUID.randomUUID(), node1), distanceReporter); // Then - assertThat(policy.localDc).isEqualTo("dc1"); + assertThat(policy.getLocalDatacenter()).isEqualTo("dc1"); } @Test public void should_require_local_dc_if_explicit_contact_points() { // Given - 
when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null)) - .thenReturn(null); - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node2)); + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(false); when(metadataManager.wasImplicitContactPoint()).thenReturn(false); - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); - - thrown.expect(IllegalStateException.class); - thrown.expectMessage("You provided explicit contact points, the local DC must be specified"); + DefaultLoadBalancingPolicy policy = createPolicy(); // When - policy.init(ImmutableMap.of(UUID.randomUUID(), node2), distanceReporter); + assertThatThrownBy( + () -> policy.init(ImmutableMap.of(UUID.randomUUID(), node2), distanceReporter)) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Since you provided explicit contact points, the local DC must be explicitly set"); } @Test @@ -105,8 +109,7 @@ public void should_warn_if_contact_points_not_in_local_dc() { when(node2.getDatacenter()).thenReturn("dc2"); when(node3.getDatacenter()).thenReturn("dc3"); when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2, node3)); - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + DefaultLoadBalancingPolicy policy = createPolicy(); // When policy.init( @@ -133,8 +136,7 @@ public void should_include_nodes_from_local_dc() { when(node1.getState()).thenReturn(NodeState.UP); when(node2.getState()).thenReturn(NodeState.DOWN); when(node3.getState()).thenReturn(NodeState.UNKNOWN); - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + DefaultLoadBalancingPolicy policy = createPolicy(); // When policy.init( @@ -148,7 +150,7 @@ public void should_include_nodes_from_local_dc() { verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); // But only include UP or UNKNOWN nodes in the live set - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1, node3); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1, node3); } @Test @@ -157,8 +159,7 @@ public void should_ignore_nodes_from_remote_dcs() { when(node2.getDatacenter()).thenReturn("dc2"); when(node3.getDatacenter()).thenReturn("dc3"); when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + DefaultLoadBalancingPolicy policy = createPolicy(); // When policy.init( @@ -170,18 +171,19 @@ public void should_ignore_nodes_from_remote_dcs() { verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node1); + assertThat(policy.getLiveNodes().dc("dc2")).isEmpty(); + assertThat(policy.getLiveNodes().dc("dc3")).isEmpty(); } @Test - public void should_ignore_nodes_excluded_by_filter() { + public void should_ignore_nodes_excluded_by_distance_reporter() { // Given when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1, node2)); - 
when(context.getNodeFilter(DriverExecutionProfile.DEFAULT_NAME)) - .thenReturn(node -> node.equals(node1)); + when(context.getNodeDistanceEvaluator(DriverExecutionProfile.DEFAULT_NAME)) + .thenReturn((node, dc) -> node.equals(node1) ? NodeDistance.IGNORED : null); - DefaultLoadBalancingPolicy policy = - new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); + BasicLoadBalancingPolicy policy = createPolicy(); // When policy.init( @@ -190,9 +192,14 @@ public void should_ignore_nodes_excluded_by_filter() { distanceReporter); // Then - verify(distanceReporter).setDistance(node1, NodeDistance.LOCAL); - verify(distanceReporter).setDistance(node2, NodeDistance.IGNORED); - verify(distanceReporter).setDistance(node3, NodeDistance.IGNORED); - assertThat(policy.localDcLiveNodes).containsExactlyInAnyOrder(node1); + verify(distanceReporter).setDistance(node1, NodeDistance.IGNORED); + verify(distanceReporter).setDistance(node2, NodeDistance.LOCAL); + verify(distanceReporter).setDistance(node3, NodeDistance.LOCAL); + assertThat(policy.getLiveNodes().dc("dc1")).containsExactly(node2, node3); + } + + @NonNull + protected DefaultLoadBalancingPolicy createPolicy() { + return new DefaultLoadBalancingPolicy(context, DriverExecutionProfile.DEFAULT_NAME); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java index 60d67923935..fff86a1b750 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyQueryPlanTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,182 +17,346 @@ */ package com.datastax.oss.driver.internal.core.loadbalancing; +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeast; +import static org.mockito.BDDMockito.given; +import static org.mockito.BDDMockito.then; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.TokenMap; -import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.internal.core.session.DefaultSession; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; import java.util.Optional; +import java.util.Queue; import java.util.UUID; +import java.util.concurrent.atomic.AtomicLongArray; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; -public class DefaultLoadBalancingPolicyQueryPlanTest extends DefaultLoadBalancingPolicyTestBase { +public class DefaultLoadBalancingPolicyQueryPlanTest extends BasicLoadBalancingPolicyQueryPlanTest { + + private static final long T0 = Long.MIN_VALUE; + private static final long T1 = 100; + private static final long T2 = 200; + private static final long T3 = 300; - private static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); - private static final ByteBuffer ROUTING_KEY = Bytes.fromHexString("0xdeadbeef"); + @Mock protected ChannelPool pool1; + @Mock protected ChannelPool pool2; + @Mock protected ChannelPool pool3; + @Mock protected ChannelPool pool4; + @Mock protected ChannelPool pool5; - @Mock private Request request; - @Mock private DefaultSession session; - @Mock private Metadata metadata; - @Mock private TokenMap tokenMap; + long nanoTime; + int diceRoll; - private DefaultLoadBalancingPolicy policy; + private DefaultLoadBalancingPolicy dsePolicy; @Before @Override public void setup() { + nanoTime = T1; + diceRoll = 4; + given(node4.getDatacenter()).willReturn("dc1"); + given(node5.getDatacenter()).willReturn("dc1"); + given(session.getPools()) + .willReturn( + ImmutableMap.of( + node1, pool1, + node2, pool2, + node3, pool3, + node4, pool4, + node5, pool5)); + given(context.getMetadataManager()).willReturn(metadataManager); + given(metadataManager.getMetadata()).willReturn(metadata); + given(metadataManager.getContactPoints()).willReturn(ImmutableSet.of(node1)); + 
given(metadata.getTokenMap()).willAnswer(invocation -> Optional.of(tokenMap)); super.setup(); - - when(metadataManager.getContactPoints()).thenReturn(ImmutableSet.of(node1)); - - when(metadataManager.getMetadata()).thenReturn(metadata); - when(metadata.getTokenMap()).thenAnswer(invocation -> Optional.of(this.tokenMap)); - - // Use a subclass to disable shuffling, we just spy to make sure that the shuffling method was - // called (makes tests easier) - policy = spy(new NonShufflingPolicy(context, DriverExecutionProfile.DEFAULT_NAME)); - policy.init( - ImmutableMap.of( - UUID.randomUUID(), node1, - UUID.randomUUID(), node2, - UUID.randomUUID(), node3, - UUID.randomUUID(), node4, - UUID.randomUUID(), node5), - distanceReporter); - - // Note: this test relies on the fact that the policy uses a CopyOnWriteArraySet which preserves + dsePolicy = (DefaultLoadBalancingPolicy) policy; + // Note: this assertion relies on the fact that policy.getLiveNodes() implementation preserves // insertion order. - assertThat(policy.localDcLiveNodes).containsExactly(node1, node2, node3, node4, node5); + assertThat(dsePolicy.getLiveNodes().dc("dc1")) + .containsExactly(node1, node2, node3, node4, node5); } @Test - public void should_use_round_robin_when_request_has_no_routing_keyspace() { - // By default from Mockito: - assertThat(request.getKeyspace()).isNull(); - assertThat(request.getRoutingKeyspace()).isNull(); - - assertRoundRobinQueryPlans(); - - verify(request, never()).getRoutingKey(); - verify(request, never()).getRoutingToken(); - verify(metadataManager, never()).getMetadata(); + public void should_prioritize_and_shuffle_2_replicas() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).willReturn(ImmutableSet.of(node3, node5)); + + // When + Queue<Node> plan1 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan2 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan3 = dsePolicy.newQueryPlan(request, session); + + // Then + // node3 and node5 always first, round-robin on the rest + assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); + assertThat(plan2).containsExactly(node3, node5, node2, node4, node1); + assertThat(plan3).containsExactly(node3, node5, node4, node1, node2); + + then(dsePolicy).should(times(3)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(never()).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); } @Test - public void should_use_round_robin_when_request_has_no_routing_key_or_token() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - assertThat(request.getRoutingKey()).isNull(); - assertThat(request.getRoutingToken()).isNull(); - - assertRoundRobinQueryPlans(); - - verify(metadataManager, never()).getMetadata(); + public void should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_all_newly_up() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + dsePolicy.upTimes.put(node1, T1); + dsePolicy.upTimes.put(node3, T2); + dsePolicy.upTimes.put(node5, T3); // newest up replica + given(pool1.getInFlight()).willReturn(0); + given(pool3.getInFlight()).willReturn(0); + + // When + Queue<Node> plan1 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 
5 always first, round-robin on the rest + // newest up replica is 5, not in first or second position + assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); + assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); } @Test - public void should_use_round_robin_when_token_map_absent() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - - when(metadata.getTokenMap()).thenReturn(Optional.empty()); - - assertRoundRobinQueryPlans(); - - verify(metadata, atLeast(1)).getTokenMap(); + public void + should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_some_newly_up_and_dice_roll_4() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + dsePolicy.upTimes.put(node1, T2); // newest up replica + dsePolicy.upTimes.put(node3, T1); + given(pool3.getInFlight()).willReturn(0); + given(pool5.getInFlight()).willReturn(0); + + // When + Queue<Node> plan1 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // newest up replica is node1 in first position and diceRoll = 4 -> bubbles down + assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); + assertThat(plan2).containsExactly(node3, node5, node1, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(times(2)).diceRoll1d4(); } @Test - public void should_use_round_robin_when_token_map_returns_no_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(Collections.emptySet()); - - assertRoundRobinQueryPlans(); + public void + should_prioritize_and_shuffle_3_or_more_replicas_when_all_healthy_and_some_newly_up_and_dice_roll_1() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + dsePolicy.upTimes.put(node1, T2); // newest up replica + dsePolicy.upTimes.put(node3, T1); + given(pool1.getInFlight()).willReturn(0); + given(pool3.getInFlight()).willReturn(0); + diceRoll = 1; + + // When + Queue<Node> plan1 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // newest up replica is node1 in first position and diceRoll = 1 -> does not bubble down + assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); + assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(times(2)).diceRoll1d4(); + } - verify(tokenMap, atLeast(1)).getReplicas(KEYSPACE, ROUTING_KEY); + @Test + public void should_prioritize_and_shuffle_3_or_more_replicas_when_first_unhealthy() { + // Given + 
given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + given(pool1.getInFlight()).willReturn(100); // unhealthy + given(pool3.getInFlight()).willReturn(0); + given(pool5.getInFlight()).willReturn(0); + + dsePolicy.responseTimes.put( + node1, + dsePolicy + .new NodeResponseRateSample(new AtomicLongArray(new long[] {T0, T0}))); // unhealthy + + // When + Queue<Node> plan1 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // node1 is unhealthy -> bubbles down + assertThat(plan1).containsExactly(node3, node5, node1, node2, node4); + assertThat(plan2).containsExactly(node3, node5, node1, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); } - private void assertRoundRobinQueryPlans() { - for (int i = 0; i < 3; i++) { - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node1, node2, node3, node4, node5); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node2, node3, node4, node5, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node4, node5, node1, node2); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node4, node5, node1, node2, node3); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node5, node1, node2, node3, node4); - } + @Test + public void + should_not_treat_node_as_unhealthy_if_has_in_flight_exceeded_but_response_times_normal() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + given(pool1.getInFlight()).willReturn(100); // unhealthy + given(pool3.getInFlight()).willReturn(0); + given(pool5.getInFlight()).willReturn(0); + + dsePolicy.responseTimes.put( + node1, + dsePolicy.new NodeResponseRateSample(new AtomicLongArray(new long[] {T1, T1}))); // healthy + + // When + Queue<Node> plan1 = dsePolicy.newQueryPlan(request, session); + Queue<Node> plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // node1 has more in-flight than node3 -> swap + assertThat(plan1).containsExactly(node3, node1, node5, node2, node4); + assertThat(plan2).containsExactly(node3, node1, node5, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); } @Test - public void should_prioritize_single_replica() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3)); - - // node3 always first, round-robin on the rest - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node1, node2, node4, node5); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node2, node4, node5, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node4, node5, node1, node2); - assertThat(policy.newQueryPlan(request, 
session)) - .containsExactly(node3, node5, node1, node2, node4); - - // Should not shuffle replicas since there is only one - verify(policy, never()).shuffleHead(any(), anyInt()); + public void should_prioritize_and_shuffle_3_or_more_replicas_when_last_unhealthy() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + given(pool1.getInFlight()).willReturn(0); + given(pool3.getInFlight()).willReturn(0); + given(pool5.getInFlight()).willReturn(100); // unhealthy + + // When + Queue plan1 = dsePolicy.newQueryPlan(request, session); + Queue plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // node5 is unhealthy -> noop + assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); + assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); } @Test - public void should_prioritize_and_shuffle_replicas() { - when(request.getRoutingKeyspace()).thenReturn(KEYSPACE); - when(request.getRoutingKey()).thenReturn(ROUTING_KEY); - when(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)).thenReturn(ImmutableSet.of(node3, node5)); - - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node1, node2, node4); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node2, node4, node1); - assertThat(policy.newQueryPlan(request, session)) - .containsExactly(node3, node5, node4, node1, node2); - - verify(policy, times(3)).shuffleHead(any(), eq(2)); - // No power of two choices with only two replicas - verify(session, never()).getPools(); + public void should_prioritize_and_shuffle_3_or_more_replicas_when_majority_unhealthy() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + given(pool1.getInFlight()).willReturn(100); + given(pool3.getInFlight()).willReturn(100); + given(pool5.getInFlight()).willReturn(0); + + // When + Queue plan1 = dsePolicy.newQueryPlan(request, session); + Queue plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // majority of nodes unhealthy -> noop + assertThat(plan1).containsExactly(node1, node3, node5, node2, node4); + assertThat(plan2).containsExactly(node1, node3, node5, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); } - static class NonShufflingPolicy extends DefaultLoadBalancingPolicy { - NonShufflingPolicy(DriverContext context, String profileName) { - super(context, profileName); - } + @Test + public void should_reorder_first_two_replicas_when_first_has_more_in_flight_than_second() { + // Given + given(request.getRoutingKeyspace()).willReturn(KEYSPACE); + given(request.getRoutingKey()).willReturn(ROUTING_KEY); + given(tokenMap.getReplicas(KEYSPACE, ROUTING_KEY)) + .willReturn(ImmutableSet.of(node1, node3, node5)); + given(pool1.getInFlight()).willReturn(200); + given(pool3.getInFlight()).willReturn(100); + + // 
When + Queue plan1 = dsePolicy.newQueryPlan(request, session); + Queue plan2 = dsePolicy.newQueryPlan(request, session); + + // Then + // nodes 1, 3 and 5 always first, round-robin on the rest + // node1 has more in-flight than node3 -> swap + assertThat(plan1).containsExactly(node3, node1, node5, node2, node4); + assertThat(plan2).containsExactly(node3, node1, node5, node4, node2); + + then(dsePolicy).should(times(2)).shuffleHead(any(), anyInt()); + then(dsePolicy).should(times(2)).nanoTime(); + then(dsePolicy).should(never()).diceRoll1d4(); + } - @Override - protected void shuffleHead(Object[] currentNodes, int replicaCount) { - // nothing (keep in same order) - } + @Override + protected DefaultLoadBalancingPolicy createAndInitPolicy() { + DefaultLoadBalancingPolicy policy = + spy( + new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { + @Override + protected void shuffleHead(Object[] array, int n) {} + + @Override + protected long nanoTime() { + return nanoTime; + } + + @Override + protected int diceRoll1d4() { + return diceRoll; + } + }); + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, + UUID.randomUUID(), node2, + UUID.randomUUID(), node3, + UUID.randomUUID(), node4, + UUID.randomUUID(), node5), + distanceReporter); + return policy; } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java new file mode 100644 index 00000000000..757af43ef67 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyRequestTrackerTest.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static com.datastax.oss.driver.api.core.config.DriverExecutionProfile.DEFAULT_NAME; +import static org.mockito.BDDMockito.given; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; + +public class DefaultLoadBalancingPolicyRequestTrackerTest extends LoadBalancingPolicyTestBase { + + @Mock Request request; + @Mock DriverExecutionProfile profile; + final String logPrefix = "lbp-test-log-prefix"; + + private DefaultLoadBalancingPolicy policy; + private long nextNanoTime; + + @Before + @Override + public void setup() { + super.setup(); + given(metadataManager.getContactPoints()).willReturn(ImmutableSet.of(node1)); + policy = + new DefaultLoadBalancingPolicy(context, DEFAULT_NAME) { + @Override + protected long nanoTime() { + return nextNanoTime; + } + }; + policy.init( + ImmutableMap.of( + UUID.randomUUID(), node1, + UUID.randomUUID(), node2, + UUID.randomUUID(), node3), + distanceReporter); + } + + @Test + public void should_record_first_response_time_on_node_success() { + // Given + nextNanoTime = 123; + + // When + policy.onNodeSuccess(request, 0, profile, node1, logPrefix); + + // Then + assertThat(policy.responseTimes) + .hasEntrySatisfying(node1, value -> assertThat(value.oldest).isEqualTo(123L)) + .doesNotContainKeys(node2, node3); + assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); + } + + @Test + public void should_record_second_response_time_on_node_success() { + // Given + should_record_first_response_time_on_node_success(); + nextNanoTime = 456; + + // When + policy.onNodeSuccess(request, 0, profile, node1, logPrefix); + + // Then + assertThat(policy.responseTimes) + .hasEntrySatisfying( + node1, + value -> { + // oldest value first + assertThat(value.oldest).isEqualTo(123); + assertThat(value.newest.getAsLong()).isEqualTo(456); + }) + .doesNotContainKeys(node2, node3); + assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); + } + + @Test + public void should_record_further_response_times_on_node_success() { + // Given + should_record_second_response_time_on_node_success(); + nextNanoTime = 789; + + // When + policy.onNodeSuccess(request, 0, profile, node1, logPrefix); + policy.onNodeSuccess(request, 0, profile, node2, logPrefix); + + // Then + assertThat(policy.responseTimes) + .hasEntrySatisfying( + node1, + value -> { + // values should rotate left (bubble up) + assertThat(value.oldest).isEqualTo(456); + assertThat(value.newest.getAsLong()).isEqualTo(789); + }) + .hasEntrySatisfying(node2, value -> assertThat(value.oldest).isEqualTo(789)) + .doesNotContainKey(node3); + assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); + 
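// node3 has had no responses recorded at all; as the assertion below shows, the absence
+ // of samples alone never flags a node's response rate as insufficient. +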
assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); + } + + @Test + public void should_record_first_response_time_on_node_error() { + // Given + nextNanoTime = 123; + Throwable iae = new IllegalArgumentException(); + + // When + policy.onNodeError(request, iae, 0, profile, node1, logPrefix); + + // Then + assertThat(policy.responseTimes) + .hasEntrySatisfying(node1, value -> assertThat(value.oldest).isEqualTo(123L)) + .doesNotContainKeys(node2, node3); + assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); + } + + @Test + public void should_record_second_response_time_on_node_error() { + // Given + should_record_first_response_time_on_node_error(); + nextNanoTime = 456; + Throwable iae = new IllegalArgumentException(); + + // When + policy.onNodeError(request, iae, 0, profile, node1, logPrefix); + + // Then + assertThat(policy.responseTimes) + .hasEntrySatisfying( + node1, + value -> { + // oldest value first + assertThat(value.oldest).isEqualTo(123); + assertThat(value.newest.getAsLong()).isEqualTo(456); + }) + .doesNotContainKeys(node2, node3); + assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); + } + + @Test + public void should_record_further_response_times_on_node_error() { + // Given + should_record_second_response_time_on_node_error(); + nextNanoTime = 789; + Throwable iae = new IllegalArgumentException(); + + // When + policy.onNodeError(request, iae, 0, profile, node1, logPrefix); + policy.onNodeError(request, iae, 0, profile, node2, logPrefix); + + // Then + assertThat(policy.responseTimes) + .hasEntrySatisfying( + node1, + value -> { + // values should rotate left (bubble up) + assertThat(value.oldest).isEqualTo(456); + assertThat(value.newest.getAsLong()).isEqualTo(789); + }) + .hasEntrySatisfying(node2, value -> assertThat(value.oldest).isEqualTo(789)) + .doesNotContainKey(node3); + assertThat(policy.isResponseRateInsufficient(node1, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node2, nextNanoTime)).isFalse(); + assertThat(policy.isResponseRateInsufficient(node3, nextNanoTime)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java similarity index 64% rename from core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyTestBase.java rename to core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java index e4f648eb3af..c9149efa69f 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/DefaultLoadBalancingPolicyTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/LoadBalancingPolicyTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,14 +28,13 @@ import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.DefaultConsistencyLevelRegistry; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.DefaultNode; import com.datastax.oss.driver.internal.core.metadata.MetadataManager; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import org.junit.After; import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; @@ -42,9 +43,7 @@ import org.slf4j.LoggerFactory; @RunWith(MockitoJUnitRunner.class) -public abstract class DefaultLoadBalancingPolicyTestBase { - - @Rule public ExpectedException thrown = ExpectedException.none(); +public abstract class LoadBalancingPolicyTestBase { @Mock protected DefaultNode node1; @Mock protected DefaultNode node2; @@ -68,12 +67,25 @@ public void setup() { when(context.getConfig()).thenReturn(config); when(config.getProfile(DriverExecutionProfile.DEFAULT_NAME)).thenReturn(defaultProfile); - when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, null)) + when(defaultProfile.getName()).thenReturn(DriverExecutionProfile.DEFAULT_NAME); + when(defaultProfile.isDefined(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) + .thenReturn(true); + when(defaultProfile.getString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER)) .thenReturn("dc1"); + when(defaultProfile.getBoolean(DefaultDriverOption.LOAD_BALANCING_POLICY_SLOW_AVOIDANCE, true)) + .thenReturn(true); + when(defaultProfile.getInt( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC)) + .thenReturn(0); + when(defaultProfile.getBoolean( + DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS)) + .thenReturn(false); + when(defaultProfile.getString(DefaultDriverOption.REQUEST_CONSISTENCY)).thenReturn("ONE"); when(context.getMetadataManager()).thenReturn(metadataManager); - logger = (Logger) LoggerFactory.getLogger(DefaultLoadBalancingPolicy.class); + logger = + (Logger) LoggerFactory.getLogger("com.datastax.oss.driver.internal.core.loadbalancing"); logger.addAppender(appender); for (Node node : ImmutableList.of(node1, node2, node3, node4, node5)) { @@ -81,6 +93,7 @@ public void setup() { } when(context.getLocalDatacenter(anyString())).thenReturn(null); + when(context.getConsistencyLevelRegistry()).thenReturn(new DefaultConsistencyLevelRegistry()); } @After diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java 
b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java new file mode 100644 index 00000000000..0730bcd346c --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/DcAgnosticNodeSetTest.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import com.datastax.oss.driver.api.core.metadata.Node; +import org.junit.Test; + +public class DcAgnosticNodeSetTest { + + @Test + public void should_add_node() { + DcAgnosticNodeSet set = new DcAgnosticNodeSet(); + Node node = mock(Node.class); + assertThat(set.add(node)).isTrue(); + assertThat(set.add(node)).isFalse(); + } + + @Test + public void should_remove_node() { + DcAgnosticNodeSet set = new DcAgnosticNodeSet(); + Node node = mock(Node.class); + set.add(node); + assertThat(set.remove(node)).isTrue(); + assertThat(set.remove(node)).isFalse(); + } + + @Test + public void should_return_all_nodes() { + DcAgnosticNodeSet set = new DcAgnosticNodeSet(); + Node node1 = mock(Node.class); + set.add(node1); + Node node2 = mock(Node.class); + set.add(node2); + assertThat(set.dc(null)).contains(node1, node2); + assertThat(set.dc("irrelevant")).contains(node1, node2); + } + + @Test + public void should_return_empty_dcs() { + DcAgnosticNodeSet set = new DcAgnosticNodeSet(); + assertThat(set.dcs()).isEmpty(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java new file mode 100644 index 00000000000..21c58cbb829 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/MultiDcNodeSetTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.metadata.Node; +import org.junit.Test; + +public class MultiDcNodeSetTest { + + @Test + public void should_add_node() { + MultiDcNodeSet set = new MultiDcNodeSet(); + Node node1 = mockNode("dc1"); + assertThat(set.add(node1)).isTrue(); + assertThat(set.add(node1)).isFalse(); + Node node2 = mockNode("dc2"); + assertThat(set.add(node2)).isTrue(); + assertThat(set.add(node2)).isFalse(); + } + + @Test + public void should_remove_node() { + MultiDcNodeSet set = new MultiDcNodeSet(); + Node node1 = mockNode("dc1"); + set.add(node1); + assertThat(set.remove(node1)).isTrue(); + assertThat(set.remove(node1)).isFalse(); + Node node2 = mockNode("dc2"); + set.add(node2); + assertThat(set.remove(node2)).isTrue(); + assertThat(set.remove(node2)).isFalse(); + } + + @Test + public void should_return_all_nodes_in_dc() { + MultiDcNodeSet set = new MultiDcNodeSet(); + Node node1 = mockNode("dc1"); + set.add(node1); + Node node2 = mockNode("dc1"); + set.add(node2); + Node node3 = mockNode("dc2"); + set.add(node3); + assertThat(set.dc("dc1")).contains(node1, node2); + assertThat(set.dc("dc2")).contains(node3); + assertThat(set.dc("dc3")).isEmpty(); + assertThat(set.dc(null)).isEmpty(); + } + + @Test + public void should_return_all_dcs() { + MultiDcNodeSet set = new MultiDcNodeSet(); + Node node1 = mockNode("dc1"); + set.add(node1); + Node node2 = mockNode("dc2"); + set.add(node2); + assertThat(set.dcs()).contains("dc1", "dc2"); + } + + private Node mockNode(String dc) { + Node node = mock(Node.class); + when(node.getDatacenter()).thenReturn(dc); + return node; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java new file mode 100644 index 00000000000..063c13c9386 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/loadbalancing/nodeset/SingleDcNodeSetTest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.loadbalancing.nodeset; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.metadata.Node; +import org.junit.Test; + +public class SingleDcNodeSetTest { + + @Test + public void should_add_node() { + SingleDcNodeSet set = new SingleDcNodeSet("dc1"); + Node node1 = mockNode("dc1"); + assertThat(set.add(node1)).isTrue(); + assertThat(set.add(node1)).isFalse(); + Node node2 = mockNode("dc2"); + assertThat(set.add(node2)).isFalse(); + } + + @Test + public void should_remove_node() { + SingleDcNodeSet set = new SingleDcNodeSet("dc1"); + Node node = mockNode("dc1"); + set.add(node); + assertThat(set.remove(node)).isTrue(); + assertThat(set.remove(node)).isFalse(); + } + + @Test + public void should_return_all_nodes_if_local_dc() { + SingleDcNodeSet set = new SingleDcNodeSet("dc1"); + Node node1 = mockNode("dc1"); + set.add(node1); + Node node2 = mockNode("dc1"); + set.add(node2); + Node node3 = mockNode("dc2"); + set.add(node3); + assertThat(set.dc("dc1")).contains(node1, node2); + assertThat(set.dc("dc2")).isEmpty(); + assertThat(set.dc(null)).isEmpty(); + } + + @Test + public void should_return_only_local_dc() { + SingleDcNodeSet set = new SingleDcNodeSet("dc1"); + assertThat(set.dcs()).contains("dc1"); + } + + private Node mockNode(String dc) { + Node node = mock(Node.class); + when(node.getDatacenter()).thenReturn(dc); + return node; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java index 52d509ada88..8d337bcc7e3 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/AddNodeRefreshTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; @@ -35,17 +38,17 @@ @RunWith(MockitoJUnitRunner.class) public class AddNodeRefreshTest { - private static final InetSocketAddress ADDRESS1 = new InetSocketAddress("127.0.0.1", 9042); - private static final InetSocketAddress ADDRESS2 = new InetSocketAddress("127.0.0.2", 9042); @Mock private InternalDriverContext context; @Mock protected MetricsFactory metricsFactory; + @Mock private ChannelFactory channelFactory; private DefaultNode node1; @Before public void setup() { when(context.getMetricsFactory()).thenReturn(metricsFactory); + when(context.getChannelFactory()).thenReturn(channelFactory); node1 = TestNodeFactory.newNode(1, context); } @@ -54,7 +57,7 @@ public void should_add_new_node() { // Given DefaultMetadata oldMetadata = new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null); + ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); UUID newHostId = Uuids.random(); DefaultEndPoint newEndPoint = TestNodeFactory.newEndPoint(2); UUID newSchemaVersion = Uuids.random(); @@ -84,11 +87,11 @@ public void should_add_new_node() { } @Test - public void should_not_add_existing_node() { + public void should_not_add_existing_node_with_same_id_and_endpoint() { // Given DefaultMetadata oldMetadata = new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null); + ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); DefaultNodeInfo newNodeInfo = DefaultNodeInfo.builder() .withHostId(node1.getHostId()) @@ -108,4 +111,37 @@ public void should_not_add_existing_node() { assertThat(node1.getRack()).isNull(); assertThat(result.events).isEmpty(); } + + @Test + public void should_add_existing_node_with_same_id_but_different_endpoint() { + // Given + DefaultMetadata oldMetadata = + new DefaultMetadata( + ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); + DefaultEndPoint newEndPoint = TestNodeFactory.newEndPoint(2); + InetSocketAddress newBroadcastRpcAddress = newEndPoint.resolve(); + UUID newSchemaVersion = Uuids.random(); + DefaultNodeInfo newNodeInfo = + DefaultNodeInfo.builder() + .withHostId(node1.getHostId()) + .withEndPoint(newEndPoint) + .withDatacenter("dc1") + .withRack("rack2") + .withSchemaVersion(newSchemaVersion) + .withBroadcastRpcAddress(newBroadcastRpcAddress) + .build(); + AddNodeRefresh refresh = new AddNodeRefresh(newNodeInfo); + + // When + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + + // Then + Map newNodes = result.newMetadata.getNodes(); + assertThat(newNodes).hasSize(1).containsEntry(node1.getHostId(), node1); + assertThat(node1.getEndPoint()).isEqualTo(newEndPoint); + assertThat(node1.getDatacenter()).isEqualTo("dc1"); + assertThat(node1.getRack()).isEqualTo("rack2"); + 
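// The refresh keeps the same DefaultNode instance under the same host id and mutates it
+ // in place; besides the fields asserted here, it also emits a suggestUp topology event for
+ // the node's new broadcast RPC address. +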
assertThat(node1.getSchemaVersion()).isEqualTo(newSchemaVersion); + assertThat(result.events).containsExactly(TopologyEvent.suggestUp(newBroadcastRpcAddress)); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java index 5c3689920c1..7da8fb39415 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultEndPointTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,16 +18,13 @@ package com.datastax.oss.driver.internal.core.metadata; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.net.InetSocketAddress; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class DefaultEndPointTest { - @Rule public ExpectedException thrown = ExpectedException.none(); - @Test public void should_create_from_host_name() { DefaultEndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("localhost", 9042)); @@ -54,9 +53,8 @@ public void should_create_from_unresolved_address() { @Test public void should_reject_null_address() { - thrown.expect(NullPointerException.class); - thrown.expectMessage("address can't be null"); - - new DefaultEndPoint(null); + assertThatThrownBy(() -> new DefaultEndPoint(null)) + .isInstanceOf(NullPointerException.class) + .hasMessage("address can't be null"); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java index 79e56e1d832..b463f9caa7b 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultMetadataTokenMapTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,6 +24,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.token.DefaultReplicationStrategyFactory; import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory; @@ -53,9 +56,11 @@ public class DefaultMetadataTokenMapTest { "class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")); @Mock private InternalDriverContext context; + @Mock private ChannelFactory channelFactory; @Before public void setup() { + when(context.getChannelFactory()).thenReturn(channelFactory); DefaultReplicationStrategyFactory replicationStrategyFactory = new DefaultReplicationStrategyFactory(context); when(context.getReplicationStrategyFactory()).thenReturn(replicationStrategyFactory); @@ -65,7 +70,7 @@ public void setup() { public void should_not_build_token_map_when_initializing_with_contact_points() { DefaultMetadata contactPointsMetadata = new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null); + ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); assertThat(contactPointsMetadata.getTokenMap()).isNotPresent(); } @@ -73,7 +78,7 @@ public void should_not_build_token_map_when_initializing_with_contact_points() { public void should_build_minimal_token_map_on_first_refresh() { DefaultMetadata contactPointsMetadata = new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null); + ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); DefaultMetadata firstRefreshMetadata = contactPointsMetadata.withNodes( ImmutableMap.of(NODE1.getHostId(), NODE1), @@ -88,7 +93,7 @@ public void should_build_minimal_token_map_on_first_refresh() { public void should_not_build_token_map_when_disabled() { DefaultMetadata contactPointsMetadata = new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null); + ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); DefaultMetadata firstRefreshMetadata = contactPointsMetadata.withNodes( ImmutableMap.of(NODE1.getHostId(), NODE1), @@ -103,7 +108,7 @@ public void should_not_build_token_map_when_disabled() { public void should_stay_empty_on_first_refresh_if_partitioner_missing() { DefaultMetadata contactPointsMetadata = new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null); + ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); DefaultMetadata firstRefreshMetadata = contactPointsMetadata.withNodes( ImmutableMap.of(NODE1.getHostId(), NODE1), true, true, null, context); @@ -114,7 +119,7 @@ public void should_stay_empty_on_first_refresh_if_partitioner_missing() { public void should_update_minimal_token_map_if_new_node_and_still_no_schema() { DefaultMetadata contactPointsMetadata = new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null); + 
ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); DefaultMetadata firstRefreshMetadata = contactPointsMetadata.withNodes( ImmutableMap.of(NODE1.getHostId(), NODE1), @@ -136,7 +141,7 @@ public void should_update_minimal_token_map_if_new_node_and_still_no_schema() { public void should_update_token_map_when_schema_changes() { DefaultMetadata contactPointsMetadata = new DefaultMetadata( - ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null); + ImmutableMap.of(NODE1.getHostId(), NODE1), Collections.emptyMap(), null, null); DefaultMetadata firstRefreshMetadata = contactPointsMetadata.withNodes( ImmutableMap.of(NODE1.getHostId(), NODE1), diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java new file mode 100644 index 00000000000..6a53fe3e433 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultNodeTest.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.internal.core.context.MockedDriverContextFactory; +import java.net.InetSocketAddress; +import java.util.UUID; +import org.junit.Test; + +public class DefaultNodeTest { + + private final String uuidStr = "1e4687e6-f94e-432e-a792-216f89ef265f"; + private final UUID hostId = UUID.fromString(uuidStr); + private final EndPoint endPoint = new DefaultEndPoint(new InetSocketAddress("localhost", 9042)); + + @Test + public void should_have_expected_string_representation() { + + DefaultNode node = new DefaultNode(endPoint, MockedDriverContextFactory.defaultDriverContext()); + node.hostId = hostId; + + String expected = + String.format( + "Node(endPoint=localhost/127.0.0.1:9042, hostId=1e4687e6-f94e-432e-a792-216f89ef265f, hashCode=%x)", + node.hashCode()); + assertThat(node.toString()).isEqualTo(expected); + } + + @Test + public void should_have_expected_string_representation_if_hostid_is_null() { + + DefaultNode node = new DefaultNode(endPoint, MockedDriverContextFactory.defaultDriverContext()); + node.hostId = null; + + String expected = + String.format( + "Node(endPoint=localhost/127.0.0.1:9042, hostId=null, hashCode=%x)", node.hashCode()); + assertThat(node.toString()).isEqualTo(expected); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java index 084e74b1f74..dd40f233518 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/DefaultTopologyMonitorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +20,9 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.filter; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -26,10 +30,15 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; import com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; @@ -42,9 +51,11 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import com.datastax.oss.driver.shaded.guava.common.collect.Maps; import com.datastax.oss.protocol.internal.Message; import com.datastax.oss.protocol.internal.ProtocolConstants; import com.datastax.oss.protocol.internal.response.Error; +import com.google.common.collect.Streams; import com.tngtech.java.junit.dataprovider.DataProvider; import com.tngtech.java.junit.dataprovider.DataProviderRunner; import com.tngtech.java.junit.dataprovider.UseDataProvider; @@ -62,16 +73,19 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.slf4j.LoggerFactory; @RunWith(DataProviderRunner.class) public class DefaultTopologyMonitorTest { - private static final InetSocketAddress ADDRESS1 = new InetSocketAddress("127.0.0.1", 9042); private static final InetSocketAddress ADDRESS2 = new InetSocketAddress("127.0.0.2", 9042); @Mock private InternalDriverContext context; @@ -81,11 +95,19 @@ public class DefaultTopologyMonitorTest { @Mock private DriverChannel channel; @Mock protected MetricsFactory metricsFactory; + @Mock private Appender appender; + @Captor private ArgumentCaptor loggingEventCaptor; + + @Mock private SslEngineFactory sslEngineFactory; + private DefaultNode node1; private DefaultNode node2; private TestTopologyMonitor topologyMonitor; + private Logger logger; + private Level initialLogLevel; + @Before public void setup() { MockitoAnnotations.initMocks(this); @@ -107,6 +129,17 @@ public void setup() { 
when(context.getControlConnection()).thenReturn(controlConnection); topologyMonitor = new TestTopologyMonitor(context); + + logger = (Logger) LoggerFactory.getLogger(DefaultTopologyMonitor.class); + initialLogLevel = logger.getLevel(); + logger.setLevel(Level.INFO); + logger.addAppender(appender); + } + + @After + public void teardown() { + logger.detachAppender(appender); + logger.setLevel(initialLogLevel); } @Test @@ -202,7 +235,7 @@ public void should_refresh_node_from_peers_if_broadcast_address_is_not_present() verify(peer3).getUuid("host_id"); verify(peer3, never()).getString(anyString()); - verify(peer2, times(3)).getUuid("host_id"); + verify(peer2, times(2)).getUuid("host_id"); verify(peer2).getString("data_center"); } @@ -232,22 +265,22 @@ public void should_refresh_node_from_peers_if_broadcast_address_is_not_present_V verify(peer3).getUuid("host_id"); verify(peer3, never()).getString(anyString()); - verify(peer2, times(3)).getUuid("host_id"); + verify(peer2, times(2)).getUuid("host_id"); verify(peer2).getString("data_center"); } @Test public void should_get_new_node_from_peers() { // Given - AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - AdminRow peer1 = mockPeersRow(1, node1.getHostId()); + AdminRow peer3 = mockPeersRow(4, UUID.randomUUID()); + AdminRow peer2 = mockPeersRow(3, node2.getHostId()); + AdminRow peer1 = mockPeersRow(2, node1.getHostId()); topologyMonitor.isSchemaV2 = false; topologyMonitor.stubQueries( new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2, peer1))); // When - CompletionStage<Optional<NodeInfo>> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS1); + CompletionStage<Optional<NodeInfo>> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS2); // Then assertThatStage(futureInfo) @@ -255,7 +288,7 @@ maybeInfo -> { assertThat(maybeInfo.isPresent()).isTrue(); NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc1"); + assertThat(info.getDatacenter()).isEqualTo("dc2"); }); // The rpc_address in each row should have been tried, only the last row should have been // converted @@ -265,22 +298,22 @@ verify(peer3, never()).getInetAddress("rpc_address"); verify(peer2).getInetAddress("rpc_address"); verify(peer2, never()).getString(anyString()); - verify(peer1, times(2)).getInetAddress("rpc_address"); + verify(peer1).getInetAddress("rpc_address"); verify(peer1).getString("data_center"); } @Test public void should_get_new_node_from_peers_v2() { // Given - AdminRow peer3 = mockPeersV2Row(3, UUID.randomUUID()); - AdminRow peer2 = mockPeersV2Row(2, node2.getHostId()); - AdminRow peer1 = mockPeersV2Row(1, node1.getHostId()); + AdminRow peer3 = mockPeersV2Row(4, UUID.randomUUID()); + AdminRow peer2 = mockPeersV2Row(3, node2.getHostId()); + AdminRow peer1 = mockPeersV2Row(2, node1.getHostId()); topologyMonitor.isSchemaV2 = true; topologyMonitor.stubQueries( new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2, peer1))); // When - CompletionStage<Optional<NodeInfo>> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS1); + CompletionStage<Optional<NodeInfo>> futureInfo = topologyMonitor.getNewNodeInfo(ADDRESS2); // Then assertThatStage(futureInfo) @@ -288,7 +321,7 @@ maybeInfo -> { assertThat(maybeInfo.isPresent()).isTrue(); NodeInfo info = maybeInfo.get(); - assertThat(info.getDatacenter()).isEqualTo("dc1"); + assertThat(info.getDatacenter()).isEqualTo("dc2"); }); // The native_address in each row should have been tried, only the last row 
should have been // converted @@ -298,7 +331,7 @@ public void should_get_new_node_from_peers_v2() { verify(peer2).getInetAddress("native_address"); verify(peer2, never()).getString(anyString()); - verify(peer1, times(2)).getInetAddress("native_address"); + verify(peer1).getInetAddress("native_address"); verify(peer1).getString("data_center"); } @@ -341,11 +374,7 @@ public void should_skip_invalid_peers_row(String columnToCheck) { topologyMonitor.isSchemaV2 = false; node2.broadcastAddress = ADDRESS2; AdminRow peer2 = mockPeersRow(2, node2.getHostId()); - if (columnToCheck.equals("rpc_address")) { - when(peer2.getInetAddress(columnToCheck)).thenReturn(null); - } else if (columnToCheck.equals("host_id")) { - when(peer2.getUuid(columnToCheck)).thenReturn(null); - } + when(peer2.isNull(columnToCheck)).thenReturn(true); topologyMonitor.stubQueries( new StubbedQuery( "SELECT * FROM system.peers WHERE peer = :address", @@ -358,6 +387,10 @@ public void should_skip_invalid_peers_row(String columnToCheck) { // Then assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo).isEmpty()); assertThat(node2.broadcastAddress).isNotNull().isEqualTo(ADDRESS2); + assertLog( + Level.WARN, + "[null] Found invalid row in system.peers for peer: /127.0.0.2. " + + "This is likely a gossip or snitch issue, this node will be ignored."); } @Test @@ -367,17 +400,7 @@ public void should_skip_invalid_peers_row_v2(String columnToCheck) { topologyMonitor.isSchemaV2 = true; node2.broadcastAddress = ADDRESS2; AdminRow peer2 = mockPeersV2Row(2, node2.getHostId()); - switch (columnToCheck) { - case "native_address": - when(peer2.getInetAddress(columnToCheck)).thenReturn(null); - break; - case "native_port": - when(peer2.getInteger(columnToCheck)).thenReturn(null); - break; - case "host_id": - when(peer2.getUuid(columnToCheck)).thenReturn(null); - break; - } + when(peer2.isNull(columnToCheck)).thenReturn(true); topologyMonitor.stubQueries( new StubbedQuery( "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port", @@ -390,29 +413,194 @@ public void should_skip_invalid_peers_row_v2(String columnToCheck) { // Then assertThatStage(futureInfo).isSuccess(maybeInfo -> assertThat(maybeInfo).isEmpty()); assertThat(node2.broadcastAddress).isNotNull().isEqualTo(ADDRESS2); + assertLog( + Level.WARN, + "[null] Found invalid row in system.peers_v2 for peer: /127.0.0.2. 
" + + "This is likely a gossip or snitch issue, this node will be ignored."); } - @DataProvider - public static Object[][] columnsToCheckV1() { - return new Object[][] {{"rpc_address"}, {"host_id"}}; + @Test + public void should_stop_executing_queries_once_closed() { + // Given + topologyMonitor.close(); + + // When + CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); + + // Then + assertThatStage(futureInfos) + .isFailed(error -> assertThat(error).isInstanceOf(IllegalStateException.class)); } - @DataProvider - public static Object[][] columnsToCheckV2() { - return new Object[][] {{"native_address"}, {"native_port"}, {"host_id"}}; + @Test + public void should_warn_when_control_host_found_in_system_peers() { + // Given + AdminRow local = mockLocalRow(1, node1.getHostId()); + AdminRow peer1 = mockPeersRow(1, node2.getHostId()); // invalid + AdminRow peer2 = mockPeersRow(2, node2.getHostId()); + AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); + topologyMonitor.stubQueries( + new StubbedQuery("SELECT * FROM system.local", mockResult(local)), + new StubbedQuery("SELECT * FROM system.peers_v2", Collections.emptyMap(), null, true), + new StubbedQuery("SELECT * FROM system.peers", mockResult(peer3, peer2, peer1))); + + // When + CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); + + // Then + assertThatStage(futureInfos) + .isSuccess( + infos -> + assertThat(infos) + .hasSize(3) + .extractingResultOf("getEndPoint") + .containsOnlyOnce(node1.getEndPoint())); + assertLogContains( + Level.WARN, + "[null] Control node /127.0.0.1:9042 has an entry for itself in system.peers: " + + "this entry will be ignored. This is likely due to a misconfiguration; " + + "please verify your rpc_address configuration in cassandra.yaml on " + + "all nodes in your cluster."); } @Test - public void should_stop_executing_queries_once_closed() { + public void should_warn_when_control_host_found_in_system_peers_v2() { // Given - topologyMonitor.close(); + AdminRow local = mockLocalRow(1, node1.getHostId()); + AdminRow peer3 = mockPeersRow(3, UUID.randomUUID()); + AdminRow peer2 = mockPeersRow(2, node2.getHostId()); + AdminRow peer1 = mockPeersRow(1, node2.getHostId()); // invalid + topologyMonitor.stubQueries( + new StubbedQuery("SELECT * FROM system.local", mockResult(local)), + new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer3, peer2, peer1))); // When CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); // Then assertThatStage(futureInfos) - .isFailed(error -> assertThat(error).isInstanceOf(IllegalStateException.class)); + .isSuccess( + infos -> + assertThat(infos) + .hasSize(3) + .extractingResultOf("getEndPoint") + .containsOnlyOnce(node1.getEndPoint())); + assertLogContains( + Level.WARN, + "[null] Control node /127.0.0.1:9042 has an entry for itself in system.peers_v2: " + + "this entry will be ignored. 
This is likely due to a misconfiguration; " + + "please verify your rpc_address configuration in cassandra.yaml on " + + "all nodes in your cluster."); + } + + // Confirm the base case of extracting peer info from peers_v2, no SSL involved + @Test + public void should_get_peer_address_info_peers_v2() { + // Given + AdminRow local = mockLocalRow(1, node1.getHostId()); + AdminRow peer2 = mockPeersV2Row(3, node2.getHostId()); + AdminRow peer1 = mockPeersV2Row(2, node1.getHostId()); + topologyMonitor.isSchemaV2 = true; + topologyMonitor.stubQueries( + new StubbedQuery("SELECT * FROM system.local", mockResult(local)), + new StubbedQuery("SELECT * FROM system.peers_v2", mockResult(peer2, peer1))); + when(context.getSslEngineFactory()).thenReturn(Optional.empty()); + + // When + CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); + + // Then + assertThatStage(futureInfos) + .isSuccess( + infos -> { + Iterator iterator = infos.iterator(); + // First NodeInfo is for local, skip past that + iterator.next(); + NodeInfo peer2nodeInfo = iterator.next(); + assertThat(peer2nodeInfo.getEndPoint().resolve()) + .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); + NodeInfo peer1nodeInfo = iterator.next(); + assertThat(peer1nodeInfo.getEndPoint().resolve()) + .isEqualTo(new InetSocketAddress("127.0.0.2", 9042)); + }); + } + + // Confirm the base case of extracting peer info from DSE peers table, no SSL involved + @Test + public void should_get_peer_address_info_peers_dse() { + // Given + AdminRow local = mockLocalRow(1, node1.getHostId()); + AdminRow peer2 = mockPeersRowDse(3, node2.getHostId()); + AdminRow peer1 = mockPeersRowDse(2, node1.getHostId()); + topologyMonitor.isSchemaV2 = true; + topologyMonitor.stubQueries( + new StubbedQuery("SELECT * FROM system.local", mockResult(local)), + new StubbedQuery("SELECT * FROM system.peers_v2", Maps.newHashMap(), null, true), + new StubbedQuery("SELECT * FROM system.peers", mockResult(peer2, peer1))); + when(context.getSslEngineFactory()).thenReturn(Optional.empty()); + + // When + CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); + + // Then + assertThatStage(futureInfos) + .isSuccess( + infos -> { + Iterator iterator = infos.iterator(); + // First NodeInfo is for local, skip past that + iterator.next(); + NodeInfo peer2nodeInfo = iterator.next(); + assertThat(peer2nodeInfo.getEndPoint().resolve()) + .isEqualTo(new InetSocketAddress("127.0.0.3", 9042)); + NodeInfo peer1nodeInfo = iterator.next(); + assertThat(peer1nodeInfo.getEndPoint().resolve()) + .isEqualTo(new InetSocketAddress("127.0.0.2", 9042)); + }); + } + + // Confirm the base case of extracting peer info from DSE peers table, this time with SSL + @Test + public void should_get_peer_address_info_peers_dse_with_ssl() { + // Given + AdminRow local = mockLocalRow(1, node1.getHostId()); + AdminRow peer2 = mockPeersRowDseWithSsl(3, node2.getHostId()); + AdminRow peer1 = mockPeersRowDseWithSsl(2, node1.getHostId()); + topologyMonitor.isSchemaV2 = true; + topologyMonitor.stubQueries( + new StubbedQuery("SELECT * FROM system.local", mockResult(local)), + new StubbedQuery("SELECT * FROM system.peers_v2", Maps.newHashMap(), null, true), + new StubbedQuery("SELECT * FROM system.peers", mockResult(peer2, peer1))); + when(context.getSslEngineFactory()).thenReturn(Optional.of(sslEngineFactory)); + + // When + CompletionStage> futureInfos = topologyMonitor.refreshNodeList(); + + // Then + assertThatStage(futureInfos) + .isSuccess( + infos -> { + Iterator iterator = infos.iterator(); + // 
First NodeInfo is for local, skip past that + iterator.next(); + NodeInfo peer2nodeInfo = iterator.next(); + assertThat(peer2nodeInfo.getEndPoint().resolve()) + .isEqualTo(new InetSocketAddress("127.0.0.3", 9043)); + NodeInfo peer1nodeInfo = iterator.next(); + assertThat(peer1nodeInfo.getEndPoint().resolve()) + .isEqualTo(new InetSocketAddress("127.0.0.2", 9043)); + }); + } + + @DataProvider + public static Object[][] columnsToCheckV1() { + return new Object[][] {{"rpc_address"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"}}; + } + + @DataProvider + public static Object[][] columnsToCheckV2() { + return new Object[][] { + {"native_address"}, {"native_port"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"} + }; } /** Mocks the query execution logic. */ @@ -473,18 +661,23 @@ private StubbedQuery(String queryString, AdminResult result) { private AdminRow mockLocalRow(int i, UUID hostId) { try { AdminRow row = mock(AdminRow.class); + when(row.isNull("host_id")).thenReturn(hostId == null); when(row.getUuid("host_id")).thenReturn(hostId); when(row.getInetAddress("broadcast_address")) .thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("data_center")).thenReturn(false); when(row.getString("data_center")).thenReturn("dc" + i); when(row.getInetAddress("listen_address")).thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("rack")).thenReturn(false); when(row.getString("rack")).thenReturn("rack" + i); when(row.getString("release_version")).thenReturn("release_version" + i); // The driver should not use this column for the local row, because it can contain the // non-broadcast RPC address. Simulate the bug to ensure it's handled correctly. + when(row.isNull("rpc_address")).thenReturn(false); when(row.getInetAddress("rpc_address")).thenReturn(InetAddress.getByName("0.0.0.0")); + when(row.isNull("tokens")).thenReturn(false); when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); when(row.contains("peer")).thenReturn(false); return row; @@ -497,14 +690,23 @@ private AdminRow mockLocalRow(int i, UUID hostId) { private AdminRow mockPeersRow(int i, UUID hostId) { try { AdminRow row = mock(AdminRow.class); + when(row.isNull("host_id")).thenReturn(hostId == null); when(row.getUuid("host_id")).thenReturn(hostId); when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("data_center")).thenReturn(false); when(row.getString("data_center")).thenReturn("dc" + i); + when(row.isNull("rack")).thenReturn(false); when(row.getString("rack")).thenReturn("rack" + i); when(row.getString("release_version")).thenReturn("release_version" + i); + when(row.isNull("rpc_address")).thenReturn(false); when(row.getInetAddress("rpc_address")).thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("tokens")).thenReturn(false); when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); when(row.contains("peer")).thenReturn(true); + + when(row.isNull("native_address")).thenReturn(true); + when(row.isNull("native_port")).thenReturn(true); + return row; } catch (UnknownHostException e) { fail("unexpected", e); @@ -515,18 +717,56 @@ private AdminRow mockPeersRow(int i, UUID hostId) { private AdminRow mockPeersV2Row(int i, UUID hostId) { try { AdminRow row = mock(AdminRow.class); + when(row.isNull("host_id")).thenReturn(hostId == null); when(row.getUuid("host_id")).thenReturn(hostId); when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." 
+ i)); when(row.getInteger("peer_port")).thenReturn(7000 + i); + when(row.isNull("data_center")).thenReturn(false); when(row.getString("data_center")).thenReturn("dc" + i); + when(row.isNull("rack")).thenReturn(false); when(row.getString("rack")).thenReturn("rack" + i); when(row.getString("release_version")).thenReturn("release_version" + i); + when(row.isNull("native_address")).thenReturn(false); when(row.getInetAddress("native_address")).thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("native_port")).thenReturn(false); when(row.getInteger("native_port")).thenReturn(9042); + when(row.isNull("tokens")).thenReturn(false); when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); when(row.contains("peer")).thenReturn(true); when(row.contains("peer_port")).thenReturn(true); when(row.contains("native_port")).thenReturn(true); + + when(row.isNull("rpc_address")).thenReturn(true); + return row; + } catch (UnknownHostException e) { + fail("unexpected", e); + return null; + } + } + + // Mock row for DSE ~6.8 + private AdminRow mockPeersRowDse(int i, UUID hostId) { + try { + AdminRow row = mock(AdminRow.class); + when(row.contains("peer")).thenReturn(true); + when(row.isNull("data_center")).thenReturn(false); + when(row.getString("data_center")).thenReturn("dc" + i); + when(row.getString("dse_version")).thenReturn("6.8.30"); + when(row.contains("graph")).thenReturn(true); + when(row.isNull("host_id")).thenReturn(hostId == null); + when(row.getUuid("host_id")).thenReturn(hostId); + when(row.getInetAddress("peer")).thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("rack")).thenReturn(false); + when(row.getString("rack")).thenReturn("rack" + i); + when(row.isNull("native_transport_address")).thenReturn(false); + when(row.getInetAddress("native_transport_address")) + .thenReturn(InetAddress.getByName("127.0.0." + i)); + when(row.isNull("native_transport_port")).thenReturn(false); + when(row.getInteger("native_transport_port")).thenReturn(9042); + when(row.isNull("tokens")).thenReturn(false); + when(row.getSetOfString("tokens")).thenReturn(ImmutableSet.of("token" + i)); + when(row.isNull("rpc_address")).thenReturn(false); + return row; } catch (UnknownHostException e) { fail("unexpected", e); @@ -534,9 +774,32 @@ private AdminRow mockPeersV2Row(int i, UUID hostId) { } } + private AdminRow mockPeersRowDseWithSsl(int i, UUID hostId) { + AdminRow row = mockPeersRowDse(i, hostId); + when(row.isNull("native_transport_port_ssl")).thenReturn(false); + when(row.getInteger("native_transport_port_ssl")).thenReturn(9043); + return row; + } + private AdminResult mockResult(AdminRow... 
rows) { AdminResult result = mock(AdminResult.class); when(result.iterator()).thenReturn(Iterators.forArray(rows)); return result; } + + private void assertLog(Level level, String message) { + verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); + Iterable<ILoggingEvent> logs = + filter(loggingEventCaptor.getAllValues()).with("level", level).get(); + assertThat(logs).hasSize(1); + assertThat(logs.iterator().next().getFormattedMessage()).contains(message); + } + + private void assertLogContains(Level level, String message) { + verify(appender, atLeast(1)).doAppend(loggingEventCaptor.capture()); + Iterable<ILoggingEvent> logs = + filter(loggingEventCaptor.getAllValues()).with("level", level).get(); + assertThat( + Streams.stream(logs) + .map(ILoggingEvent::getFormattedMessage) + .anyMatch(log -> log.contains(message))) + .isTrue(); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java index 1d7b0b0d02f..679ec1be037 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/FullNodeListRefreshTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +20,9 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static org.mockito.Mockito.when; +import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; @@ -36,18 +40,23 @@ public class FullNodeListRefreshTest { @Mock private InternalDriverContext context; @Mock protected MetricsFactory metricsFactory; + @Mock private ChannelFactory channelFactory; private DefaultNode node1; private DefaultNode node2; - private DefaultNode node3; + private EndPoint endPoint3; + private UUID hostId3; @Before public void setup() { when(context.getMetricsFactory()).thenReturn(metricsFactory); + when(context.getChannelFactory()).thenReturn(channelFactory); node1 = TestNodeFactory.newNode(1, context); node2 = TestNodeFactory.newNode(2, context); - node3 = TestNodeFactory.newNode(3, context); + + endPoint3 = TestNodeFactory.newEndPoint(3); + hostId3 = UUID.randomUUID(); } @Test @@ -57,6 +66,7 @@ public void should_add_and_remove_nodes() { new DefaultMetadata( ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), Collections.emptyMap(), + null, null); Iterable newInfos = ImmutableList.of( @@ -64,18 +74,15 @@ public void should_add_and_remove_nodes() { .withEndPoint(node2.getEndPoint()) .withHostId(node2.getHostId()) .build(), - DefaultNodeInfo.builder() - .withEndPoint(node3.getEndPoint()) - .withHostId(node3.getHostId()) - .build()); + DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build()); FullNodeListRefresh refresh = new FullNodeListRefresh(newInfos); // When MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); // Then - assertThat(result.newMetadata.getNodes()) - .containsOnlyKeys(node2.getHostId(), node3.getHostId()); + assertThat(result.newMetadata.getNodes()).containsOnlyKeys(node2.getHostId(), hostId3); + DefaultNode node3 = (DefaultNode) result.newMetadata.getNodes().get(hostId3); assertThat(result.events) .containsOnly(NodeStateEvent.removed(node1), NodeStateEvent.added(node3)); } @@ -87,6 +94,7 @@ public void should_update_existing_nodes() { new DefaultMetadata( ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), Collections.emptyMap(), + null, null); UUID schemaVersion1 = Uuids.random(); @@ -123,4 +131,50 @@ public void should_update_existing_nodes() { assertThat(node2.getSchemaVersion()).isEqualTo(schemaVersion2); assertThat(result.events).isEmpty(); } + + @Test + public void should_ignore_duplicate_host_ids() { + // Given + DefaultMetadata oldMetadata = + new DefaultMetadata( + ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), + Collections.emptyMap(), + null, + null); + + Iterable newInfos = + ImmutableList.of( + DefaultNodeInfo.builder() + .withEndPoint(node1.getEndPoint()) + .withDatacenter("dc1") + .withRack("rack1") + .withHostId(node1.getHostId()) + .build(), + DefaultNodeInfo.builder() + .withEndPoint(node2.getEndPoint()) + .withDatacenter("dc1") + 
.withRack("rack2") + .withHostId(node2.getHostId()) + .build(), + // Duplicate host id for node 2, should be ignored: + DefaultNodeInfo.builder() + .withEndPoint(node2.getEndPoint()) + .withDatacenter("dc1") + .withRack("rack3") + .withHostId(node2.getHostId()) + .build()); + FullNodeListRefresh refresh = new FullNodeListRefresh(newInfos); + + // When + MetadataRefresh.Result result = refresh.compute(oldMetadata, false, context); + + // Then + assertThat(result.newMetadata.getNodes()) + .containsOnlyKeys(node1.getHostId(), node2.getHostId()); + assertThat(node1.getDatacenter()).isEqualTo("dc1"); + assertThat(node1.getRack()).isEqualTo("rack1"); + assertThat(node2.getDatacenter()).isEqualTo("dc1"); + assertThat(node2.getRack()).isEqualTo("rack2"); + assertThat(result.events).isEmpty(); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java new file mode 100644 index 00000000000..3787bf8fe10 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/InitialNodeListRefreshTest.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.Map; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class InitialNodeListRefreshTest { + + @Mock private InternalDriverContext context; + @Mock protected MetricsFactory metricsFactory; + @Mock private ChannelFactory channelFactory; + + private DefaultNode contactPoint1; + private DefaultNode contactPoint2; + private EndPoint endPoint3; + private UUID hostId1; + private UUID hostId2; + private UUID hostId3; + private UUID hostId4; + private UUID hostId5; + + @Before + public void setup() { + when(context.getMetricsFactory()).thenReturn(metricsFactory); + when(context.getChannelFactory()).thenReturn(channelFactory); + + contactPoint1 = TestNodeFactory.newContactPoint(1, context); + contactPoint2 = TestNodeFactory.newContactPoint(2, context); + + endPoint3 = TestNodeFactory.newEndPoint(3); + hostId1 = UUID.randomUUID(); + hostId2 = UUID.randomUUID(); + hostId3 = UUID.randomUUID(); + hostId4 = UUID.randomUUID(); + hostId5 = UUID.randomUUID(); + } + + @Test + public void should_copy_contact_points_on_first_endpoint_match_only() { + // Given + Iterable newInfos = + ImmutableList.of( + DefaultNodeInfo.builder() + .withEndPoint(contactPoint1.getEndPoint()) + // in practice there are more fields, but hostId is enough to validate the logic + .withHostId(hostId1) + .build(), + DefaultNodeInfo.builder() + .withEndPoint(contactPoint2.getEndPoint()) + .withHostId(hostId2) + .build(), + DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build(), + DefaultNodeInfo.builder() + // address translator can translate node addresses to the same endpoints + .withEndPoint(contactPoint2.getEndPoint()) + .withHostId(hostId4) + .build(), + DefaultNodeInfo.builder() + // address translator can translate node addresses to the same endpoints + .withEndPoint(endPoint3) + .withHostId(hostId5) + .build()); + InitialNodeListRefresh refresh = + new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1, contactPoint2)); + + // When + MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); + + // Then + // contact points have been copied to the metadata, and completed with missing information + Map newNodes = result.newMetadata.getNodes(); + assertThat(newNodes).containsOnlyKeys(hostId1, hostId2, hostId3, hostId4, hostId5); + assertThat(newNodes.get(hostId1)).isEqualTo(contactPoint1); + assertThat(contactPoint1.getHostId()).isEqualTo(hostId1); + assertThat(newNodes.get(hostId2)).isEqualTo(contactPoint2); + assertThat(contactPoint2.getHostId()).isEqualTo(hostId2); + // And + // node has been added for the new endpoint + assertThat(newNodes.get(hostId3).getEndPoint()).isEqualTo(endPoint3); + 
assertThat(newNodes.get(hostId3).getHostId()).isEqualTo(hostId3); + // And + // nodes have been added for duplicated endpoints + assertThat(newNodes.get(hostId4).getEndPoint()).isEqualTo(contactPoint2.getEndPoint()); + assertThat(newNodes.get(hostId4).getHostId()).isEqualTo(hostId4); + assertThat(newNodes.get(hostId5).getEndPoint()).isEqualTo(endPoint3); + assertThat(newNodes.get(hostId5).getHostId()).isEqualTo(hostId5); + assertThat(result.events) + .containsExactlyInAnyOrder( + NodeStateEvent.added((DefaultNode) newNodes.get(hostId3)), + NodeStateEvent.added((DefaultNode) newNodes.get(hostId4)), + NodeStateEvent.added((DefaultNode) newNodes.get(hostId5))); + } + + @Test + public void should_add_other_nodes() { + // Given + Iterable newInfos = + ImmutableList.of( + DefaultNodeInfo.builder() + .withEndPoint(contactPoint1.getEndPoint()) + // in practice there are more fields, but hostId is enough to validate the logic + .withHostId(hostId1) + .build(), + DefaultNodeInfo.builder() + .withEndPoint(contactPoint2.getEndPoint()) + .withHostId(hostId2) + .build(), + DefaultNodeInfo.builder().withEndPoint(endPoint3).withHostId(hostId3).build()); + InitialNodeListRefresh refresh = + new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1, contactPoint2)); + + // When + MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); + + // Then + // new node created in addition to the contact points + Map newNodes = result.newMetadata.getNodes(); + assertThat(newNodes).containsOnlyKeys(hostId1, hostId2, hostId3); + Node node3 = newNodes.get(hostId3); + assertThat(node3.getEndPoint()).isEqualTo(endPoint3); + assertThat(node3.getHostId()).isEqualTo(hostId3); + } + + @Test + public void should_ignore_duplicate_host_ids() { + // Given + Iterable newInfos = + ImmutableList.of( + DefaultNodeInfo.builder() + .withEndPoint(contactPoint1.getEndPoint()) + // in practice there are more fields, but hostId is enough to validate the logic + .withHostId(hostId1) + .withDatacenter("dc1") + .build(), + DefaultNodeInfo.builder() + .withEndPoint(contactPoint1.getEndPoint()) + .withDatacenter("dc2") + .withHostId(hostId1) + .build()); + InitialNodeListRefresh refresh = + new InitialNodeListRefresh(newInfos, ImmutableSet.of(contactPoint1)); + + // When + MetadataRefresh.Result result = refresh.compute(DefaultMetadata.EMPTY, false, context); + + // Then + // only the first nodeInfo should have been copied + Map newNodes = result.newMetadata.getNodes(); + assertThat(newNodes).containsOnlyKeys(hostId1); + assertThat(newNodes.get(hostId1)).isEqualTo(contactPoint1); + assertThat(contactPoint1.getHostId()).isEqualTo(hostId1); + assertThat(contactPoint1.getDatacenter()).isEqualTo("dc1"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java index d7be8e96b0b..1a0292e3947 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/LoadBalancingPolicyWrapperTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +20,6 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -41,6 +42,7 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import com.datastax.oss.driver.shaded.guava.common.collect.Lists; import java.util.Map; +import java.util.Objects; import java.util.Queue; import java.util.Set; import java.util.UUID; @@ -54,7 +56,6 @@ import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; @RunWith(MockitoJUnitRunner.class) public class LoadBalancingPolicyWrapperTest { @@ -63,9 +64,8 @@ public class LoadBalancingPolicyWrapperTest { private DefaultNode node2; private DefaultNode node3; - private Map allNodes; private Set contactPoints; - private Queue defaultPolicysQueryPlan; + private Queue defaultPolicyQueryPlan; @Mock private InternalDriverContext context; @Mock private LoadBalancingPolicy policy1; @@ -88,16 +88,18 @@ public void setup() { node3 = TestNodeFactory.newNode(3, context); contactPoints = ImmutableSet.of(node1, node2); - allNodes = + Map allNodes = ImmutableMap.of( - node1.getHostId(), node1, node2.getHostId(), node2, node3.getHostId(), node3); + Objects.requireNonNull(node1.getHostId()), node1, + Objects.requireNonNull(node2.getHostId()), node2, + Objects.requireNonNull(node3.getHostId()), node3); when(metadataManager.getMetadata()).thenReturn(metadata); when(metadata.getNodes()).thenReturn(allNodes); when(metadataManager.getContactPoints()).thenReturn(contactPoints); when(context.getMetadataManager()).thenReturn(metadataManager); - defaultPolicysQueryPlan = Lists.newLinkedList(ImmutableList.of(node3, node2, node1)); - when(policy1.newQueryPlan(null, null)).thenReturn(defaultPolicysQueryPlan); + defaultPolicyQueryPlan = Lists.newLinkedList(ImmutableList.of(node3, node2, node1)); + when(policy1.newQueryPlan(null, null)).thenReturn(defaultPolicyQueryPlan); eventBus = spy(new EventBus("test")); when(context.getEventBus()).thenReturn(eventBus); @@ -125,7 +127,7 @@ public void should_build_query_plan_from_contact_points_before_init() { for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { verify(policy, never()).newQueryPlan(null, null); } - assertThat(queryPlan).containsOnlyElementsOf(contactPoints); + assertThat(queryPlan).hasSameElementsAs(contactPoints); } @Test @@ -142,7 +144,7 @@ public void should_fetch_query_plan_from_policy_after_init() { // Then // no-arg newQueryPlan() uses the default profile verify(policy1).newQueryPlan(null, null); - assertThat(queryPlan).isEqualTo(defaultPolicysQueryPlan); + assertThat(queryPlan).isEqualTo(defaultPolicyQueryPlan); } 
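The next hunk rewrites the latch choreography in `should_accumulate_events_during_init_and_replay`: a two-latch handoff replaces the old sleep-based timing. As a minimal standalone sketch of the pattern (class and variable names here are illustrative, not part of this diff):

```java
import java.util.concurrent.CountDownLatch;

// Hypothetical illustration of a two-latch handoff: the helper thread blocks on
// startLatch, the main thread releases it, then waits on doneLatch until the
// helper has finished its work.
public class LatchHandoffExample {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch startLatch = new CountDownLatch(1);
    CountDownLatch doneLatch = new CountDownLatch(1);

    Thread helper =
        new Thread(
            () -> {
              try {
                startLatch.await(); // block until the main thread is ready
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
              }
              System.out.println("fire the event under test here");
              doneLatch.countDown(); // signal completion
            });
    helper.start();

    startLatch.countDown(); // release the helper
    doneLatch.await(); // wait until the helper is done
    helper.join();
  }
}
```

Blocking on latches instead of sleeping makes the test deterministic: it fails fast if the helper thread never runs, rather than passing or failing at the mercy of the scheduler.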
@Test @@ -236,25 +238,16 @@ public void should_accumulate_events_during_init_and_replay() throws InterruptedException { // Given - // Hack to obtain concurrency: the main thread blocks in init, while another thread fires an - // event on the bus - CountDownLatch eventLatch = new CountDownLatch(3); + // Hack to obtain concurrency: the main thread releases another thread and blocks; then the + // other thread fires an event on the bus and unblocks the main thread. + CountDownLatch eventLatch = new CountDownLatch(1); CountDownLatch initLatch = new CountDownLatch(1); - Answer<Void> mockInit = - i -> { - eventLatch.countDown(); - initLatch.await(500, TimeUnit.MILLISECONDS); - return null; - }; - for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { - doAnswer(mockInit).when(policy).init(anyMap(), any(DistanceReporter.class)); - } // When Runnable runnable = () -> { try { - eventLatch.await(500, TimeUnit.MILLISECONDS); + eventLatch.await(); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -266,15 +259,14 @@ public void should_accumulate_events_during_init_and_replay() throws Interrupted wrapper.init(); // Then - // wait for init launch to signal that runnable is complete. - initLatch.await(500, TimeUnit.MILLISECONDS); + // unblock the thread that will fire the event, and wait until it finishes + eventLatch.countDown(); + boolean ok = initLatch.await(500, TimeUnit.MILLISECONDS); + assertThat(ok).isTrue(); for (LoadBalancingPolicy policy : ImmutableList.of(policy1, policy2, policy3)) { verify(policy).onDown(node1); } - if (thread.isAlive()) { - // thread still completing - sleep to allow thread to complete. - Thread.sleep(500); - } + thread.join(500); assertThat(thread.isAlive()).isFalse(); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java index 9e449fff95d..f9a909400f9 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MetadataManagerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,8 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.fail; +import static org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; @@ -31,26 +34,24 @@ import com.datastax.oss.driver.internal.core.context.EventBus; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.context.NettyOptions; +import com.datastax.oss.driver.internal.core.control.ControlConnection; import com.datastax.oss.driver.internal.core.metadata.schema.parsing.SchemaParserFactory; import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaQueriesFactory; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; import io.netty.channel.DefaultEventLoopGroup; -import io.netty.util.concurrent.Future; import java.net.InetSocketAddress; import java.time.Duration; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.UUID; +import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -65,6 +66,7 @@ public class MetadataManagerTest { @Mock private InternalDriverContext context; @Mock private NettyOptions nettyOptions; + @Mock private ControlConnection controlConnection; @Mock private TopologyMonitor topologyMonitor; @Mock private DriverConfig config; @Mock private DriverExecutionProfile defaultProfile; @@ -86,6 +88,7 @@ public void setup() { when(context.getNettyOptions()).thenReturn(nettyOptions); when(context.getTopologyMonitor()).thenReturn(topologyMonitor); + when(context.getControlConnection()).thenReturn(controlConnection); when(defaultProfile.getDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW)) .thenReturn(Duration.ZERO); @@ -144,7 +147,7 @@ public void should_copy_contact_points_on_refresh_of_all_nodes() { // When CompletionStage refreshNodesFuture = metadataManager.refreshNodes(); - waitForPendingAdminTasks(); + waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); // Then assertThatStage(refreshNodesFuture).isSuccess(); @@ -173,7 +176,7 @@ public void should_refresh_all_nodes() { // When CompletionStage refreshNodesFuture = metadataManager.refreshNodes(); - waitForPendingAdminTasks(); + waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); // Then assertThatStage(refreshNodesFuture).isSuccess(); @@ -189,6 +192,7 @@ public void should_refresh_single_node() { NodeInfo info = mock(NodeInfo.class); 
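+ // Stub only the NodeInfo fields the single-node refresh path reads: datacenter, host id, and endpoint.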
when(info.getDatacenter()).thenReturn("dc1"); when(info.getHostId()).thenReturn(UUID.randomUUID()); + when(info.getEndPoint()).thenReturn(node.getEndPoint()); when(topologyMonitor.refreshNode(node)) .thenReturn(CompletableFuture.completedFuture(Optional.of(info))); @@ -227,7 +231,7 @@ public void should_add_node() { // When metadataManager.addNode(broadcastRpcAddress); - waitForPendingAdminTasks(); + waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); // Then assertThat(metadataManager.refreshes).hasSize(1); @@ -250,7 +254,7 @@ public void should_not_add_node_if_broadcast_rpc_address_does_not_match() { // When metadataManager.addNode(broadcastRpcAddress2); - waitForPendingAdminTasks(); + waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); // Then assertThat(metadataManager.refreshes).isEmpty(); @@ -265,7 +269,7 @@ public void should_not_add_node_if_topology_monitor_does_not_have_info() { // When metadataManager.addNode(broadcastRpcAddress2); - waitForPendingAdminTasks(); + waitForPendingAdminTasks(() -> metadataManager.addNodeCount == 1); // Then assertThat(metadataManager.refreshes).isEmpty(); @@ -278,7 +282,7 @@ public void should_remove_node() { // When metadataManager.removeNode(broadcastRpcAddress2); - waitForPendingAdminTasks(); + waitForPendingAdminTasks(() -> metadataManager.removeNodeCount == 1); // Then assertThat(metadataManager.refreshes).hasSize(1); @@ -286,9 +290,34 @@ public void should_remove_node() { assertThat(refresh.broadcastRpcAddressToRemove).isEqualTo(broadcastRpcAddress2); } + @Test + public void refreshSchema_should_work() { + // Given + IllegalStateException expectedException = new IllegalStateException("Error we're testing"); + when(schemaQueriesFactory.newInstance()).thenThrow(expectedException); + when(topologyMonitor.refreshNodeList()) + .thenReturn(CompletableFuture.completedFuture(ImmutableList.of(mock(NodeInfo.class)))); + when(topologyMonitor.checkSchemaAgreement()) + .thenReturn(CompletableFuture.completedFuture(Boolean.TRUE)); + when(controlConnection.init(anyBoolean(), anyBoolean(), anyBoolean())) + .thenReturn(CompletableFuture.completedFuture(null)); + metadataManager.refreshNodes(); // required internal state setup for this + waitForPendingAdminTasks(() -> metadataManager.refreshes.size() == 1); // sanity check + + // When + CompletionStage result = + metadataManager.refreshSchema("foo", true, true); + + // Then + waitForPendingAdminTasks(() -> result.toCompletableFuture().isDone()); + assertThatStage(result).isFailed(t -> assertThat(t).isEqualTo(expectedException)); + } + private static class TestMetadataManager extends MetadataManager { private List refreshes = new CopyOnWriteArrayList<>(); + private volatile int addNodeCount = 0; + private volatile int removeNodeCount = 0; public TestMetadataManager(InternalDriverContext context) { super(context); @@ -300,18 +329,28 @@ Void apply(MetadataRefresh refresh) { refreshes.add(refresh); return null; } + + @Override + public void addNode(InetSocketAddress broadcastRpcAddress) { + // Keep track of addNode calls for condition checking + synchronized (this) { + ++addNodeCount; + } + super.addNode(broadcastRpcAddress); + } + + @Override + public void removeNode(InetSocketAddress broadcastRpcAddress) { + // Keep track of removeNode calls for condition checking + synchronized (this) { + ++removeNodeCount; + } + super.removeNode(broadcastRpcAddress); + } } // Wait for all the tasks on the pool's admin executor to complete. 
- private void waitForPendingAdminTasks() { - // This works because the event loop group is single-threaded - Future f = adminEventLoopGroup.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 100, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } + private void waitForPendingAdminTasks(Callable condition) { + await().atMost(500, TimeUnit.MILLISECONDS).until(condition); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java new file mode 100644 index 00000000000..8e9f591510a --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/MultiplexingNodeStateListenerTest.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; +import com.datastax.oss.driver.api.core.session.Session; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.slf4j.LoggerFactory; + +@RunWith(MockitoJUnitRunner.Strict.class) +public class MultiplexingNodeStateListenerTest { + + @Mock private NodeStateListener child1; + @Mock private NodeStateListener child2; + @Mock private Node node; + @Mock private Session session; + + @Mock private Appender appender; + @Captor private ArgumentCaptor loggingEventCaptor; + + private Logger logger; + private Level initialLogLevel; + + @Before + public void addAppenders() { + logger = (Logger) LoggerFactory.getLogger(MultiplexingNodeStateListener.class); + initialLogLevel = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(appender); + } + + @After + public void removeAppenders() { + logger.detachAppender(appender); + logger.setLevel(initialLogLevel); + } + + @Test + public void should_register() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(); + // when + listener.register(child1); + listener.register(child2); + // then + assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); + } + + @Test + public void should_flatten_child_multiplexing_listener_via_constructor() { + // given + MultiplexingNodeStateListener listener = + new MultiplexingNodeStateListener(new MultiplexingNodeStateListener(child1, child2)); + // when + // then + assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); + } + + @Test + public void should_flatten_child_multiplexing_listener_via_register() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(); + // when + listener.register(new MultiplexingNodeStateListener(child1, child2)); + // then + assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); + } + + @Test + public void should_notify_onUp() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onUp(node); + // when + listener.onUp(node); + // then + verify(child1).onUp(node); + verify(child2).onUp(node); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying node state listener child1 of an onUp event. 
(NullPointerException: null)"); + } + + @Test + public void should_notify_onDown() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onDown(node); + // when + listener.onDown(node); + // then + verify(child1).onDown(node); + verify(child2).onDown(node); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying node state listener child1 of an onDown event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onAdd() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onAdd(node); + // when + listener.onAdd(node); + // then + verify(child1).onAdd(node); + verify(child2).onAdd(node); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying node state listener child1 of an onAdd event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onRemove() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onRemove(node); + // when + listener.onRemove(node); + // then + verify(child1).onRemove(node); + verify(child2).onRemove(node); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying node state listener child1 of an onRemove event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onSessionReady() { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onSessionReady(session); + // when + listener.onSessionReady(session); + // then + verify(child1).onSessionReady(session); + verify(child2).onSessionReady(session); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying node state listener child1 of an onSessionReady event. (NullPointerException: null)"); + } + + @Test + public void should_notify_close() throws Exception { + // given + MultiplexingNodeStateListener listener = new MultiplexingNodeStateListener(child1, child2); + Exception child1Error = new NullPointerException(); + willThrow(child1Error).given(child1).close(); + // when + listener.close(); + // then + verify(child1).close(); + verify(child2).close(); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while closing node state listener child1. 
(NullPointerException: null)"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java index 347185bce80..d99b06a33ae 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/NodeStateManagerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -94,7 +96,7 @@ public void setup() { .put(node1.getHostId(), node1) .put(node2.getHostId(), node2) .build(); - Metadata metadata = new DefaultMetadata(nodes, Collections.emptyMap(), null); + Metadata metadata = new DefaultMetadata(nodes, Collections.emptyMap(), null, null); when(metadataManager.getMetadata()).thenReturn(metadata); when(metadataManager.refreshNode(any(Node.class))) .thenReturn(CompletableFuture.completedFuture(null)); @@ -147,7 +149,7 @@ public void should_apply_up_event_if_node_is_unknown_or_down() { } @Test - public void should_add_node_if_up_event_and_not_in_metadata() { + public void should_refresh_node_list_if_up_event_and_not_in_metadata() { // Given new NodeStateManager(context); @@ -157,7 +159,7 @@ public void should_add_node_if_up_event_and_not_in_metadata() { // Then verify(eventBus, never()).fire(any(NodeStateEvent.class)); - verify(metadataManager).addNode(NEW_ADDRESS); + verify(metadataManager).refreshNodes(); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java new file mode 100644 index 00000000000..c1a189259d7 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/PeerRowValidatorTest.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.datastax.oss.driver.internal.core.metadata; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class PeerRowValidatorTest { + + @DataProvider + public static Object[][] nullColumnsV1() { + return new Object[][] { + {"rpc_address"}, {"host_id"}, {"data_center"}, {"rack"}, {"tokens"}, {"schema_version"} + }; + } + + @DataProvider + public static Object[][] nullColumnsV2() { + return new Object[][] { + {"native_address"}, + {"native_port"}, + {"host_id"}, + {"data_center"}, + {"rack"}, + {"tokens"}, + {"schema_version"} + }; + } + + @Test + @UseDataProvider("nullColumnsV1") + public void should_fail_for_invalid_peer_v1(String nullColumn) { + assertThat(PeerRowValidator.isValid(mockRowV1(nullColumn))).isFalse(); + } + + @Test + @UseDataProvider("nullColumnsV2") + public void should_fail_for_invalid_peer_v2(String nullColumn) { + assertThat(PeerRowValidator.isValid(mockRowV2(nullColumn))).isFalse(); + } + + @Test + public void should_succeed_for_valid_peer_v1() { + AdminRow peerRow = mock(AdminRow.class); + when(peerRow.isNull("host_id")).thenReturn(false); + when(peerRow.isNull("rpc_address")).thenReturn(false); + when(peerRow.isNull("native_address")).thenReturn(true); + when(peerRow.isNull("native_port")).thenReturn(true); + when(peerRow.isNull("data_center")).thenReturn(false); + when(peerRow.isNull("rack")).thenReturn(false); + when(peerRow.isNull("tokens")).thenReturn(false); + when(peerRow.isNull("schema_version")).thenReturn(false); + + assertThat(PeerRowValidator.isValid(peerRow)).isTrue(); + } + + @Test + public void should_succeed_for_valid_peer_v2() { + AdminRow peerRow = mock(AdminRow.class); + when(peerRow.isNull("host_id")).thenReturn(false); + when(peerRow.isNull("rpc_address")).thenReturn(true); + when(peerRow.isNull("native_address")).thenReturn(false); + when(peerRow.isNull("native_port")).thenReturn(false); + when(peerRow.isNull("data_center")).thenReturn(false); + when(peerRow.isNull("rack")).thenReturn(false); + when(peerRow.isNull("tokens")).thenReturn(false); + when(peerRow.isNull("schema_version")).thenReturn(false); + + assertThat(PeerRowValidator.isValid(peerRow)).isTrue(); + } + + private AdminRow mockRowV1(String nullColumn) { + AdminRow peerRow = mock(AdminRow.class); + when(peerRow.isNull("host_id")).thenReturn(nullColumn.equals("host_id")); + when(peerRow.isNull("rpc_address")).thenReturn(nullColumn.equals("rpc_address")); + when(peerRow.isNull("native_address")).thenReturn(true); + when(peerRow.isNull("native_port")).thenReturn(true); + when(peerRow.isNull("data_center")).thenReturn(nullColumn.equals("data_center")); + when(peerRow.isNull("rack")).thenReturn(nullColumn.equals("rack")); + when(peerRow.isNull("tokens")).thenReturn(nullColumn.equals("tokens")); + when(peerRow.isNull("schema_version")).thenReturn(nullColumn.equals("schema_version")); + + return peerRow; + } + + private AdminRow mockRowV2(String nullColumn) { + AdminRow peerRow = mock(AdminRow.class); + when(peerRow.isNull("host_id")).thenReturn(nullColumn.equals("host_id")); + 
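+ // In the v2 shape, rpc_address is always absent; only the column under test is nulled out.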
when(peerRow.isNull("native_address")).thenReturn(nullColumn.equals("native_address")); + when(peerRow.isNull("native_port")).thenReturn(nullColumn.equals("native_port")); + when(peerRow.isNull("rpc_address")).thenReturn(true); + when(peerRow.isNull("data_center")).thenReturn(nullColumn.equals("data_center")); + when(peerRow.isNull("rack")).thenReturn(nullColumn.equals("rack")); + when(peerRow.isNull("tokens")).thenReturn(nullColumn.equals("tokens")); + when(peerRow.isNull("schema_version")).thenReturn(nullColumn.equals("schema_version")); + + return peerRow; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java index 29053f2b08e..f2a4b36a3c3 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/RemoveNodeRefreshTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +20,7 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static org.mockito.Mockito.when; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; @@ -33,6 +36,7 @@ public class RemoveNodeRefreshTest { @Mock private InternalDriverContext context; @Mock protected MetricsFactory metricsFactory; + @Mock private ChannelFactory channelFactory; private DefaultNode node1; private DefaultNode node2; @@ -40,6 +44,7 @@ public class RemoveNodeRefreshTest { @Before public void setup() { when(context.getMetricsFactory()).thenReturn(metricsFactory); + when(context.getChannelFactory()).thenReturn(channelFactory); node1 = TestNodeFactory.newNode(1, context); node2 = TestNodeFactory.newNode(2, context); } @@ -51,6 +56,7 @@ public void should_remove_existing_node() { new DefaultMetadata( ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2), Collections.emptyMap(), + null, null); RemoveNodeRefresh refresh = new RemoveNodeRefresh(node2.getBroadcastRpcAddress().get()); @@ -67,7 +73,7 @@ public void should_not_remove_nonexistent_node() { // Given DefaultMetadata oldMetadata = new DefaultMetadata( - ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null); + ImmutableMap.of(node1.getHostId(), node1), Collections.emptyMap(), null, null); RemoveNodeRefresh refresh = new RemoveNodeRefresh(node2.getBroadcastRpcAddress().get()); // When diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java index dc143327ecb..5e0dfbd7802 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/SchemaAgreementCheckerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,11 +37,15 @@ import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.Iterators; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; import io.netty.channel.EventLoop; import java.time.Duration; import java.util.ArrayDeque; import java.util.Arrays; import java.util.Map; +import java.util.Objects; import java.util.Queue; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -49,14 +55,16 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.MockitoAnnotations; -@RunWith(MockitoJUnitRunner.class) +@RunWith(DataProviderRunner.class) public class SchemaAgreementCheckerTest { private static final UUID VERSION1 = UUID.randomUUID(); private static final UUID VERSION2 = UUID.randomUUID(); + private static final UUID NODE_2_HOST_ID = UUID.randomUUID(); + @Mock private InternalDriverContext context; @Mock private DriverConfig config; @Mock private DriverExecutionProfile defaultConfig; @@ -70,10 +78,11 @@ public class SchemaAgreementCheckerTest { @Before public void setup() { + MockitoAnnotations.initMocks(this); when(context.getMetricsFactory()).thenReturn(metricsFactory); node1 = TestNodeFactory.newNode(1, context); - node2 = TestNodeFactory.newNode(2, context); + node2 = TestNodeFactory.newNode(2, NODE_2_HOST_ID, context); when(defaultConfig.getDuration(DefaultDriverOption.CONTROL_CONNECTION_TIMEOUT)) .thenReturn(Duration.ofSeconds(1)); @@ -86,7 +95,12 @@ public void setup() { when(config.getDefaultProfile()).thenReturn(defaultConfig); when(context.getConfig()).thenReturn(config); - Map nodes = ImmutableMap.of(node1.getHostId(), node1, node2.getHostId(), node2); + Map nodes = + ImmutableMap.of( + Objects.requireNonNull(node1.getHostId()), + node1, + Objects.requireNonNull(node2.getHostId()), + node2); when(metadata.getNodes()).thenReturn(nodes); 
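+ // The checker reaches the node list via context -> metadataManager -> metadata, so stub each link.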
when(metadataManager.getMetadata()).thenReturn(metadata); when(context.getMetadataManager()).thenReturn(metadataManager); @@ -124,9 +138,8 @@ public void should_succeed_if_only_one_node() { checker.stubQueries( new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", mockResult(/*empty*/ ))); + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", mockResult(/*empty*/ ))); // When CompletionStage future = checker.run(); @@ -142,10 +155,8 @@ public void should_succeed_if_versions_match_on_first_try() { checker.stubQueries( new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", - mockResult(mockRow(node2.getHostId(), VERSION1)))); + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); // When CompletionStage future = checker.run(); @@ -162,10 +173,8 @@ public void should_ignore_down_peers() { checker.stubQueries( new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", - mockResult(mockRow(node2.getHostId(), VERSION2)))); + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION2)))); // When CompletionStage future = checker.run(); @@ -174,17 +183,34 @@ public void should_ignore_down_peers() { assertThatStage(future).isSuccess(b -> assertThat(b).isTrue()); } + @DataProvider + public static Object[][] malformedPeer() { + return new Object[][] { + // missing host id + {mockPeerRow(null, VERSION2, true, true, true, true)}, + // missing schema version + {mockPeerRow(NODE_2_HOST_ID, null, true, true, true, true)}, + // missing datacenter + {mockPeerRow(NODE_2_HOST_ID, VERSION2, false, true, true, true)}, + // missing rack + {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, false, true, true)}, + // missing RPC address + {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, true, false, true)}, + // missing tokens + {mockPeerRow(NODE_2_HOST_ID, VERSION2, true, true, true, false)}, + }; + } + @Test - public void should_ignore_malformed_rows() { + @UseDataProvider("malformedPeer") + public void should_ignore_malformed_rows(AdminRow malformedPeer) { // Given TestSchemaAgreementChecker checker = new TestSchemaAgreementChecker(channel, context); checker.stubQueries( new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", - mockResult(mockRow(null, VERSION2)))); // missing host_id + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", mockResult(malformedPeer))); // When CompletionStage future = checker.run(); @@ -201,18 +227,14 @@ public void should_reschedule_if_versions_do_not_match_on_first_try() { // First round new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", - mockResult(mockRow(node2.getHostId(), VERSION2))), + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", 
mockResult(mockValidPeerRow(VERSION2))), // Second round new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", - mockResult(mockRow(node2.getHostId(), VERSION1)))); + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); // When CompletionStage future = checker.run(); @@ -230,10 +252,8 @@ public void should_fail_if_versions_do_not_match_after_timeout() { checker.stubQueries( new StubbedQuery( "SELECT schema_version FROM system.local WHERE key='local'", - mockResult(mockRow(null, VERSION1))), - new StubbedQuery( - "SELECT host_id, schema_version FROM system.peers", - mockResult(mockRow(node2.getHostId(), VERSION1)))); + mockResult(mockLocalRow(VERSION1))), + new StubbedQuery("SELECT * FROM system.peers", mockResult(mockValidPeerRow(VERSION1)))); // When CompletionStage future = checker.run(); @@ -274,10 +294,35 @@ private StubbedQuery(String queryString, AdminResult result) { } } - private AdminRow mockRow(UUID hostId, UUID schemaVersion) { + private AdminRow mockLocalRow(@SuppressWarnings("SameParameterValue") UUID schemaVersion) { + AdminRow row = mock(AdminRow.class); + when(row.getUuid("host_id")).thenReturn(node1.getHostId()); + when(row.getUuid("schema_version")).thenReturn(schemaVersion); + return row; + } + + private AdminRow mockValidPeerRow(UUID schemaVersion) { + return mockPeerRow(node2.getHostId(), schemaVersion, true, true, true, true); + } + + private static AdminRow mockPeerRow( + UUID hostId, + UUID schemaVersion, + boolean hasDatacenter, + boolean hasRack, + boolean hasRpcAddress, + boolean hasTokens) { AdminRow row = mock(AdminRow.class); when(row.getUuid("host_id")).thenReturn(hostId); + when(row.isNull("host_id")).thenReturn(hostId == null); when(row.getUuid("schema_version")).thenReturn(schemaVersion); + when(row.isNull("schema_version")).thenReturn(schemaVersion == null); + when(row.isNull("data_center")).thenReturn(!hasDatacenter); + when(row.isNull("rack")).thenReturn(!hasRack); + when(row.isNull("tokens")).thenReturn(!hasTokens); + when(row.isNull("rpc_address")).thenReturn(!hasRpcAddress); + when(row.isNull("native_address")).thenReturn(true); + when(row.isNull("native_port")).thenReturn(true); return row; } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java index 3866bbf8ddb..7986834bca2 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/TestNodeFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,13 +24,24 @@ public class TestNodeFactory { public static DefaultNode newNode(int lastIpByte, InternalDriverContext context) { - DefaultEndPoint endPoint = newEndPoint(lastIpByte); - DefaultNode node = new DefaultNode(endPoint, context); + DefaultNode node = newContactPoint(lastIpByte, context); node.hostId = UUID.randomUUID(); - node.broadcastRpcAddress = endPoint.resolve(); + node.broadcastRpcAddress = ((InetSocketAddress) node.getEndPoint().resolve()); + return node; + } + + public static DefaultNode newNode(int lastIpByte, UUID hostId, InternalDriverContext context) { + DefaultNode node = newContactPoint(lastIpByte, context); + node.hostId = hostId; + node.broadcastRpcAddress = ((InetSocketAddress) node.getEndPoint().resolve()); return node; } + public static DefaultNode newContactPoint(int lastIpByte, InternalDriverContext context) { + DefaultEndPoint endPoint = newEndPoint(lastIpByte); + return new DefaultNode(endPoint, context); + } + public static DefaultEndPoint newEndPoint(int lastByteOfIp) { return new DefaultEndPoint(new InetSocketAddress("127.0.0." + lastByteOfIp, 9042)); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java new file mode 100644 index 00000000000..b772d243976 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/IndexMetadataTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; +import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import org.junit.Test; + +public class IndexMetadataTest { + + @Test + public void should_describe_custom_index_class_correctly() { + IndexMetadata indexMetadata = + new DefaultIndexMetadata( + CqlIdentifier.fromCql("ks1"), + CqlIdentifier.fromCql("myTable"), + CqlIdentifier.fromCql("myName"), + IndexKind.CUSTOM, + "myTarget", + ImmutableMap.of("class_name", "com.datastax.MyClass")); + String describe = indexMetadata.describe(true); + assertThat(describe) + .contains( + "CREATE CUSTOM INDEX myname ON ks1.mytable (myTarget)\n" + + "USING 'com.datastax.MyClass'"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java new file mode 100644 index 00000000000..a7dee02f5e3 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/MultiplexingSchemaChangeListenerTest.java @@ -0,0 +1,452 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.willThrow; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.slf4j.LoggerFactory; + +@RunWith(MockitoJUnitRunner.Strict.class) +public class MultiplexingSchemaChangeListenerTest { + + @Mock private SchemaChangeListener child1; + @Mock private SchemaChangeListener child2; + @Mock private Session session; + @Mock private KeyspaceMetadata keyspace1, keyspace2; + @Mock private TableMetadata table1, table2; + @Mock private UserDefinedType userDefinedType1, userDefinedType2; + @Mock private FunctionMetadata function1, function2; + @Mock private AggregateMetadata aggregate1, aggregate2; + @Mock private ViewMetadata view1, view2; + + @Mock private Appender appender; + @Captor private ArgumentCaptor loggingEventCaptor; + + private Logger logger; + private Level initialLogLevel; + + @Before + public void addAppenders() { + logger = (Logger) LoggerFactory.getLogger(MultiplexingSchemaChangeListener.class); + initialLogLevel = logger.getLevel(); + logger.setLevel(Level.WARN); + logger.addAppender(appender); + } + + @After + public void removeAppenders() { + logger.detachAppender(appender); + logger.setLevel(initialLogLevel); + } + + @Test + public void should_register() { + // given + MultiplexingSchemaChangeListener listener = new MultiplexingSchemaChangeListener(); + // when + listener.register(child1); + listener.register(child2); + // then + assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); + } + + @Test + public void should_flatten_child_multiplexing_listener_via_constructor() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(new MultiplexingSchemaChangeListener(child1, child2)); + // when + // then + assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); + } + + @Test + public void should_flatten_child_multiplexing_listener_via_register() { + // given + MultiplexingSchemaChangeListener listener = new MultiplexingSchemaChangeListener(); + // when + listener.register(new MultiplexingSchemaChangeListener(child1, child2)); + // then + assertThat(listener).extracting("listeners").asList().hasSize(2).contains(child1, child2); + } + + @Test + public void should_notify_onKeyspaceCreated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new 
NullPointerException()).given(child1).onKeyspaceCreated(keyspace1); + // when + listener.onKeyspaceCreated(keyspace1); + // then + verify(child1).onKeyspaceCreated(keyspace1); + verify(child2).onKeyspaceCreated(keyspace1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onKeyspaceCreated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onKeyspaceDropped() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onKeyspaceDropped(keyspace1); + // when + listener.onKeyspaceDropped(keyspace1); + // then + verify(child1).onKeyspaceDropped(keyspace1); + verify(child2).onKeyspaceDropped(keyspace1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onKeyspaceDropped event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onKeyspaceUpdated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onKeyspaceUpdated(keyspace1, keyspace2); + // when + listener.onKeyspaceUpdated(keyspace1, keyspace2); + // then + verify(child1).onKeyspaceUpdated(keyspace1, keyspace2); + verify(child2).onKeyspaceUpdated(keyspace1, keyspace2); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onKeyspaceUpdated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onTableCreated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onTableCreated(table1); + // when + listener.onTableCreated(table1); + // then + verify(child1).onTableCreated(table1); + verify(child2).onTableCreated(table1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onTableCreated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onTableDropped() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onTableDropped(table1); + // when + listener.onTableDropped(table1); + // then + verify(child1).onTableDropped(table1); + verify(child2).onTableDropped(table1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onTableDropped event. 
(NullPointerException: null)"); + } + + @Test + public void should_notify_onTableUpdated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onTableUpdated(table1, table2); + // when + listener.onTableUpdated(table1, table2); + // then + verify(child1).onTableUpdated(table1, table2); + verify(child2).onTableUpdated(table1, table2); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onTableUpdated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onUserDefinedTypeCreated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onUserDefinedTypeCreated(userDefinedType1); + // when + listener.onUserDefinedTypeCreated(userDefinedType1); + // then + verify(child1).onUserDefinedTypeCreated(userDefinedType1); + verify(child2).onUserDefinedTypeCreated(userDefinedType1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeCreated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onUserDefinedTypeDropped() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onUserDefinedTypeDropped(userDefinedType1); + // when + listener.onUserDefinedTypeDropped(userDefinedType1); + // then + verify(child1).onUserDefinedTypeDropped(userDefinedType1); + verify(child2).onUserDefinedTypeDropped(userDefinedType1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeDropped event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onUserDefinedTypeUpdated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()) + .given(child1) + .onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); + // when + listener.onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); + // then + verify(child1).onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); + verify(child2).onUserDefinedTypeUpdated(userDefinedType1, userDefinedType2); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onUserDefinedTypeUpdated event. 
(NullPointerException: null)"); + } + + @Test + public void should_notify_onFunctionCreated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onFunctionCreated(function1); + // when + listener.onFunctionCreated(function1); + // then + verify(child1).onFunctionCreated(function1); + verify(child2).onFunctionCreated(function1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onFunctionCreated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onFunctionDropped() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onFunctionDropped(function1); + // when + listener.onFunctionDropped(function1); + // then + verify(child1).onFunctionDropped(function1); + verify(child2).onFunctionDropped(function1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onFunctionDropped event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onFunctionUpdated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onFunctionUpdated(function1, function2); + // when + listener.onFunctionUpdated(function1, function2); + // then + verify(child1).onFunctionUpdated(function1, function2); + verify(child2).onFunctionUpdated(function1, function2); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onFunctionUpdated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onAggregateCreated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onAggregateCreated(aggregate1); + // when + listener.onAggregateCreated(aggregate1); + // then + verify(child1).onAggregateCreated(aggregate1); + verify(child2).onAggregateCreated(aggregate1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onAggregateCreated event. 
(NullPointerException: null)"); + } + + @Test + public void should_notify_onAggregateDropped() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onAggregateDropped(aggregate1); + // when + listener.onAggregateDropped(aggregate1); + // then + verify(child1).onAggregateDropped(aggregate1); + verify(child2).onAggregateDropped(aggregate1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onAggregateDropped event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onAggregateUpdated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onAggregateUpdated(aggregate1, aggregate2); + // when + listener.onAggregateUpdated(aggregate1, aggregate2); + // then + verify(child1).onAggregateUpdated(aggregate1, aggregate2); + verify(child2).onAggregateUpdated(aggregate1, aggregate2); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onAggregateUpdated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onViewCreated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onViewCreated(view1); + // when + listener.onViewCreated(view1); + // then + verify(child1).onViewCreated(view1); + verify(child2).onViewCreated(view1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onViewCreated event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onViewDropped() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onViewDropped(view1); + // when + listener.onViewDropped(view1); + // then + verify(child1).onViewDropped(view1); + verify(child2).onViewDropped(view1); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onViewDropped event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onViewUpdated() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onViewUpdated(view1, view2); + // when + listener.onViewUpdated(view1, view2); + // then + verify(child1).onViewUpdated(view1, view2); + verify(child2).onViewUpdated(view1, view2); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onViewUpdated event. 
(NullPointerException: null)"); + } + + @Test + public void should_notify_onSessionReady() { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + willThrow(new NullPointerException()).given(child1).onSessionReady(session); + // when + listener.onSessionReady(session); + // then + verify(child1).onSessionReady(session); + verify(child2).onSessionReady(session); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while notifying schema change listener child1 of an onSessionReady event. (NullPointerException: null)"); + } + + @Test + public void should_notify_close() throws Exception { + // given + MultiplexingSchemaChangeListener listener = + new MultiplexingSchemaChangeListener(child1, child2); + Exception child1Error = new NullPointerException(); + willThrow(child1Error).given(child1).close(); + // when + listener.close(); + // then + verify(child1).close(); + verify(child2).close(); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while closing schema change listener child1. (NullPointerException: null)"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java new file mode 100644 index 00000000000..03d63230992 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/TableMetadataTest.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.schema; + +import static com.datastax.oss.driver.api.core.CqlIdentifier.fromCql; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.internal.core.type.DefaultVectorType; +import com.datastax.oss.driver.internal.core.type.PrimitiveType; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.protocol.internal.ProtocolConstants.DataType; +import com.google.common.collect.ImmutableList; +import java.util.UUID; +import org.junit.Test; + +public class TableMetadataTest { + + /** Tests CASSJAVA-2 */ + @Test + public void should_describe_table_with_vector_correctly() { + TableMetadata tableMetadata = + new DefaultTableMetadata( + fromCql("ks"), + fromCql("tb"), + UUID.randomUUID(), + false, + false, + ImmutableList.of( + new DefaultColumnMetadata( + fromCql("ks"), + fromCql("ks"), + fromCql("tb"), + new PrimitiveType(DataType.ASCII), + false)), + ImmutableMap.of(), + ImmutableMap.of( + fromCql("a"), + new DefaultColumnMetadata( + fromCql("ks"), + fromCql("ks"), + fromCql("tb"), + new DefaultVectorType(new PrimitiveType(DataType.INT), 3), + false)), + ImmutableMap.of(), + ImmutableMap.of()); + + String describe1 = tableMetadata.describe(true); + + assertThat(describe1).contains("vector,"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java index 14dfe6bfb4e..9cf5ba60983 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/AggregateParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,8 +24,8 @@ import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.protocol.internal.util.Bytes; import java.util.Collections; @@ -56,8 +58,10 @@ public class AggregateParserTest extends SchemaParserTestBase { "0"); @Before + @Override public void setup() { - when(context.getCodecRegistry()).thenReturn(new DefaultCodecRegistry("test")); + super.setup(); + when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); when(context.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java index 21ff579464d..84f5c09317f 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeClassNameParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +21,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import com.datastax.oss.driver.TestDataProviders; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; @@ -27,14 +30,17 @@ import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Locale; import java.util.Map; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.MockitoAnnotations; -@RunWith(MockitoJUnitRunner.class) +@RunWith(DataProviderRunner.class) public class DataTypeClassNameParserTest { private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); @@ -44,131 +50,167 @@ public class DataTypeClassNameParserTest { @Before public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); parser = new DataTypeClassNameParser(); } @Test - public void should_parse_native_types() { - for (Map.Entry entry : - DataTypeClassNameParser.NATIVE_TYPES_BY_CLASS_NAME.entrySet()) { - - String className = entry.getKey(); - DataType expectedType = entry.getValue(); - - assertThat(parse(className)).isEqualTo(expectedType); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_native_types(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + for (Map.Entry entry : + DataTypeClassNameParser.NATIVE_TYPES_BY_CLASS_NAME.entrySet()) { + + String className = entry.getKey(); + DataType expectedType = entry.getValue(); + + assertThat(parse(className)).isEqualTo(expectedType); + } + } finally { + Locale.setDefault(def); } } @Test - public void should_parse_collection_types() { - assertThat( - parse( - "org.apache.cassandra.db.marshal.ListType(" - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.listOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.FrozenType(" - + ("org.apache.cassandra.db.marshal.ListType(" - + "org.apache.cassandra.db.marshal.UTF8Type))"))) - .isEqualTo(DataTypes.frozenListOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.SetType(" - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.setOf(DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.UTF8Type)")) - .isEqualTo(DataTypes.mapOf(DataTypes.TEXT, DataTypes.TEXT)); - - assertThat( - parse( - "org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.FrozenType(" - + ("org.apache.cassandra.db.marshal.MapType(" - + "org.apache.cassandra.db.marshal.Int32Type," - + "org.apache.cassandra.db.marshal.Int32Type)))"))) - .isEqualTo( - 
DataTypes.mapOf(DataTypes.TEXT, DataTypes.frozenMapOf(DataTypes.INT, DataTypes.INT))); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_collection_types(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat( + parse( + "org.apache.cassandra.db.marshal.ListType(" + + "org.apache.cassandra.db.marshal.UTF8Type)")) + .isEqualTo(DataTypes.listOf(DataTypes.TEXT)); + + assertThat( + parse( + "org.apache.cassandra.db.marshal.FrozenType(" + + ("org.apache.cassandra.db.marshal.ListType(" + + "org.apache.cassandra.db.marshal.UTF8Type))"))) + .isEqualTo(DataTypes.frozenListOf(DataTypes.TEXT)); + + assertThat( + parse( + "org.apache.cassandra.db.marshal.SetType(" + + "org.apache.cassandra.db.marshal.UTF8Type)")) + .isEqualTo(DataTypes.setOf(DataTypes.TEXT)); + + assertThat( + parse( + "org.apache.cassandra.db.marshal.MapType(" + + "org.apache.cassandra.db.marshal.UTF8Type," + + "org.apache.cassandra.db.marshal.UTF8Type)")) + .isEqualTo(DataTypes.mapOf(DataTypes.TEXT, DataTypes.TEXT)); + + assertThat( + parse( + "org.apache.cassandra.db.marshal.MapType(" + + "org.apache.cassandra.db.marshal.UTF8Type," + + "org.apache.cassandra.db.marshal.FrozenType(" + + ("org.apache.cassandra.db.marshal.MapType(" + + "org.apache.cassandra.db.marshal.Int32Type," + + "org.apache.cassandra.db.marshal.Int32Type)))"))) + .isEqualTo( + DataTypes.mapOf(DataTypes.TEXT, DataTypes.frozenMapOf(DataTypes.INT, DataTypes.INT))); + } finally { + Locale.setDefault(def); + } } @Test - public void should_parse_user_type_when_definition_not_already_available() { - UserDefinedType addressType = - (UserDefinedType) - parse( - "org.apache.cassandra.db.marshal.UserType(" - + "foo,61646472657373," - + ("737472656574:org.apache.cassandra.db.marshal.UTF8Type," - + "7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type," - + ("70686f6e6573:org.apache.cassandra.db.marshal.SetType(" - + "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," - + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," - + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)") - + "))")); - - assertThat(addressType.getKeyspace().asInternal()).isEqualTo("foo"); - assertThat(addressType.getName().asInternal()).isEqualTo("address"); - assertThat(addressType.isFrozen()).isTrue(); - assertThat(addressType.getFieldNames().size()).isEqualTo(3); - - assertThat(addressType.getFieldNames().get(0).asInternal()).isEqualTo("street"); - assertThat(addressType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - - assertThat(addressType.getFieldNames().get(1).asInternal()).isEqualTo("zipcode"); - assertThat(addressType.getFieldTypes().get(1)).isEqualTo(DataTypes.INT); - - assertThat(addressType.getFieldNames().get(2).asInternal()).isEqualTo("phones"); - DataType phonesType = addressType.getFieldTypes().get(2); - assertThat(phonesType).isInstanceOf(SetType.class); - UserDefinedType phoneType = ((UserDefinedType) ((SetType) phonesType).getElementType()); - - assertThat(phoneType.getKeyspace().asInternal()).isEqualTo("foo"); - assertThat(phoneType.getName().asInternal()).isEqualTo("phone"); - assertThat(phoneType.isFrozen()).isTrue(); - assertThat(phoneType.getFieldNames().size()).isEqualTo(2); - - assertThat(phoneType.getFieldNames().get(0).asInternal()).isEqualTo("name"); - assertThat(phoneType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); - - assertThat(phoneType.getFieldNames().get(1).asInternal()).isEqualTo("number"); - 
assertThat(phoneType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_user_type_when_definition_not_already_available(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + UserDefinedType addressType = + (UserDefinedType) + parse( + "org.apache.cassandra.db.marshal.UserType(" + + "foo,61646472657373," + + ("737472656574:org.apache.cassandra.db.marshal.UTF8Type," + + "7a6970636f6465:org.apache.cassandra.db.marshal.Int32Type," + + ("70686f6e6573:org.apache.cassandra.db.marshal.SetType(" + + "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," + + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," + + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)") + + "))")); + + assertThat(addressType.getKeyspace().asInternal()).isEqualTo("foo"); + assertThat(addressType.getName().asInternal()).isEqualTo("address"); + assertThat(addressType.isFrozen()).isTrue(); + assertThat(addressType.getFieldNames().size()).isEqualTo(3); + + assertThat(addressType.getFieldNames().get(0).asInternal()).isEqualTo("street"); + assertThat(addressType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); + + assertThat(addressType.getFieldNames().get(1).asInternal()).isEqualTo("zipcode"); + assertThat(addressType.getFieldTypes().get(1)).isEqualTo(DataTypes.INT); + + assertThat(addressType.getFieldNames().get(2).asInternal()).isEqualTo("phones"); + DataType phonesType = addressType.getFieldTypes().get(2); + assertThat(phonesType).isInstanceOf(SetType.class); + UserDefinedType phoneType = ((UserDefinedType) ((SetType) phonesType).getElementType()); + + assertThat(phoneType.getKeyspace().asInternal()).isEqualTo("foo"); + assertThat(phoneType.getName().asInternal()).isEqualTo("phone"); + assertThat(phoneType.isFrozen()).isTrue(); + assertThat(phoneType.getFieldNames().size()).isEqualTo(2); + + assertThat(phoneType.getFieldNames().get(0).asInternal()).isEqualTo("name"); + assertThat(phoneType.getFieldTypes().get(0)).isEqualTo(DataTypes.TEXT); + + assertThat(phoneType.getFieldNames().get(1).asInternal()).isEqualTo("number"); + assertThat(phoneType.getFieldTypes().get(1)).isEqualTo(DataTypes.TEXT); + } finally { + Locale.setDefault(def); + } } @Test - public void should_make_a_frozen_copy_user_type_when_definition_already_available() { - UserDefinedType existing = mock(UserDefinedType.class); - - parse( - "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," - + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," - + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)", - ImmutableMap.of(CqlIdentifier.fromInternal("phone"), existing)); - - verify(existing).copy(true); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_make_a_frozen_copy_user_type_when_definition_already_available(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + UserDefinedType existing = mock(UserDefinedType.class); + + parse( + "org.apache.cassandra.db.marshal.UserType(foo,70686f6e65," + + "6e616d65:org.apache.cassandra.db.marshal.UTF8Type," + + "6e756d626572:org.apache.cassandra.db.marshal.UTF8Type)", + ImmutableMap.of(CqlIdentifier.fromInternal("phone"), existing)); + + verify(existing).copy(true); + } finally { + Locale.setDefault(def); + } } @Test - public void should_parse_tuple() { - TupleType tupleType = - (TupleType) - parse( - "org.apache.cassandra.db.marshal.TupleType(" - + 
"org.apache.cassandra.db.marshal.Int32Type," - + "org.apache.cassandra.db.marshal.UTF8Type," - + "org.apache.cassandra.db.marshal.FloatType)"); - - assertThat(tupleType.getComponentTypes().size()).isEqualTo(3); - assertThat(tupleType.getComponentTypes().get(0)).isEqualTo(DataTypes.INT); - assertThat(tupleType.getComponentTypes().get(1)).isEqualTo(DataTypes.TEXT); - assertThat(tupleType.getComponentTypes().get(2)).isEqualTo(DataTypes.FLOAT); + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_parse_tuple(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + TupleType tupleType = + (TupleType) + parse( + "org.apache.cassandra.db.marshal.TupleType(" + + "org.apache.cassandra.db.marshal.Int32Type," + + "org.apache.cassandra.db.marshal.UTF8Type," + + "org.apache.cassandra.db.marshal.FloatType)"); + + assertThat(tupleType.getComponentTypes().size()).isEqualTo(3); + assertThat(tupleType.getComponentTypes().get(0)).isEqualTo(DataTypes.INT); + assertThat(tupleType.getComponentTypes().get(1)).isEqualTo(DataTypes.TEXT); + assertThat(tupleType.getComponentTypes().get(2)).isEqualTo(DataTypes.FLOAT); + } finally { + Locale.setDefault(def); + } } private DataType parse(String toParse) { diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java index a7000a8536e..04ebaf4d68a 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/DataTypeCqlNameParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java index ba6928472ed..ab2d2e725ea 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/FunctionParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java index 7e030628bf4..a08a6cba838 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,11 +24,11 @@ import com.datastax.oss.driver.api.core.metadata.schema.FunctionSignature; import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; import com.datastax.oss.driver.internal.core.metadata.schema.refresh.SchemaRefresh; -import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import java.util.Map; import java.util.function.Consumer; @@ -59,7 +61,7 @@ public void should_parse_legacy_keyspace_row() { @Test public void should_parse_keyspace_with_all_children() { // Needed to parse the aggregate - when(context.getCodecRegistry()).thenReturn(new DefaultCodecRegistry("test")); + when(context.getCodecRegistry()).thenReturn(CodecRegistry.DEFAULT); SchemaRefresh refresh = (SchemaRefresh) @@ -137,7 +139,8 @@ public void should_parse_multiple_keyspaces() { } private MetadataRefresh parse(Consumer builderConfig) { - CassandraSchemaRows.Builder builder = new CassandraSchemaRows.Builder(true, null, "test"); + CassandraSchemaRows.Builder builder = + new CassandraSchemaRows.Builder(NODE_3_0, keyspaceFilter, "test"); 
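// The builder now receives the Node that served the schema rows (NODE_3_0
// mocks a Cassandra 3.0 node) and an explicit KeyspaceFilter, replacing the
// former boolean flag and null; keyspaceFilter is stubbed in
// SchemaParserTestBase to include every keyspace.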
builderConfig.accept(builder); SchemaRows rows = builder.build(); return new CassandraSchemaParser(rows, context).parse(); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java index 9adce5643d9..e5f0c732f7a 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/SchemaParserTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,17 +18,23 @@ package com.datastax.oss.driver.internal.core.metadata.schema.parsing; import static org.assertj.core.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; +import com.datastax.oss.driver.internal.core.metadata.schema.queries.KeyspaceFilter; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import java.nio.ByteBuffer; +import java.util.Collections; import java.util.List; +import org.junit.Before; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @@ -34,9 +42,17 @@ @RunWith(MockitoJUnitRunner.Silent.class) public abstract class SchemaParserTestBase { + protected static final Node NODE_2_2 = mockNode(Version.V2_2_0); + protected static final Node NODE_3_0 = mockNode(Version.V3_0_0); protected static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromInternal("ks"); @Mock protected DefaultMetadata currentMetadata; @Mock protected InternalDriverContext context; + @Mock protected KeyspaceFilter keyspaceFilter; + + @Before + public void setup() { + when(keyspaceFilter.includes(anyString())).thenReturn(true); + } protected static AdminRow mockFunctionRow( String keyspace, @@ -291,4 +307,11 @@ protected static AdminRow mockLegacyKeyspaceRow(String keyspaceName) { return row; } + + private static Node mockNode(Version version) { + Node node = mock(Node.class); + when(node.getExtras()).thenReturn(Collections.emptyMap()); + when(node.getCassandraVersion()).thenReturn(version); + 
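// Stubbing getExtras() with an empty map (rather than Mockito's default of
// null) presumably lets any code that inspects node extras during parsing
// run without extra null handling.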
return node; + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java index e361fb8a39d..a316473d071 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/TableParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +18,17 @@ package com.datastax.oss.driver.internal.core.metadata.schema.parsing; import static com.datastax.oss.driver.Assertions.assertThat; +import static org.mockito.Mockito.when; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; import com.datastax.oss.driver.api.core.metadata.schema.IndexKind; import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; import com.datastax.oss.driver.internal.core.adminrequest.AdminRow; import com.datastax.oss.driver.internal.core.metadata.schema.queries.CassandraSchemaRows; import com.datastax.oss.driver.internal.core.metadata.schema.queries.SchemaRows; @@ -108,12 +113,33 @@ public void should_parse_modern_tables() { checkTable(table); - assertThat((Map) table.getOptions().get(CqlIdentifier.fromInternal("caching"))) + @SuppressWarnings("unchecked") + Map caching = + (Map) table.getOptions().get(CqlIdentifier.fromInternal("caching")); + assertThat(caching) .hasSize(2) .containsEntry("keys", "ALL") .containsEntry("rows_per_partition", "NONE"); } + /** Covers two additional Cassandra 4.0 options added in JAVA-2090. 
*/ + @Test + public void should_parse_read_repair_and_additional_write_policy() { + AdminRow tableRow40 = mockModernTableRow("ks", "foo"); + when(tableRow40.get("read_repair", TypeCodecs.TEXT)).thenReturn("NONE"); + when(tableRow40.get("additional_write_policy", TypeCodecs.TEXT)).thenReturn("40p"); + + SchemaRows rows = modernRows(tableRow40, COLUMN_ROWS_3_0, INDEX_ROWS_3_0); + TableParser parser = new TableParser(rows, context); + TableMetadata table = parser.parseTable(tableRow40, KEYSPACE_ID, Collections.emptyMap()); + + checkTable(table); + + assertThat(table.getOptions()) + .containsEntry(CqlIdentifier.fromInternal("read_repair"), "NONE") + .containsEntry(CqlIdentifier.fromInternal("additional_write_policy"), "40p"); + } + // Shared between 2.2 and 3.0 tests, all expected values are the same except the 'caching' option private void checkTable(TableMetadata table) { assertThat(table.getKeyspace().asInternal()).isEqualTo("ks"); @@ -156,29 +182,31 @@ private void checkTable(TableMetadata table) { assertThat(index.getClassName()).isNotPresent(); assertThat(index.getKind()).isEqualTo(IndexKind.COMPOSITES); assertThat(index.getTarget()).isEqualTo("v"); - assertThat( - (Map) table.getOptions().get(CqlIdentifier.fromInternal("compaction"))) + + assertThat(table.getIndex("foo_v_idx")).hasValue(index); + + @SuppressWarnings("unchecked") + Map compaction = + (Map) table.getOptions().get(CqlIdentifier.fromInternal("compaction")); + assertThat(compaction) .hasSize(2) .containsEntry("class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy") .containsEntry("mock_option", "1"); } private SchemaRows legacyRows(AdminRow tableRow, Iterable columnRows) { - return rows(tableRow, columnRows, null, false); + return rows(tableRow, columnRows, null, NODE_2_2); } private SchemaRows modernRows( AdminRow tableRow, Iterable columnRows, Iterable indexesRows) { - return rows(tableRow, columnRows, indexesRows, true); + return rows(tableRow, columnRows, indexesRows, NODE_3_0); } private SchemaRows rows( - AdminRow tableRow, - Iterable columnRows, - Iterable indexesRows, - boolean isCassandraV3) { + AdminRow tableRow, Iterable columnRows, Iterable indexesRows, Node node) { CassandraSchemaRows.Builder builder = - new CassandraSchemaRows.Builder(isCassandraV3, null, "test") + new CassandraSchemaRows.Builder(node, keyspaceFilter, "test") .withTables(ImmutableList.of(tableRow)) .withColumns(columnRows); if (indexesRows != null) { diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java index b77919d6dea..f90d07ebe6d 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/UserDefinedTypeListParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java index 6ba458bebfb..1ba471e08f5 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/parsing/ViewParserTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -86,7 +88,7 @@ public void should_parse_view() { } private SchemaRows rows(AdminRow viewRow, Iterable columnRows) { - return new CassandraSchemaRows.Builder(true, null, "test") + return new CassandraSchemaRows.Builder(NODE_3_0, keyspaceFilter, "test") .withViews(ImmutableList.of(viewRow)) .withColumns(columnRows) .build(); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java index 9fbfa0e7349..2dd216474df 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra21SchemaQueriesTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,14 +21,14 @@ import static com.datastax.oss.driver.Assertions.assertThatStage; import static org.mockito.Mockito.when; +import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import java.util.Collections; import java.util.Queue; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.LinkedBlockingDeque; import org.junit.Test; @@ -40,9 +42,10 @@ public void should_query() { when(config.getStringList( DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) .thenReturn(Collections.emptyList()); + when(node.getCassandraVersion()).thenReturn(Version.V2_1_0); SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, null, config, "test"); + new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); CompletionStage result = queries.execute(); @@ -74,6 +77,8 @@ public void should_query() { assertThatStage(result) .isSuccess( rows -> { + assertThat(rows.getNode()).isEqualTo(node); + // Keyspace assertThat(rows.keyspaces()).hasSize(2); assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); @@ -116,11 +121,8 @@ static class SchemaQueriesWithMockedChannel extends Cassandra21SchemaQueries { final Queue calls = new LinkedBlockingDeque<>(); SchemaQueriesWithMockedChannel( - DriverChannel channel, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, refreshFuture, config, logPrefix); + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); } @Override diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java index 7fd37d2541a..fd28be59120 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra22SchemaQueriesTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,9 +21,11 @@ import static com.datastax.oss.driver.Assertions.assertThatStage; import static org.mockito.Mockito.when; +import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import java.util.Collections; @@ -40,9 +44,10 @@ public void should_query() { when(config.getStringList( DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) .thenReturn(Collections.emptyList()); + when(node.getCassandraVersion()).thenReturn(Version.V2_2_0); SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, null, config, "test"); + new SchemaQueriesWithMockedChannel(driverChannel, node, null, config, "test"); CompletionStage result = queries.execute(); @@ -84,6 +89,8 @@ public void should_query() { assertThatStage(result) .isSuccess( rows -> { + assertThat(rows.getNode()).isEqualTo(node); + // Keyspace assertThat(rows.keyspaces()).hasSize(2); assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); @@ -138,10 +145,11 @@ static class SchemaQueriesWithMockedChannel extends Cassandra22SchemaQueries { SchemaQueriesWithMockedChannel( DriverChannel channel, + Node node, CompletableFuture refreshFuture, DriverExecutionProfile config, String logPrefix) { - super(channel, refreshFuture, config, logPrefix); + super(channel, node, config, logPrefix); } @Override diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java index e2792935378..3b533e89ed5 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/Cassandra3SchemaQueriesTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,15 +21,15 @@ import static com.datastax.oss.driver.Assertions.assertThatStage; import static org.mockito.Mockito.when; +import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.internal.core.adminrequest.AdminResult; import com.datastax.oss.driver.internal.core.channel.DriverChannel; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import java.util.Collections; import java.util.Queue; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.LinkedBlockingDeque; import org.junit.Before; @@ -44,6 +46,7 @@ public void setup() { when(config.getStringList( DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) .thenReturn(Collections.emptyList()); + when(node.getCassandraVersion()).thenReturn(Version.V3_0_0); } @Test @@ -57,12 +60,12 @@ public void should_query_with_keyspace_filter() { DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, Collections.emptyList())) .thenReturn(ImmutableList.of("ks1", "ks2")); - should_query_with_where_clause(" WHERE keyspace_name in ('ks1','ks2')"); + should_query_with_where_clause(" WHERE keyspace_name IN ('ks1','ks2')"); } private void should_query_with_where_clause(String whereClause) { SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, null, config, "test"); + new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); CompletionStage result = queries.execute(); // Keyspace @@ -113,6 +116,8 @@ private void should_query_with_where_clause(String whereClause) { assertThatStage(result) .isSuccess( rows -> { + assertThat(rows.getNode()).isEqualTo(node); + // Keyspace assertThat(rows.keyspaces()).hasSize(2); assertThat(rows.keyspaces().get(0).getString("keyspace_name")).isEqualTo("ks1"); @@ -178,7 +183,7 @@ private void should_query_with_where_clause(String whereClause) { @Test public void should_query_with_paging() { SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, null, config, "test"); + new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); CompletionStage result = queries.execute(); // Keyspace @@ -242,7 +247,7 @@ public void should_query_with_paging() { @Test public void should_ignore_malformed_rows() { SchemaQueriesWithMockedChannel queries = - new SchemaQueriesWithMockedChannel(driverChannel, null, config, "test"); + new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); CompletionStage result = queries.execute(); // Keyspace @@ -323,17 +328,31 @@ public void should_ignore_malformed_rows() { }); } + @Test + public void should_abort_if_query_fails() { + SchemaQueriesWithMockedChannel queries = + new SchemaQueriesWithMockedChannel(driverChannel, node, config, "test"); + CompletionStage result = queries.execute(); + + Exception mockQueryError = new Exception("mock query error"); + + Call call = queries.calls.poll(); + 
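// The keyspaces query is issued first; failing it should fail the whole refresh rather than hang it or return partial results. +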
assertThat(call.query).isEqualTo("SELECT * FROM system_schema.keyspaces"); + call.result.completeExceptionally(mockQueryError); + + channel.runPendingTasks(); + + assertThatStage(result).isFailed(throwable -> assertThat(throwable).isEqualTo(mockQueryError)); + } + /** Extends the class under test to mock the query execution logic. */ static class SchemaQueriesWithMockedChannel extends Cassandra3SchemaQueries { final Queue calls = new LinkedBlockingDeque<>(); SchemaQueriesWithMockedChannel( - DriverChannel channel, - CompletableFuture refreshFuture, - DriverExecutionProfile config, - String logPrefix) { - super(channel, refreshFuture, config, logPrefix); + DriverChannel channel, Node node, DriverExecutionProfile config, String logPrefix) { + super(channel, node, config, logPrefix); } @Override diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java new file mode 100644 index 00000000000..f9ac6c05576 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/DefaultSchemaQueriesFactoryTest.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.schema.queries; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.channel.DriverChannel; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class DefaultSchemaQueriesFactoryTest { + + enum Expected { + CASS_21(Cassandra21SchemaQueries.class), + CASS_22(Cassandra22SchemaQueries.class), + CASS_3(Cassandra3SchemaQueries.class), + CASS_4(Cassandra4SchemaQueries.class), + DSE_6_8(Dse68SchemaQueries.class); + + final Class<? extends SchemaQueries> clz; + + Expected(Class<? extends SchemaQueries> clz) { + this.clz = clz; + } + + public Class<? extends SchemaQueries> getClz() { + return clz; + } + } + + private static ImmutableList<ImmutableList<Object>> cassandraVersions = + ImmutableList.<ImmutableList<Object>>builder() + .add(ImmutableList.of("2.1.0", Optional.empty(), Expected.CASS_21)) + .add(ImmutableList.of("2.2.0", Optional.empty(), Expected.CASS_22)) + .add(ImmutableList.of("2.2.1", Optional.empty(), Expected.CASS_22)) + // Not a real version, just documenting behaviour of existing impl + .add(ImmutableList.of("2.3.0", Optional.empty(), Expected.CASS_22)) + // We now return you to real versions + .add(ImmutableList.of("3.0.0", Optional.empty(), Expected.CASS_3)) + .add(ImmutableList.of("3.0.1", Optional.empty(), Expected.CASS_3)) + .add(ImmutableList.of("3.1.0", Optional.empty(), Expected.CASS_3)) + .add(ImmutableList.of("4.0.0", Optional.empty(), Expected.CASS_4)) + .add(ImmutableList.of("4.0.1", Optional.empty(), Expected.CASS_4)) + .add(ImmutableList.of("4.1.0", Optional.empty(), Expected.CASS_4)) + .build(); + + private static ImmutableList<ImmutableList<Object>> dseVersions = + ImmutableList.<ImmutableList<Object>>builder() + // DSE 6.0.0 + .add(ImmutableList.of("4.0.0.2284", Optional.of("6.0.0"), Expected.CASS_3)) + // DSE 6.0.1 + .add(ImmutableList.of("4.0.0.2349", Optional.of("6.0.1"), Expected.CASS_3)) + // DSE 6.0.2 moved to DSE version (minus dots) in an extra element + .add(ImmutableList.of("4.0.0.602", Optional.of("6.0.2"), Expected.CASS_3)) + // DSE 6.7.0 continued with the same idea + .add(ImmutableList.of("4.0.0.670", Optional.of("6.7.0"), Expected.CASS_4)) + // DSE 6.8.0 does the same + .add(ImmutableList.of("4.0.0.680", Optional.of("6.8.0"), Expected.DSE_6_8)) + .build(); + + private static ImmutableList<ImmutableList<Object>> allVersions = + ImmutableList.<ImmutableList<Object>>builder() + .addAll(cassandraVersions) + .addAll(dseVersions) + .build(); + + @DataProvider(format = "%m %p[1] => %p[0]") + public static Iterable<ImmutableList<Object>> expected() { + + return allVersions; + } + + @Test + @UseDataProvider("expected") + public void should_return_correct_schema_queries_impl( + String cassandraVersion, Optional<String> dseVersion, Expected expected) { + + final Node mockNode = mock(Node.class); +
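// The factory chooses the implementation from the node's reported release version and, when present, its DSE_VERSION extra; the channel is a plain mock because no query is executed here. +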
when(mockNode.getCassandraVersion()).thenReturn(Version.parse(cassandraVersion)); + dseVersion.ifPresent( + versionStr -> { + when(mockNode.getExtras()) + .thenReturn( + ImmutableMap.of( + DseNodeProperties.DSE_VERSION, Version.parse(versionStr))); + }); + + DefaultSchemaQueriesFactory factory = buildFactory(); + + @SuppressWarnings("unchecked") + SchemaQueries queries = factory.newInstance(mockNode, mock(DriverChannel.class)); + + assertThat(queries.getClass()).isEqualTo(expected.getClz()); + } + + private DefaultSchemaQueriesFactory buildFactory() { + + final DriverExecutionProfile mockProfile = mock(DriverExecutionProfile.class); + final DriverConfig mockConfig = mock(DriverConfig.class); + when(mockConfig.getDefaultProfile()).thenReturn(mockProfile); + final InternalDriverContext mockInternalCtx = mock(InternalDriverContext.class); + when(mockInternalCtx.getConfig()).thenReturn(mockConfig); + + return new DefaultSchemaQueriesFactory(mockInternalCtx); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java new file mode 100644 index 00000000000..7e2f6219eac --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/KeyspaceFilterTest.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metadata.schema.queries; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import java.util.Arrays; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.Test; + +public class KeyspaceFilterTest { + + private static final ImmutableSet KEYSPACES = + ImmutableSet.of( + "system", "inventory_test", "inventory_prod", "customers_test", "customers_prod"); + + @Test + public void should_not_filter_when_no_rules() { + KeyspaceFilter filter = KeyspaceFilter.newInstance("test", Arrays.asList()); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).isEqualTo(KEYSPACES); + } + + @Test + public void should_filter_on_server_when_only_exact_rules() { + KeyspaceFilter filter = + KeyspaceFilter.newInstance( + "test", Arrays.asList("inventory_test", "customers_test", "!system")); + // Note that exact excludes are redundant in this case: either they match an include and will be + // ignored, or they don't and the keyspace is already ignored. + // We let it slide, but a warning is logged. 
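+ // (Rule syntax exercised throughout this class: a bare name is an exact include, a leading '!' makes a rule an exclude, and '/.../' delimits a regex; regex rules are evaluated client-side, so their presence disables the server-side WHERE push-down.)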
+ assertThat(filter.getWhereClause()) + .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); + } + + @Test + public void should_ignore_exact_exclude_that_collides_with_exact_include() { + KeyspaceFilter filter = + KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "!inventory_test")); + assertThat(filter.getWhereClause()).isEqualTo(" WHERE keyspace_name IN ('inventory_test')"); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test"); + + // Order does not matter + filter = KeyspaceFilter.newInstance("test", Arrays.asList("!inventory_test", "inventory_test")); + assertThat(filter.getWhereClause()).isEqualTo(" WHERE keyspace_name IN ('inventory_test')"); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test"); + } + + @Test + public void should_apply_disjoint_exact_and_regex_rules() { + KeyspaceFilter filter = + KeyspaceFilter.newInstance("test", Arrays.asList("inventory_test", "/^customers.*/")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)) + .containsOnly("inventory_test", "customers_test", "customers_prod"); + + filter = KeyspaceFilter.newInstance("test", Arrays.asList("!system", "!/^inventory.*/")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).containsOnly("customers_test", "customers_prod"); + + // The remaining cases could be simplified, but they are supported nevertheless: + /*redundant:*/ + filter = KeyspaceFilter.newInstance("test", Arrays.asList("!/^customers.*/", "inventory_test")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "inventory_prod", "system"); + + /*redundant:*/ + filter = KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!system")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).containsOnly("customers_test", "customers_prod"); + } + + @Test + public void should_apply_intersecting_exact_and_regex_rules() { + // Include all customer keyspaces except one: + KeyspaceFilter filter = + KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!customers_test")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).containsOnly("customers_prod"); + + // Exclude all customer keyspaces except one (also implies include every other keyspace): + filter = KeyspaceFilter.newInstance("test", Arrays.asList("!/^customers.*/", "customers_test")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)) + .containsOnly("customers_test", "inventory_test", "inventory_prod", "system"); + } + + @Test + public void should_apply_intersecting_regex_rules() { + KeyspaceFilter filter = + KeyspaceFilter.newInstance("test", Arrays.asList("/^customers.*/", "!/.*test$/")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).containsOnly("customers_prod"); + + // Throwing an exact name in the mix doesn't change the other rules + filter = + KeyspaceFilter.newInstance( + "test", Arrays.asList("inventory_prod", "/^customers.*/", "!/.*test$/")); + assertThat(filter.getWhereClause()).isEmpty(); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_prod", "customers_prod"); + } + + @Test + public void should_skip_malformed_rule() { + KeyspaceFilter filter = + KeyspaceFilter.newInstance("test", 
Arrays.asList("inventory_test", "customers_test", "//")); + assertThat(filter.getWhereClause()) + .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); + } + + @Test + public void should_skip_invalid_regex() { + KeyspaceFilter filter = + KeyspaceFilter.newInstance( + "test", Arrays.asList("inventory_test", "customers_test", "/*/")); + assertThat(filter.getWhereClause()) + .isEqualTo(" WHERE keyspace_name IN ('inventory_test','customers_test')"); + assertThat(apply(filter, KEYSPACES)).containsOnly("inventory_test", "customers_test"); + } + + private static Set apply(KeyspaceFilter filter, Set keyspaces) { + return keyspaces.stream().filter(filter::includes).collect(Collectors.toSet()); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java index dd309ffac37..e0da405993b 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/queries/SchemaQueriesTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java index 1f171d90611..4f124d2c4a0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/schema/refresh/SchemaRefreshTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,10 +18,12 @@ package com.datastax.oss.driver.internal.core.metadata.schema.refresh; import static com.datastax.oss.driver.Assertions.assertThat; +import static org.mockito.Mockito.when; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.internal.core.channel.ChannelFactory; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.DefaultMetadata; import com.datastax.oss.driver.internal.core.metadata.MetadataRefresh; @@ -51,10 +55,12 @@ public class SchemaRefreshTest { private static final DefaultKeyspaceMetadata OLD_KS1 = newKeyspace("ks1", true, OLD_T1, OLD_T2); @Mock private InternalDriverContext context; + @Mock private ChannelFactory channelFactory; private DefaultMetadata oldMetadata; @Before public void setup() { + when(context.getChannelFactory()).thenReturn(channelFactory); oldMetadata = DefaultMetadata.EMPTY.withSchema( ImmutableMap.of(OLD_KS1.getName(), OLD_KS1), false, context); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java index b0c0922d7fd..238f4e0687a 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ByteOrderedTokenRangeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java index fc4a8a3a7e5..3170e2dd6b2 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/DefaultTokenMapTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java index 3d14f21c741..e5c1a0fc47c 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/Murmur3TokenRangeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java index 01627628609..42dc5e69199 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/NetworkTopologyReplicationStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,9 +33,9 @@ import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.junit.Test; import org.junit.runner.RunWith; @@ -93,16 +95,15 @@ public void should_compute_for_simple_layout() { new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "1", DC2, "1"), "test"); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); // Note: this also asserts the iteration order of the sets (unlike containsEntry(token, set)) assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); assertThat(replicasByToken.get(TOKEN04)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node2, node1); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN04)); } /** 8 tokens, 4 nodes in 2 DCs in the same racks, RF = 1 in each DC. */ @@ -130,8 +131,7 @@ public void should_compute_for_simple_layout_with_multiple_nodes_per_rack() { new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "1", DC2, "1"), "test"); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); @@ -139,10 +139,10 @@ assertThat(replicasByToken.get(TOKEN03)).containsExactly(node2, node3); assertThat(replicasByToken.get(TOKEN05)).containsExactly(node3, node4); assertThat(replicasByToken.get(TOKEN07)).containsExactly(node4, node1); - assertThat(replicasByToken.get(TOKEN13)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node2, node3); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node3, node4); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node4, node1); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN03)); + assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN05)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN07)); } /** 6 tokens, 3 nodes in 3 DCs, RF = 1 in each DC.
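The isSameAs assertions in these tests document that the updated implementation shares a single Set instance between tokens whose replica lists are identical.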
*/ @@ -167,17 +167,16 @@ public void should_compute_for_simple_layout_with_3_dcs() { ImmutableMap.of(DC1, "1", DC2, "1", DC3, "1"), "test"); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3); assertThat(replicasByToken.get(TOKEN05)).containsExactly(node2, node3, node1); assertThat(replicasByToken.get(TOKEN09)).containsExactly(node3, node1, node2); - assertThat(replicasByToken.get(TOKEN11)).containsExactly(node1, node2, node3); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node2, node3, node1); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node3, node1, node2); + assertThat(replicasByToken.get(TOKEN11)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN05)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN09)); } /** 10 tokens, 4 nodes in 2 DCs, RF = 2 in each DC, 1 node owns 4 tokens, the others only 2. */ @@ -209,21 +208,20 @@ public void should_compute_for_unbalanced_ring() { new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "2", DC2, "2"), "test"); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN03)).containsExactly(node1, node2, node3, node4); + assertThat(replicasByToken.get(TOKEN03)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN05)).containsExactly(node2, node3, node4, node1); assertThat(replicasByToken.get(TOKEN07)).containsExactly(node3, node4, node1, node2); assertThat(replicasByToken.get(TOKEN09)).containsExactly(node4, node1, node2, node3); - assertThat(replicasByToken.get(TOKEN11)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN13)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node2, node3, node4, node1); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node3, node4, node1, node2); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node4, node1, node2, node3); + assertThat(replicasByToken.get(TOKEN11)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN05)); + assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN07)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN09)); } /** 16 tokens, 8 nodes in 2 DCs with 2 per rack, RF = 2 in each DC.
*/ @@ -265,8 +264,7 @@ public void should_compute_with_multiple_racks_per_dc() { new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "2", DC2, "2"), "test"); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); @@ -278,14 +276,14 @@ assertThat(replicasByToken.get(TOKEN06)).containsExactly(node6, node7, node8, node1); assertThat(replicasByToken.get(TOKEN07)).containsExactly(node7, node8, node1, node2); assertThat(replicasByToken.get(TOKEN08)).containsExactly(node8, node1, node2, node3); - assertThat(replicasByToken.get(TOKEN12)).containsExactly(node1, node2, node3, node4); - assertThat(replicasByToken.get(TOKEN13)).containsExactly(node2, node3, node4, node5); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node3, node4, node5, node6); - assertThat(replicasByToken.get(TOKEN15)).containsExactly(node4, node5, node6, node7); - assertThat(replicasByToken.get(TOKEN16)).containsExactly(node5, node6, node7, node8); - assertThat(replicasByToken.get(TOKEN17)).containsExactly(node6, node7, node8, node1); - assertThat(replicasByToken.get(TOKEN18)).containsExactly(node7, node8, node1, node2); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node8, node1, node2, node3); + assertThat(replicasByToken.get(TOKEN12)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN02)); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN03)); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN04)); + assertThat(replicasByToken.get(TOKEN16)).isSameAs(replicasByToken.get(TOKEN05)); + assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN06)); + assertThat(replicasByToken.get(TOKEN18)).isSameAs(replicasByToken.get(TOKEN07)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN08)); } /** @@ -334,8 +332,7 @@ public void should_pick_dc_replicas_in_different_racks_first() { new NetworkTopologyReplicationStrategy(ImmutableMap.of(DC1, "3", DC2, "3"), "test"); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); @@ -355,22 +352,14 @@ .containsExactly(node7, node8, node1, node2, node3, node4); assertThat(replicasByToken.get(TOKEN08)) .containsExactly(node8, node1, node2, node4, node5, node3); - assertThat(replicasByToken.get(TOKEN12)) - .containsExactly(node1, node2, node5, node3, node6, node4); - assertThat(replicasByToken.get(TOKEN13)) - .containsExactly(node2, node3, node5, node6, node4, node7); - assertThat(replicasByToken.get(TOKEN14)) - .containsExactly(node3, node4, node5, node6, node7, node8); - assertThat(replicasByToken.get(TOKEN15)) - .containsExactly(node4, node5, node6, node8, node1, node7); - assertThat(replicasByToken.get(TOKEN16)) - .containsExactly(node5, node6, node1, node7, node2, node8); - assertThat(replicasByToken.get(TOKEN17)) - .containsExactly(node6, node7, node1, node2, node8, node3); - assertThat(replicasByToken.get(TOKEN18)) - .containsExactly(node7, node8, node1, node2, node3, node4); -
assertThat(replicasByToken.get(TOKEN19)) - .containsExactly(node8, node1, node2, node4, node5, node3); + assertThat(replicasByToken.get(TOKEN12)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN02)); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN03)); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN04)); + assertThat(replicasByToken.get(TOKEN16)).isSameAs(replicasByToken.get(TOKEN05)); + assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN06)); + assertThat(replicasByToken.get(TOKEN18)).isSameAs(replicasByToken.get(TOKEN07)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN08)); } /** @@ -382,42 +371,34 @@ public void should_pick_dc_replicas_in_different_racks_first() { @Test public void should_pick_dc_replicas_in_different_racks_first_when_nodes_own_consecutive_tokens() { // When - SetMultimap<Token, Node> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(3); + Map<Token, Set<Node>> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(3); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(16); assertThat(replicasByToken.get(TOKEN01)) .containsExactly(node1, node5, node3, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN02)) - .containsExactly(node1, node5, node3, node2, node6, node4); + assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN03)) .containsExactly(node3, node5, node7, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN04)) - .containsExactly(node3, node5, node7, node2, node6, node4); + assertThat(replicasByToken.get(TOKEN04)).isSameAs(replicasByToken.get(TOKEN03)); assertThat(replicasByToken.get(TOKEN05)) .containsExactly(node5, node2, node6, node4, node1, node7); - assertThat(replicasByToken.get(TOKEN06)) - .containsExactly(node5, node2, node6, node4, node1, node7); + assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN05)); assertThat(replicasByToken.get(TOKEN07)) .containsExactly(node7, node2, node6, node4, node1, node3); - assertThat(replicasByToken.get(TOKEN08)) - .containsExactly(node7, node2, node6, node4, node1, node3); + assertThat(replicasByToken.get(TOKEN08)).isSameAs(replicasByToken.get(TOKEN07)); assertThat(replicasByToken.get(TOKEN12)) .containsExactly(node2, node6, node4, node1, node5, node3); - assertThat(replicasByToken.get(TOKEN13)) - .containsExactly(node2, node6, node4, node1, node5, node3); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); assertThat(replicasByToken.get(TOKEN14)) .containsExactly(node4, node6, node8, node1, node5, node3); - assertThat(replicasByToken.get(TOKEN15)) - .containsExactly(node4, node6, node8, node1, node5, node3); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN14)); assertThat(replicasByToken.get(TOKEN16)) .containsExactly(node6, node1, node5, node3, node2, node8); - assertThat(replicasByToken.get(TOKEN17)) - .containsExactly(node6, node1, node5, node3, node2, node8); + assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN16)); assertThat(replicasByToken.get(TOKEN18)) .containsExactly(node8, node1, node5, node3, node2, node4); - assertThat(replicasByToken.get(TOKEN19)) - .containsExactly(node8, node1, node5, node3, node2, node4); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN18)); } /** @@ -430,45 +411,37 @@ public void
should_pick_dc_replicas_in_different_racks_first_when_nodes_own_cons @Test public void should_pick_dc_replicas_in_different_racks_first_when_all_nodes_contain_all_data() { // When - SetMultimap<Token, Node> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(4); + Map<Token, Set<Node>> replicasByToken = computeWithDifferentRacksAndConsecutiveTokens(4); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(16); assertThat(replicasByToken.get(TOKEN01)) .containsExactly(node1, node5, node3, node7, node2, node6, node4, node8); - assertThat(replicasByToken.get(TOKEN02)) - .containsExactly(node1, node5, node3, node7, node2, node6, node4, node8); + assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN03)) .containsExactly(node3, node5, node7, node2, node6, node4, node8, node1); - assertThat(replicasByToken.get(TOKEN04)) - .containsExactly(node3, node5, node7, node2, node6, node4, node8, node1); + assertThat(replicasByToken.get(TOKEN04)).isSameAs(replicasByToken.get(TOKEN03)); assertThat(replicasByToken.get(TOKEN05)) .containsExactly(node5, node2, node6, node4, node8, node1, node7, node3); - assertThat(replicasByToken.get(TOKEN06)) - .containsExactly(node5, node2, node6, node4, node8, node1, node7, node3); + assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN05)); assertThat(replicasByToken.get(TOKEN07)) .containsExactly(node7, node2, node6, node4, node8, node1, node3, node5); - assertThat(replicasByToken.get(TOKEN08)) - .containsExactly(node7, node2, node6, node4, node8, node1, node3, node5); + assertThat(replicasByToken.get(TOKEN08)).isSameAs(replicasByToken.get(TOKEN07)); assertThat(replicasByToken.get(TOKEN12)) .containsExactly(node2, node6, node4, node8, node1, node5, node3, node7); - assertThat(replicasByToken.get(TOKEN13)) - .containsExactly(node2, node6, node4, node8, node1, node5, node3, node7); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); assertThat(replicasByToken.get(TOKEN14)) .containsExactly(node4, node6, node8, node1, node5, node3, node7, node2); - assertThat(replicasByToken.get(TOKEN15)) - .containsExactly(node4, node6, node8, node1, node5, node3, node7, node2); + assertThat(replicasByToken.get(TOKEN15)).isSameAs(replicasByToken.get(TOKEN14)); assertThat(replicasByToken.get(TOKEN16)) .containsExactly(node6, node1, node5, node3, node7, node2, node8, node4); - assertThat(replicasByToken.get(TOKEN17)) - .containsExactly(node6, node1, node5, node3, node7, node2, node8, node4); + assertThat(replicasByToken.get(TOKEN17)).isSameAs(replicasByToken.get(TOKEN16)); assertThat(replicasByToken.get(TOKEN18)) .containsExactly(node8, node1, node5, node3, node7, node2, node4, node6); - assertThat(replicasByToken.get(TOKEN19)) - .containsExactly(node8, node1, node5, node3, node7, node2, node4, node6); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN18)); } - private SetMultimap<Token, Node> computeWithDifferentRacksAndConsecutiveTokens( + private Map<Token, Set<Node>> computeWithDifferentRacksAndConsecutiveTokens( int replicationFactor) { List<Token> ring = ImmutableList.of( @@ -518,15 +491,15 @@ private SetMultimap<Token, Node> computeWithDifferentRacksAndConsecutiveTokens( @Test public void should_compute_complex_layout() { // When - SetMultimap<Token, Node> replicasByToken = computeComplexLayout(2); + Map<Token, Set<Node>> replicasByToken = computeComplexLayout(2); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(18); assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node5, node2, node6); -
assertThat(replicasByToken.get(TOKEN02)).containsExactly(node1, node5, node2, node6); + assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN03)).containsExactly(node5, node3, node2, node6); assertThat(replicasByToken.get(TOKEN04)).containsExactly(node3, node5, node2, node6); - assertThat(replicasByToken.get(TOKEN05)).containsExactly(node1, node5, node2, node6); + assertThat(replicasByToken.get(TOKEN05)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN06)).containsExactly(node5, node2, node6, node3); assertThat(replicasByToken.get(TOKEN07)).containsExactly(node2, node6, node3, node5); assertThat(replicasByToken.get(TOKEN08)).containsExactly(node6, node3, node4, node5); @@ -534,8 +507,8 @@ public void should_compute_complex_layout() { assertThat(replicasByToken.get(TOKEN10)).containsExactly(node4, node5, node6, node3); assertThat(replicasByToken.get(TOKEN11)).containsExactly(node5, node4, node6, node3); assertThat(replicasByToken.get(TOKEN12)).containsExactly(node4, node6, node3, node5); - assertThat(replicasByToken.get(TOKEN13)).containsExactly(node4, node6, node3, node5); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node6, node3, node5); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN07)); assertThat(replicasByToken.get(TOKEN15)).containsExactly(node6, node3, node2, node5); assertThat(replicasByToken.get(TOKEN16)).containsExactly(node3, node2, node6, node5); assertThat(replicasByToken.get(TOKEN17)).containsExactly(node2, node6, node1, node5); @@ -551,14 +524,13 @@ public void should_compute_complex_layout_with_rf_too_high() { // When - SetMultimap<Token, Node> replicasByToken = computeComplexLayout(4); + Map<Token, Set<Node>> replicasByToken = computeComplexLayout(4); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(18); assertThat(replicasByToken.get(TOKEN01)) .containsExactly(node1, node5, node3, node2, node6, node4); - assertThat(replicasByToken.get(TOKEN02)) - .containsExactly(node1, node5, node3, node2, node6, node4); + assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN03)) .containsExactly(node5, node3, node1, node2, node6, node4); assertThat(replicasByToken.get(TOKEN04)) @@ -579,8 +551,7 @@ .containsExactly(node5, node4, node6, node2, node3, node1); assertThat(replicasByToken.get(TOKEN12)) .containsExactly(node4, node6, node2, node3, node5, node1); - assertThat(replicasByToken.get(TOKEN13)) - .containsExactly(node4, node6, node2, node3, node5, node1); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); assertThat(replicasByToken.get(TOKEN14)) .containsExactly(node2, node6, node3, node5, node1, node4); assertThat(replicasByToken.get(TOKEN15)) @@ -593,7 +564,7 @@ .containsExactly(node6, node1, node5, node3, node2, node4); } - private SetMultimap<Token, Node> computeComplexLayout(int replicationFactor) { + private Map<Token, Set<Node>> computeComplexLayout(int replicationFactor) { List<Token> ring = ImmutableList.of( TOKEN01, TOKEN02, TOKEN03, TOKEN04, TOKEN05, TOKEN06, TOKEN07, TOKEN08, TOKEN09, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java
b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java index 57a22f463cf..54ac8a99738 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/RandomTokenRangeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java new file mode 100644 index 00000000000..d58d13933c2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/ReplicationFactorTest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.metadata.token; + +import static com.datastax.oss.driver.Assertions.assertThat; + +import org.junit.Test; + +public class ReplicationFactorTest { + @Test + public void should_parse_factor_from_string() { + ReplicationFactor transFactor = ReplicationFactor.fromString("3/1"); + assertThat(transFactor.fullReplicas()).isEqualTo(2); + assertThat(transFactor.hasTransientReplicas()).isTrue(); + assertThat(transFactor.transientReplicas()).isEqualTo(1); + + ReplicationFactor factor = ReplicationFactor.fromString("3"); + assertThat(factor.fullReplicas()).isEqualTo(3); + assertThat(factor.hasTransientReplicas()).isFalse(); + assertThat(factor.transientReplicas()).isEqualTo(0); + } + + @Test + public void should_create_string_from_factor() { + ReplicationFactor transFactor = new ReplicationFactor(3, 1); + assertThat(transFactor.toString()).isEqualTo("3/1"); + ReplicationFactor factor = new ReplicationFactor(3); + assertThat(factor.toString()).isEqualTo("3"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java index 7dd48a0088d..517d8cfdb84 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/SimpleReplicationStrategyTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
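A note on `ReplicationFactorTest` above: the `"3/1"` string it parses is Cassandra's transient-replication notation, `<total>/<transient>`, so the full replica count is the difference between the two numbers. A minimal sketch of the round trip, using only behavior the test itself asserts:

```java
ReplicationFactor rf = ReplicationFactor.fromString("3/1");
// "3/1" = 3 replicas in total, 1 of them transient, leaving 2 full replicas
assert rf.fullReplicas() == 2;
assert rf.hasTransientReplicas() && rf.transientReplicas() == 1;
// toString() round-trips the same "n/t" format
assert rf.toString().equals("3/1");
```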
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,9 +23,9 @@ import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.SetMultimap; import java.util.List; import java.util.Map; +import java.util.Set; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -61,19 +63,18 @@ public void should_compute_for_simple_layout() { List<Token> ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); Map<Token, Node> tokenToPrimary = ImmutableMap.of(TOKEN01, node1, TOKEN06, node2, TOKEN14, node1, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(2); + SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); // Note: this also asserts the iteration order of the sets (unlike containsEntry(token, set)) assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); assertThat(replicasByToken.get(TOKEN06)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node2, node1); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN06)); } /** 4 tokens, 2 nodes owning 2 consecutive tokens each, RF = 2. */ @@ -83,18 +84,17 @@ public void should_compute_when_nodes_own_consecutive_tokens() { List<Token> ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); Map<Token, Node> tokenToPrimary = ImmutableMap.of(TOKEN01, node1, TOKEN06, node1, TOKEN14, node2, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(2); + SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN06)).containsExactly(node1, node2); + assertThat(replicasByToken.get(TOKEN06)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node2, node1); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN14)); } /** 4 tokens, 1 node owns 3 of them, RF = 2.
*/ @@ -104,11 +104,10 @@ public void should_compute_when_ring_unbalanced() { List<Token> ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); Map<Token, Node> tokenToPrimary = ImmutableMap.of(TOKEN01, node1, TOKEN06, node1, TOKEN14, node2, TOKEN19, node1); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(2); + SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(2)); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); @@ -125,18 +124,17 @@ public void should_compute_when_replication_factor_is_larger_than_cluster_size() { List<Token> ring = ImmutableList.of(TOKEN01, TOKEN06, TOKEN14, TOKEN19); Map<Token, Node> tokenToPrimary = ImmutableMap.of(TOKEN01, node1, TOKEN06, node2, TOKEN14, node1, TOKEN19, node2); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(6); + SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(6)); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node2); assertThat(replicasByToken.get(TOKEN06)).containsExactly(node2, node1); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node1, node2); - assertThat(replicasByToken.get(TOKEN19)).containsExactly(node2, node1); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN01)); + assertThat(replicasByToken.get(TOKEN19)).isSameAs(replicasByToken.get(TOKEN06)); } @Test @@ -185,16 +183,15 @@ public void should_compute_for_complex_layout() { .put(TOKEN18, node6) .build(); - SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(3); + SimpleReplicationStrategy strategy = new SimpleReplicationStrategy(new ReplicationFactor(3)); // When - SetMultimap<Token, Node> replicasByToken = - strategy.computeReplicasByToken(tokenToPrimary, ring); + Map<Token, Set<Node>> replicasByToken = strategy.computeReplicasByToken(tokenToPrimary, ring); // Then assertThat(replicasByToken.keySet().size()).isEqualTo(ring.size()); assertThat(replicasByToken.get(TOKEN01)).containsExactly(node1, node5, node3); - assertThat(replicasByToken.get(TOKEN02)).containsExactly(node1, node5, node3); + assertThat(replicasByToken.get(TOKEN02)).isSameAs(replicasByToken.get(TOKEN01)); assertThat(replicasByToken.get(TOKEN03)).containsExactly(node5, node3, node1); assertThat(replicasByToken.get(TOKEN04)).containsExactly(node3, node1, node5); assertThat(replicasByToken.get(TOKEN05)).containsExactly(node1, node5, node2); @@ -205,8 +202,8 @@ public void should_compute_for_complex_layout() { assertThat(replicasByToken.get(TOKEN10)).containsExactly(node4, node5, node2); assertThat(replicasByToken.get(TOKEN11)).containsExactly(node5, node4, node2); assertThat(replicasByToken.get(TOKEN12)).containsExactly(node4, node2, node6); - assertThat(replicasByToken.get(TOKEN13)).containsExactly(node4, node2, node6); - assertThat(replicasByToken.get(TOKEN14)).containsExactly(node2, node6, node3); + assertThat(replicasByToken.get(TOKEN13)).isSameAs(replicasByToken.get(TOKEN12)); + assertThat(replicasByToken.get(TOKEN14)).isSameAs(replicasByToken.get(TOKEN07)); assertThat(replicasByToken.get(TOKEN15)).containsExactly(node6, node3, node2);
assertThat(replicasByToken.get(TOKEN16)).containsExactly(node3, node2, node6); assertThat(replicasByToken.get(TOKEN17)).containsExactly(node2, node6, node1); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java index 174fa69519a..7fcd56ba86e 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java index 42111947ec2..77cfbb30d77 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metadata/token/TokenRangeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java new file mode 100644 index 00000000000..13efda4b352 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdGeneratorTest.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class DefaultMetricIdGeneratorTest { + + @Mock private InternalDriverContext context; + + @Mock private DriverConfig config; + + @Mock private DriverExecutionProfile profile; + + @Mock private Node node; + + @Mock private EndPoint endpoint; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + given(context.getConfig()).willReturn(config); + given(context.getSessionName()).willReturn("s0"); + given(config.getDefaultProfile()).willReturn(profile); + given(node.getEndPoint()).willReturn(endpoint); + given(endpoint.asMetricPrefix()).willReturn("10_1_2_3:9042"); + } + + @Test + @UseDataProvider("sessionMetrics") + public void should_generate_session_metric(String prefix, String expectedName) { + // given + given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) + .willReturn(prefix); + DefaultMetricIdGenerator generator = new DefaultMetricIdGenerator(context); + // when + MetricId id = generator.sessionMetricId(DefaultSessionMetric.CONNECTED_NODES); + // then + assertThat(id.getName()).isEqualTo(expectedName); + assertThat(id.getTags()).isEmpty(); + } + + @Test + @UseDataProvider("nodeMetrics") + public void should_generate_node_metric(String prefix, String expectedName) { + // given + given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) + .willReturn(prefix); + DefaultMetricIdGenerator generator = new DefaultMetricIdGenerator(context); + // when + MetricId id = generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES); + // then + assertThat(id.getName()).isEqualTo(expectedName); + assertThat(id.getTags()).isEmpty(); + } + + @DataProvider + public static Object[][] sessionMetrics() { + String suffix = DefaultSessionMetric.CONNECTED_NODES.getPath(); + return new Object[][] { + new Object[] {"", "s0." + suffix}, + new Object[] {"cassandra", "cassandra.s0." 
+ suffix}, + new Object[] {"app.cassandra", "app.cassandra.s0." + suffix} + }; + } + + @DataProvider + public static Object[][] nodeMetrics() { + String suffix = DefaultNodeMetric.CQL_MESSAGES.getPath(); + return new Object[][] { + new Object[] {"", "s0.nodes.10_1_2_3:9042." + suffix}, + new Object[] {"cassandra", "cassandra.s0.nodes.10_1_2_3:9042." + suffix}, + new Object[] {"app.cassandra", "app.cassandra.s0.nodes.10_1_2_3:9042." + suffix} + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java new file mode 100644 index 00000000000..339f9235dc2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DefaultMetricIdTest.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableMap; +import org.junit.Test; + +public class DefaultMetricIdTest { + + @Test + public void testGetName() { + DefaultMetricId id = new DefaultMetricId("metric1", ImmutableMap.of()); + assertThat(id.getName()).isEqualTo("metric1"); + } + + @Test + public void testGetTags() { + DefaultMetricId id = + new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); + assertThat(id.getTags()) + .hasSize(2) + .containsEntry("tag1", "value1") + .containsEntry("tag2", "value2"); + } + + @Test + public void testEquals() { + DefaultMetricId id1 = + new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); + DefaultMetricId id2 = + new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); + DefaultMetricId id3 = + new DefaultMetricId("metric2", ImmutableMap.of("tag1", "value1", "tag2", "value2")); + DefaultMetricId id4 = new DefaultMetricId("metric1", ImmutableMap.of("tag2", "value2")); + assertThat(id1).isEqualTo(id2).isNotEqualTo(id3).isNotEqualTo(id4); + } + + @Test + public void testHashCode() { + DefaultMetricId id1 = + new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); + DefaultMetricId id2 = + new DefaultMetricId("metric1", ImmutableMap.of("tag1", "value1", "tag2", "value2")); + assertThat(id1).hasSameHashCodeAs(id2); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java new file mode 100644 index 00000000000..e5983c4f4fd --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardMetricsFactoryTest.java @@ -0,0 
+1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.codahale.metrics.MetricRegistry; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import org.junit.Test; + +public class DropwizardMetricsFactoryTest { + + @Test + public void should_throw_if_registry_of_wrong_type() { + // given + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + List<String> enabledMetrics = + Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath()); + // when + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getConfig()).thenReturn(config); + when(context.getSessionName()).thenReturn("MockSession"); + // registry object is not a registry type + when(context.getMetricRegistry()).thenReturn(Integer.MAX_VALUE); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(Duration.ofHours(1)); + when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) + .thenReturn(enabledMetrics); + // then + try { + new DropwizardMetricsFactory(context); + fail( + "MetricsFactory should require correct registry object type: " + + MetricRegistry.class.getName()); + } catch (IllegalArgumentException iae) { + assertThat(iae.getMessage()) + .isEqualTo( + "Unexpected Metrics registry object. " + + "Expected registry object to be of type '%s', but was '%s'", + MetricRegistry.class.getName(), Integer.class.getName()); + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java new file mode 100644 index 00000000000..ccc42a7027d --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/DropwizardNodeMetricUpdaterTest.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
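A note on `DropwizardMetricsFactoryTest` above: the factory fails fast unless the externally supplied registry object is a `com.codahale.metrics.MetricRegistry`. A hedged sketch of the happy path; the `withMetricRegistry` builder hook is assumed here, so treat the wiring as illustrative rather than canonical:

```java
// Passing a real Dropwizard registry satisfies DropwizardMetricsFactory;
// any other object type triggers the IllegalArgumentException tested above.
MetricRegistry registry = new MetricRegistry();
CqlSession session = CqlSession.builder()
    .withMetricRegistry(registry)
    .build();
```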
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import com.codahale.metrics.MetricRegistry; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.util.LoggerTest; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.time.Duration; +import java.util.Collections; +import java.util.Set; +import java.util.function.Supplier; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class DropwizardNodeMetricUpdaterTest { + + @Test + public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { + // given + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + Set<NodeMetric> enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); + Duration expireAfter = AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(expireAfter); + + DropwizardNodeMetricUpdater updater = + new DropwizardNodeMetricUpdater(node, context, enabledMetrics, new MetricRegistry()) { + @Override + protected void initializeGauge( + NodeMetric metric, DriverExecutionProfile profile, Supplier<Number> supplier) { + // do nothing + } + + @Override + protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { + // do nothing + } + + @Override + protected void initializeHdrTimer( + NodeMetric metric, + DriverExecutionProfile profile, + DriverOption highestLatency, + DriverOption significantDigits, +
DriverOption interval) { + // do nothing + } + }; + + // then + assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); + verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains( + String.format( + "[prefix] Value too low for %s: %s. Forcing to %s instead.", + DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), + expireAfter, + AbstractMetricUpdater.MIN_EXPIRE_AFTER)); + } + + @Test + @UseDataProvider(value = "acceptableEvictionTimes") + public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( + Duration expireAfter) { + // given + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + Set<NodeMetric> enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(expireAfter); + + DropwizardNodeMetricUpdater updater = + new DropwizardNodeMetricUpdater(node, context, enabledMetrics, new MetricRegistry()) { + @Override + protected void initializeGauge( + NodeMetric metric, DriverExecutionProfile profile, Supplier<Number> supplier) { + // do nothing + } + + @Override + protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { + // do nothing + } + + @Override + protected void initializeHdrTimer( + NodeMetric metric, + DriverExecutionProfile profile, + DriverOption highestLatency, + DriverOption significantDigits, + DriverOption interval) { + // do nothing + } + }; + + // then + assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); + verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); + } + + @DataProvider + public static Object[][] acceptableEvictionTimes() { + return new Object[][] { + {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, + {AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java new file mode 100644 index 00000000000..3a563be4453 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/NoopMetricsFactoryTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
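A note on the two eviction-time tests above: the updater clamps `metrics.node.expire-after` to a floor rather than rejecting it. A minimal sketch of the behavior they pin down, assuming only the names the tests use:

```java
// Values below AbstractMetricUpdater.MIN_EXPIRE_AFTER are forced up to the
// minimum (and a warning is logged); values at or above it are kept as-is.
Duration requested = profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER);
Duration effective =
    requested.compareTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER) < 0
        ? AbstractMetricUpdater.MIN_EXPIRE_AFTER // too low: clamp and warn
        : requested; // acceptable: no warning
```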
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.util.LoggerTest; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import java.util.Collections; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class NoopMetricsFactoryTest { + + @Test + public void should_log_warning_when_metrics_enabled() { + // given + InternalDriverContext context = mock(InternalDriverContext.class); + DriverConfig config = mock(DriverConfig.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + when(context.getSessionName()).thenReturn("MockSession"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) + .thenReturn(Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath())); + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(NoopMetricsFactory.class, Level.WARN); + + // when + new NoopMetricsFactory(context); + + // then + verify(logger.appender, times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains("[MockSession] Some session-level or node-level metrics were enabled"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java new file mode 100644 index 00000000000..809a7419ba4 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/metrics/TaggingMetricIdGeneratorTest.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.google.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class TaggingMetricIdGeneratorTest { + + @Mock private InternalDriverContext context; + + @Mock private DriverConfig config; + + @Mock private DriverExecutionProfile profile; + + @Mock private Node node; + + @Mock private EndPoint endpoint; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + given(context.getConfig()).willReturn(config); + given(context.getSessionName()).willReturn("s0"); + given(config.getDefaultProfile()).willReturn(profile); + given(node.getEndPoint()).willReturn(endpoint); + given(endpoint.toString()).willReturn("/10.1.2.3:9042"); + } + + @Test + @UseDataProvider("sessionMetrics") + public void should_generate_session_metric( + String prefix, String expectedName, Map<String, String> expectedTags) { + // given + given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) + .willReturn(prefix); + TaggingMetricIdGenerator generator = new TaggingMetricIdGenerator(context); + // when + MetricId id = generator.sessionMetricId(DefaultSessionMetric.CONNECTED_NODES); + // then + assertThat(id.getName()).isEqualTo(expectedName); + assertThat(id.getTags()).isEqualTo(expectedTags); + } + + @Test + @UseDataProvider("nodeMetrics") + public void should_generate_node_metric( + String prefix, String expectedName, Map<String, String> expectedTags) { + // given + given(profile.getString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, "")) + .willReturn(prefix); + TaggingMetricIdGenerator generator = new TaggingMetricIdGenerator(context); + // when + MetricId id = generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES); + // then + assertThat(id.getName()).isEqualTo(expectedName); + assertThat(id.getTags()).isEqualTo(expectedTags); + } + + @DataProvider + public static Object[][] sessionMetrics() { + String suffix = DefaultSessionMetric.CONNECTED_NODES.getPath(); + ImmutableMap<String, String> tags = ImmutableMap.of("session", "s0"); + return new Object[][] { + new Object[] {"", "session."
+ suffix, tags}, + new Object[] {"cassandra", "cassandra.session." + suffix, tags}, + new Object[] {"app.cassandra", "app.cassandra.session." + suffix, tags} + }; + } + + @DataProvider + public static Object[][] nodeMetrics() { + String suffix = DefaultNodeMetric.CQL_MESSAGES.getPath(); + ImmutableMap<String, String> tags = ImmutableMap.of("session", "s0", "node", "/10.1.2.3:9042"); + return new Object[][] { + new Object[] {"", "nodes." + suffix, tags}, + new Object[] {"cassandra", "cassandra.nodes." + suffix, tags}, + new Object[] {"app.cassandra", "app.cassandra.nodes." + suffix, tags} + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java new file mode 100644 index 00000000000..30dee7847c4 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/os/JnrLibcTest.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.os; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Optional; +import org.junit.Test; + +/** + * Explicitly test native impl based on jnr's POSIX impl. This test should pass on any platform + * which is supported by jnr. + */ +public class JnrLibcTest { + + @Test + public void should_be_available() { + + Libc impl = new JnrLibc(); + assertThat(impl.available()).isTrue(); + } + + @Test + public void should_support_getpid() { + Libc impl = new JnrLibc(); + Optional<Integer> val = impl.getpid(); + assertThat(val).isNotEmpty(); + assertThat(val.get()).isGreaterThan(1); + } + + @Test + public void should_support_gettimeofday() { + Libc impl = new JnrLibc(); + Optional<Long> val = impl.gettimeofday(); + assertThat(val).isNotEmpty(); + assertThat(val.get()).isGreaterThan(0); + + Instant now = Instant.now(); + Instant rvInstant = Instant.EPOCH.plus(val.get(), ChronoUnit.MICROS); + assertThat(rvInstant.isAfter(now.minusSeconds(1))).isTrue(); + assertThat(rvInstant.isBefore(now.plusSeconds(1))).isTrue(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java index b34015f31aa..aeaf28d1fdf 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/os/NativeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
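A note on `should_support_gettimeofday` above: the native call yields microseconds since the Unix epoch, which the test converts to an `Instant` and bounds within one second of the JVM clock. The conversion in isolation, reusing the test's own names:

```java
Libc impl = new JnrLibc();
long micros = impl.gettimeofday().get(); // microseconds since the Unix epoch
Instant fromLibc = Instant.EPOCH.plus(micros, ChronoUnit.MICROS);
// sanity window used by the test: within one second of the local clock
assert fromLibc.isAfter(Instant.now().minusSeconds(1))
    && fromLibc.isBefore(Instant.now().plusSeconds(1));
```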
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +17,15 @@ */ package com.datastax.oss.driver.internal.core.os; -import static org.assertj.core.api.Java6Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThat; import org.junit.Test; public class NativeTest { - /** Verifies that {@link Native#getCPU()} returns non-empty cpu architecture */ + /** Verifies that {@link Native#getCpu()} returns non-empty cpu architecture */ @Test - public void should_return_cpu_if_call_is_available() { - if (Native.isPlatformAvailable()) { - assertThat(Native.getCPU()).isNotEmpty(); - } + public void should_return_cpu_info() { + assertThat(Native.getCpu()).isNotEmpty(); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java index 3acfeb3b65d..5c7257d8c3f 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolInitTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +19,9 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.awaitility.Awaitility.await; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -58,11 +60,10 @@ public void should_initialize_when_all_channels_succeed() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 3); - waitForPendingAdminTasks(); assertThatStage(poolFuture) .isSuccess(pool -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3)); - verify(eventBus, times(3)).fire(ChannelEvent.channelOpened(node)); + verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); factoryHelper.verifyNoMoreCalls(); } @@ -82,11 +83,10 @@ public void should_initialize_when_all_channels_fail() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 3); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(pool -> assertThat(pool.channels).isEmpty()); verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - verify(nodeMetricUpdater, times(3)) + verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); factoryHelper.verifyNoMoreCalls(); @@ -107,12 +107,11 @@ public void should_indicate_when_keyspace_failed_on_all_channels() { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 3); - waitForPendingAdminTasks(); assertThatStage(poolFuture) .isSuccess( pool -> { assertThat(pool.isInvalidKeyspace()).isTrue(); - verify(nodeMetricUpdater, times(3)) + verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); }); } @@ -133,12 +132,12 @@ public void should_fire_force_down_event_when_cluster_name_does_not_match() thro ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 3); - waitForPendingAdminTasks(); - verify(eventBus).fire(TopologyEvent.forceDown(node.getBroadcastRpcAddress().get())); + verify(eventBus, VERIFY_TIMEOUT) + .fire(TopologyEvent.forceDown(node.getBroadcastRpcAddress().get())); verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); - verify(nodeMetricUpdater, times(3)) + verify(nodeMetricUpdater, VERIFY_TIMEOUT.times(3)) .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); factoryHelper.verifyNoMoreCalls(); } @@ -167,26 +166,25 @@ public void should_reconnect_when_init_incomplete() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); // A reconnection should have been scheduled - 
verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); channel2Future.complete(channel2); factoryHelper.waitForCalls(node, 1); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel2); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); - verify(nodeMetricUpdater).incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); + verify(nodeMetricUpdater, VERIFY_TIMEOUT) + .incrementCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, null); factoryHelper.verifyNoMoreCalls(); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java index a5a6e33c821..4273a51f891 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,7 +51,6 @@ public void should_switch_keyspace_on_existing_channels() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); @@ -57,10 +58,9 @@ public void should_switch_keyspace_on_existing_channels() throws Exception { CqlIdentifier newKeyspace = CqlIdentifier.fromCql("new_keyspace"); CompletionStage<Void> setKeyspaceFuture = pool.setKeyspace(newKeyspace); - waitForPendingAdminTasks(); - verify(channel1).setKeyspace(newKeyspace); - verify(channel2).setKeyspace(newKeyspace); + verify(channel1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); + verify(channel2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); assertThatStage(setKeyspaceFuture).isSuccess(); @@ -91,30 +91,27 @@ public void should_switch_keyspace_on_pending_channels() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); // Check that reconnection has kicked in, but do not complete it yet - verify(reconnectionSchedule).nextDelay(); - verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); factoryHelper.waitForCalls(node, 2); // Switch keyspace, it succeeds immediately since there is no active channel CqlIdentifier newKeyspace = CqlIdentifier.fromCql("new_keyspace"); CompletionStage<Void> setKeyspaceFuture = pool.setKeyspace(newKeyspace); - waitForPendingAdminTasks(); assertThatStage(setKeyspaceFuture).isSuccess(); // Now let the two channels succeed to complete the reconnection channel1Future.complete(channel1); channel2Future.complete(channel2); - waitForPendingAdminTasks(); - verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); - verify(channel1).setKeyspace(newKeyspace); - verify(channel2).setKeyspace(newKeyspace); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); + verify(channel1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); + verify(channel2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); factoryHelper.verifyNoMoreCalls(); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java index a932bfb4bea..c4538f78bdb 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolReconnectTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +19,10 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.awaitility.Awaitility.await; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -34,6 +36,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import org.junit.Test; import org.mockito.InOrder; @@ -63,29 +66,26 @@ public void should_reconnect_when_channel_closes() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1, channel2); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); // Simulate fatal error on channel2 ((ChannelPromise) channel2.closeFuture()) .setFailure(new Exception("mock channel init failure")); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); factoryHelper.waitForCall(node); channel3Future.complete(channel3); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); - verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel3); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel3)); factoryHelper.verifyNoMoreCalls(); } @@ -114,28 +114,25 @@ public void should_reconnect_when_channel_starts_graceful_shutdown() throws Exce ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1, channel2); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); // Simulate graceful shutdown on channel2 ((ChannelPromise) channel2.closeStartedFuture()).setSuccess(); - 
waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); factoryHelper.waitForCall(node); channel3Future.complete(channel3); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); - verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel3); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel3)); factoryHelper.verifyNoMoreCalls(); } @@ -164,33 +161,30 @@ public void should_let_current_attempt_complete_when_reconnecting_now() CompletionStage<ChannelPool> poolFuture = ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 1); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); - inOrder.verify(eventBus, times(1)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelOpened(node)); // Kill channel1, reconnection begins and starts initializing channel2, but the initialization // is still pending (channel2Future not completed) ((ChannelPromise) channel1.closeStartedFuture()).setSuccess(); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelClosed(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); - verify(reconnectionSchedule).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); factoryHelper.waitForCalls(node, 1); // Force a reconnection, should not try to create a new channel since we have a pending one pool.reconnectNow(); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(200); factoryHelper.verifyNoMoreCalls(); inOrder.verify(eventBus, never()).fire(any()); // Complete the initialization of channel2, reconnection succeeds channel2Future.complete(channel2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); - verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); + verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel2); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel2)); factoryHelper.verifyNoMoreCalls(); } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java index 57e5cf145eb..6992bb7742a 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolResizeTest.java @@ -1,11 +1,13 @@ /* - *
Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +19,9 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.awaitility.Awaitility.await; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -32,6 +34,7 @@ import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import java.util.concurrent.TimeUnit; import org.junit.Test; import org.mockito.InOrder; @@ -59,19 +62,17 @@ public void should_shrink_outside_of_reconnection() throws Exception { ChannelPool.init(node, null, NodeDistance.REMOTE, context, "test"); factoryHelper.waitForCalls(node, 4); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); - inOrder.verify(eventBus, times(4)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(4)).fire(ChannelEvent.channelOpened(node)); pool.resize(NodeDistance.LOCAL); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); - assertThat(pool.channels).containsOnly(channel3, channel4); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel3, channel4)); factoryHelper.verifyNoMoreCalls(); } @@ -106,9 +107,8 @@ public void should_shrink_during_reconnection() throws Exception { ChannelPool.init(node, null, NodeDistance.REMOTE, context, "test"); factoryHelper.waitForCalls(node, 4); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1, channel2); @@ -119,20 +119,19 @@ public void should_shrink_during_reconnection() throws Exception { pool.resize(NodeDistance.LOCAL); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(200); // Now allow the reconnected channels to complete initialization channel3Future.complete(channel3); channel4Future.complete(channel4); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - // Pool should have shrinked back to 2. We keep the most recent channels so 1 and 2 get closed. 
- inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelClosed(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel3, channel4); + // Pool should have shrunk back to 2. We keep the most recent channels so 1 and 2 get closed. + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel3, channel4)); factoryHelper.verifyNoMoreCalls(); } @@ -163,26 +162,25 @@ public void should_grow_outside_of_reconnection() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1, channel2); pool.resize(NodeDistance.REMOTE); - waitForPendingAdminTasks(); // The resizing should have triggered a reconnection - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); + await() + .untilAsserted( + () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); factoryHelper.verifyNoMoreCalls(); } @@ -218,31 +216,29 @@ public void should_grow_during_reconnection() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1); // A reconnection should have been scheduled to add the missing channel, don't complete yet - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); pool.resize(NodeDistance.REMOTE); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(200); // Complete the channel for the first reconnection, bringing the count to 2 channel2Future.complete(channel2); factoryHelper.waitForCall(node); - waitForPendingAdminTasks(); - 
inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - assertThat(pool.channels).containsOnly(channel1, channel2); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); // A second attempt should have been scheduled since we're now still under the target size - verify(reconnectionSchedule, times(2)).nextDelay(); + verify(reconnectionSchedule, VERIFY_TIMEOUT.times(2)).nextDelay(); // Same reconnection is still running, no additional events inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStopped(node)); inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStarted(node)); @@ -251,11 +247,12 @@ public void should_grow_during_reconnection() throws Exception { factoryHelper.waitForCalls(node, 2); channel3Future.complete(channel3); channel4Future.complete(channel4); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); + await() + .untilAsserted( + () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); factoryHelper.verifyNoMoreCalls(); } @@ -285,8 +282,7 @@ public void should_resize_outside_of_reconnection_if_config_changes() throws Exc ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); @@ -295,18 +291,18 @@ public void should_resize_outside_of_reconnection_if_config_changes() throws Exc // Simulate a configuration change when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(4); eventBus.fire(ConfigChangeEvent.INSTANCE); - waitForPendingAdminTasks(); // It should have triggered a reconnection - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); + await() + .untilAsserted( + () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); factoryHelper.verifyNoMoreCalls(); } @@ -341,32 +337,30 @@ public void should_resize_during_reconnection_if_config_changes() throws Excepti ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - 
inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); assertThat(pool.channels).containsOnly(channel1); // A reconnection should have been scheduled to add the missing channel, don't complete yet - verify(reconnectionSchedule).nextDelay(); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStarted(node)); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStarted(node)); // Simulate a configuration change when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE)).thenReturn(4); eventBus.fire(ConfigChangeEvent.INSTANCE); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(200); // Complete the channel for the first reconnection, bringing the count to 2 channel2Future.complete(channel2); factoryHelper.waitForCall(node); - waitForPendingAdminTasks(); - inOrder.verify(eventBus).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.channelOpened(node)); - assertThat(pool.channels).containsOnly(channel1, channel2); + await().untilAsserted(() -> assertThat(pool.channels).containsOnly(channel1, channel2)); // A second attempt should have been scheduled since we're now still under the target size - verify(reconnectionSchedule, times(2)).nextDelay(); + verify(reconnectionSchedule, VERIFY_TIMEOUT.times(2)).nextDelay(); // Same reconnection is still running, no additional events inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStopped(node)); inOrder.verify(eventBus, never()).fire(ChannelEvent.reconnectionStarted(node)); @@ -375,11 +369,12 @@ public void should_resize_during_reconnection_if_config_changes() throws Excepti factoryHelper.waitForCalls(node, 2); channel3Future.complete(channel3); channel4Future.complete(channel4); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); - inOrder.verify(eventBus).fire(ChannelEvent.reconnectionStopped(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT).fire(ChannelEvent.reconnectionStopped(node)); - assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4); + await() + .untilAsserted( + () -> assertThat(pool.channels).containsOnly(channel1, channel2, channel3, channel4)); factoryHelper.verifyNoMoreCalls(); } @@ -403,8 +398,7 @@ public void should_ignore_config_change_if_not_relevant() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 2); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); @@ -413,7 +407,7 @@ public void should_ignore_config_change_if_not_relevant() throws Exception { // Config changes, but not for our distance when(defaultProfile.getInt(DefaultDriverOption.CONNECTION_POOL_REMOTE_SIZE)).thenReturn(1); eventBus.fire(ConfigChangeEvent.INSTANCE); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(200); // It should not have triggered a reconnection verify(reconnectionSchedule, never()).nextDelay(); diff 
--git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java index 3efb2147247..b40bcb4aa39 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolShutdownTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +20,6 @@ import static com.datastax.oss.driver.Assertions.assertThatStage; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -62,37 +63,33 @@ public void should_close_all_channels_when_closed() throws Exception { ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 3); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(3)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); // Simulate graceful shutdown on channel3 ((ChannelPromise) channel3.closeStartedFuture()).setSuccess(); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(1)).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelClosed(node)); // Reconnection should have kicked in and started to open channel4, do not complete it yet - verify(reconnectionSchedule).nextDelay(); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); factoryHelper.waitForCalls(node, 1); CompletionStage closeFuture = pool.closeAsync(); - waitForPendingAdminTasks(); // The two original channels were closed normally - verify(channel1).close(); - verify(channel2).close(); - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelClosed(node)); + verify(channel1, VERIFY_TIMEOUT).close(); + verify(channel2, VERIFY_TIMEOUT).close(); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); // The closing channel was not closed again verify(channel3, never()).close(); // Complete the reconnecting channel channel4Future.complete(channel4); - waitForPendingAdminTasks(); // It should be force-closed once we find out the pool was closed - verify(channel4).forceClose(); + verify(channel4, VERIFY_TIMEOUT).forceClose(); // No events because the channel was never really associated to the pool inOrder.verify(eventBus, 
never()).fire(ChannelEvent.channelOpened(node)); inOrder.verify(eventBus, never()).fire(ChannelEvent.channelClosed(node)); @@ -133,37 +130,33 @@ public void should_force_close_all_channels_when_force_closed() throws Exception ChannelPool.init(node, null, NodeDistance.LOCAL, context, "test"); factoryHelper.waitForCalls(node, 3); - waitForPendingAdminTasks(); assertThatStage(poolFuture).isSuccess(); ChannelPool pool = poolFuture.toCompletableFuture().get(); - inOrder.verify(eventBus, times(3)).fire(ChannelEvent.channelOpened(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(3)).fire(ChannelEvent.channelOpened(node)); // Simulate graceful shutdown on channel3 ((ChannelPromise) channel3.closeStartedFuture()).setSuccess(); - waitForPendingAdminTasks(); - inOrder.verify(eventBus, times(1)).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(1)).fire(ChannelEvent.channelClosed(node)); // Reconnection should have kicked in and started to open a channel, do not complete it yet - verify(reconnectionSchedule).nextDelay(); + verify(reconnectionSchedule, VERIFY_TIMEOUT).nextDelay(); factoryHelper.waitForCalls(node, 1); CompletionStage closeFuture = pool.forceCloseAsync(); - waitForPendingAdminTasks(); // The three original channels were force-closed - verify(channel1).forceClose(); - verify(channel2).forceClose(); - verify(channel3).forceClose(); + verify(channel1, VERIFY_TIMEOUT).forceClose(); + verify(channel2, VERIFY_TIMEOUT).forceClose(); + verify(channel3, VERIFY_TIMEOUT).forceClose(); // Only two events because the one for channel3 was sent earlier - inOrder.verify(eventBus, times(2)).fire(ChannelEvent.channelClosed(node)); + inOrder.verify(eventBus, VERIFY_TIMEOUT.times(2)).fire(ChannelEvent.channelClosed(node)); // Complete the reconnecting channel channel4Future.complete(channel4); - waitForPendingAdminTasks(); // It should be force-closed once we find out the pool was closed - verify(channel4).forceClose(); + verify(channel4, VERIFY_TIMEOUT).forceClose(); // No events because the channel was never really associated to the pool inOrder.verify(eventBus, never()).fire(ChannelEvent.channelOpened(node)); inOrder.verify(eventBus, never()).fire(ChannelEvent.channelClosed(node)); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java index 16164c950e3..2f8056e49e0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelPoolTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +17,10 @@ */ package com.datastax.oss.driver.internal.core.pool; -import static org.assertj.core.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.when; import com.datastax.oss.driver.api.core.CqlIdentifier; @@ -35,23 +37,23 @@ import com.datastax.oss.driver.internal.core.metadata.TestNodeFactory; import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; import io.netty.channel.Channel; import io.netty.channel.DefaultChannelPromise; import io.netty.channel.DefaultEventLoopGroup; import io.netty.channel.EventLoop; -import io.netty.util.concurrent.Future; import java.time.Duration; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import org.junit.After; import org.junit.Before; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.mockito.verification.VerificationWithTimeout; abstract class ChannelPoolTestBase { + /** How long we wait when verifying mocks for async invocations */ + protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(2000); + @Mock protected InternalDriverContext context; @Mock private DriverConfig config; @Mock protected DriverExecutionProfile defaultProfile; @@ -111,17 +113,4 @@ DriverChannel newMockDriverChannel(int id) { when(driverChannel.toString()).thenReturn("channel" + id); return driverChannel; } - - // Wait for all the tasks on the pool's admin executor to complete. - void waitForPendingAdminTasks() { - // This works because the event loop group is single-threaded - Future f = adminEventLoopGroup.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 100, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java index 5e1e12d13d8..628110bc1df 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/pool/ChannelSetTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
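The `ChannelPoolTestBase` hunk above is the heart of this migration: the deterministic `waitForPendingAdminTasks()` barrier is deleted, and verifications instead pass Mockito's `timeout()` mode (`VERIFY_TIMEOUT`), which polls the mock until the expected invocation is recorded or the deadline expires. A minimal, self-contained sketch of the idiom follows; the `EventListener` interface is a hypothetical stand-in, not a driver type:

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

import org.mockito.verification.VerificationWithTimeout;

public class TimeoutVerificationExample {

  /** Hypothetical collaborator invoked from a background thread. */
  interface EventListener {
    void onEvent(String event);
  }

  // Polls the mock for up to 2 seconds instead of asserting immediately.
  private static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(2000);

  public static void main(String[] args) {
    EventListener listener = mock(EventListener.class);

    // Simulate an asynchronous task that eventually hits the mock.
    new Thread(() -> listener.onEvent("channelOpened")).start();

    // Succeeds as soon as the invocation is recorded; fails after 2s if it never is.
    verify(listener, VERIFY_TIMEOUT).onEvent("channelOpened");
  }
}
```

A cardinality can be attached to the same timeout object, e.g. `verify(listener, VERIFY_TIMEOUT.times(2))`, which is how the pool tests above check repeated `channelOpened` events.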
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,6 +46,9 @@ public void should_return_null_when_empty() { @Test public void should_return_element_when_single() { + // Given + when(channel1.preAcquireId()).thenReturn(true); + // When set.add(channel1); @@ -51,6 +56,20 @@ public void should_return_element_when_single() { assertThat(set.size()).isEqualTo(1); assertThat(set.next()).isEqualTo(channel1); verify(channel1, never()).getAvailableIds(); + verify(channel1).preAcquireId(); + } + + @Test + public void should_return_null_when_single_but_full() { + // Given + when(channel1.preAcquireId()).thenReturn(false); + + // When + set.add(channel1); + + // Then + assertThat(set.next()).isNull(); + verify(channel1).preAcquireId(); } @Test @@ -59,6 +78,7 @@ public void should_return_most_available_when_multiple() { when(channel1.getAvailableIds()).thenReturn(2); when(channel2.getAvailableIds()).thenReturn(12); when(channel3.getAvailableIds()).thenReturn(8); + when(channel2.preAcquireId()).thenReturn(true); // When set.add(channel1); @@ -71,12 +91,31 @@ public void should_return_most_available_when_multiple() { verify(channel1).getAvailableIds(); verify(channel2).getAvailableIds(); verify(channel3).getAvailableIds(); + verify(channel2).preAcquireId(); // When when(channel1.getAvailableIds()).thenReturn(15); + when(channel1.preAcquireId()).thenReturn(true); // Then assertThat(set.next()).isEqualTo(channel1); + verify(channel1).preAcquireId(); + } + + @Test + public void should_return_null_when_multiple_but_all_full() { + // Given + when(channel1.getAvailableIds()).thenReturn(0); + when(channel2.getAvailableIds()).thenReturn(0); + when(channel3.getAvailableIds()).thenReturn(0); + + // When + set.add(channel1); + set.add(channel2); + set.add(channel3); + + // Then + assertThat(set.next()).isNull(); } @Test @@ -85,6 +124,7 @@ public void should_remove_channels() { when(channel1.getAvailableIds()).thenReturn(2); when(channel2.getAvailableIds()).thenReturn(12); when(channel3.getAvailableIds()).thenReturn(8); + when(channel2.preAcquireId()).thenReturn(true); set.add(channel1); set.add(channel2); @@ -93,6 +133,7 @@ public void should_remove_channels() { // When set.remove(channel2); + when(channel3.preAcquireId()).thenReturn(true); // Then assertThat(set.size()).isEqualTo(2); @@ -100,6 +141,7 @@ public void should_remove_channels() { // When set.remove(channel3); + when(channel1.preAcquireId()).thenReturn(true); // Then assertThat(set.size()).isEqualTo(1); @@ -112,4 +154,26 @@ public void should_remove_channels() { assertThat(set.size()).isEqualTo(0); assertThat(set.next()).isNull(); } + + /** + * Check that {@link ChannelSet#next()} doesn't spin forever if it keeps racing (see comments in + * the implementation). 
+ */ + @Test + public void should_not_loop_indefinitely_if_acquisition_keeps_failing() { + // Given + when(channel1.getAvailableIds()).thenReturn(2); + when(channel2.getAvailableIds()).thenReturn(12); + when(channel3.getAvailableIds()).thenReturn(8); + // channel2 is the most available but we keep failing to acquire (simulating the race condition) + when(channel2.preAcquireId()).thenReturn(false); + + // When + set.add(channel1); + set.add(channel2); + set.add(channel3); + + // Then + assertThat(set.next()).isNull(); + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java new file mode 100644 index 00000000000..1911c7c7227 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BuiltInCompressorsTest.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.protocol.internal.NoopCompressor; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Locale; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(DataProviderRunner.class) +public class BuiltInCompressorsTest { + + @Mock private DriverContext context; + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + } + + @Test + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_create_instance_for_supported_algorithms(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(BuiltInCompressors.newInstance("lz4", context)).isInstanceOf(Lz4Compressor.class); + assertThat(BuiltInCompressors.newInstance("snappy", context)) + .isInstanceOf(SnappyCompressor.class); + assertThat(BuiltInCompressors.newInstance("none", context)) + .isInstanceOf(NoopCompressor.class); + assertThat(BuiltInCompressors.newInstance("LZ4", context)).isInstanceOf(Lz4Compressor.class); + assertThat(BuiltInCompressors.newInstance("SNAPPY", context)) + .isInstanceOf(SnappyCompressor.class); + assertThat(BuiltInCompressors.newInstance("NONE", context)) + .isInstanceOf(NoopCompressor.class); + } finally { + Locale.setDefault(def); + } + } + + @Test + public void should_throw_when_unsupported_algorithm() { + 
assertThatThrownBy(() -> BuiltInCompressors.newInstance("GZIP", context)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Unsupported compression algorithm 'GZIP'"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java index fadb80f871b..895a650b292 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/ByteBufPrimitiveCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.internal.core.protocol; import static com.datastax.oss.driver.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.datastax.oss.driver.internal.core.util.ByteBufs; import com.datastax.oss.protocol.internal.util.Bytes; @@ -24,9 +27,7 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; /** * Note: we don't test trivial methods that simply delegate to ByteBuf, nor default implementations @@ -35,8 +36,6 @@ public class ByteBufPrimitiveCodecTest { private ByteBufPrimitiveCodec codec = new ByteBufPrimitiveCodec(ByteBufAllocator.DEFAULT); - @Rule public ExpectedException expectedException = ExpectedException.none(); - @Test public void should_concatenate() { ByteBuf left = ByteBufs.wrap(0xca, 0xfe); @@ -91,8 +90,6 @@ public void should_read_inet_v6() { @Test public void should_fail_to_read_inet_if_length_invalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Invalid address length: 3 ([127, 0, 1])"); ByteBuf source = ByteBufs.wrap( // length (as a byte) @@ -106,7 +103,9 @@ public void should_fail_to_read_inet_if_length_invalid() { 0x00, 0x23, 0x52); - codec.readInet(source); + assertThatThrownBy(() -> codec.readInet(source)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Invalid address length: 3 ([127, 0, 1])"); } @Test @@ -136,9 +135,6 @@ public void should_read_inetaddr_v6() { @Test public void should_fail_to_read_inetaddr_if_length_invalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Invalid address length: 3 ([127, 0, 1])"); - ByteBuf source = ByteBufs.wrap( // length (as a byte) @@ -147,7 +143,9 @@ public void should_fail_to_read_inetaddr_if_length_invalid() { 0x7f, 0x00, 0x01); - codec.readInetAddr(source); + 
assertThatThrownBy(() -> codec.readInetAddr(source)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Invalid address length: 3 ([127, 0, 1])"); } @Test @@ -168,6 +166,64 @@ public void should_read_bytes() { assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); } + @Test + public void should_read_bytes_when_extra_data() { + ByteBuf source = + ByteBufs.wrap( + // length (as an int) + 0x00, + 0x00, + 0x00, + 0x04, + // contents + 0xca, + 0xfe, + 0xba, + 0xbe, + 0xde, + 0xda, + 0xdd); + ByteBuffer bytes = codec.readBytes(source); + assertThat(Bytes.toHexString(bytes)).isEqualTo("0xcafebabe"); + } + + @Test + public void read_bytes_should_update_reader_index() { + ByteBuf source = + ByteBufs.wrap( + // length (as an int) + 0x00, + 0x00, + 0x00, + 0x04, + // contents + 0xca, + 0xfe, + 0xba, + 0xbe, + 0xde, + 0xda, + 0xdd); + codec.readBytes(source); + + assertThat(source.readerIndex()).isEqualTo(8); + } + + @Test + public void read_bytes_should_throw_when_not_enough_content() { + ByteBuf source = + ByteBufs.wrap( + // length (as an int) : 4 bytes + 0x00, + 0x00, + 0x00, + 0x04, + // contents : only 2 bytes + 0xca, + 0xfe); + assertThatThrownBy(() -> codec.readBytes(source)).isInstanceOf(IndexOutOfBoundsException.class); + } + @Test public void should_read_null_bytes() { ByteBuf source = ByteBufs.wrap(0xFF, 0xFF, 0xFF, 0xFF); // -1 (as an int) @@ -207,14 +263,12 @@ public void should_read_string() { @Test public void should_fail_to_read_string_if_not_enough_characters() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage( - "Not enough bytes to read an UTF-8 serialized string of size 4"); - ByteBuf source = codec.allocate(2); source.writeShort(4); - codec.readString(source); + assertThatThrownBy(() -> codec.readString(source)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); } @Test @@ -237,13 +291,12 @@ public void should_read_long_string() { @Test public void should_fail_to_read_long_string_if_not_enough_characters() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage( - "Not enough bytes to read an UTF-8 serialized string of size 4"); ByteBuf source = codec.allocate(4); source.writeInt(4); - codec.readLongString(source); + assertThatThrownBy(() -> codec.readLongString(source)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Not enough bytes to read an UTF-8 serialized string of size 4"); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java new file mode 100644 index 00000000000..d151da309c1 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/BytesToSegmentDecoderTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.connection.CrcMismatchException; +import com.datastax.oss.protocol.internal.Compressor; +import com.datastax.oss.protocol.internal.Segment; +import com.datastax.oss.protocol.internal.SegmentCodec; +import com.google.common.base.Strings; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.DecoderException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class BytesToSegmentDecoderTest { + + // Hard-coded test data, the values were generated with our encoding methods. + // We're not really testing the decoding itself here, only that our subclass calls the + // LengthFieldBasedFrameDecoder parent constructor with the right parameters. + private static final ByteBuf REGULAR_HEADER = byteBuf("04000201f9f2"); + private static final ByteBuf REGULAR_PAYLOAD = byteBuf("00000001"); + private static final ByteBuf REGULAR_TRAILER = byteBuf("1fd6022d"); + private static final ByteBuf REGULAR_WRONG_HEADER = byteBuf("04000202f9f2"); + private static final ByteBuf REGULAR_WRONG_TRAILER = byteBuf("1fd6022e"); + + private static final ByteBuf MAX_HEADER = byteBuf("ffff03254047"); + private static final ByteBuf MAX_PAYLOAD = + byteBuf(Strings.repeat("01", Segment.MAX_PAYLOAD_LENGTH)); + private static final ByteBuf MAX_TRAILER = byteBuf("a05c2f13"); + + private static final ByteBuf LZ4_HEADER = byteBuf("120020000491c94f"); + private static final ByteBuf LZ4_PAYLOAD_UNCOMPRESSED = + byteBuf("00000001000000010000000100000001"); + private static final ByteBuf LZ4_PAYLOAD_COMPRESSED = + byteBuf("f00100000001000000010000000100000001"); + private static final ByteBuf LZ4_TRAILER = byteBuf("2bd67f90"); + + private static final Compressor LZ4_COMPRESSOR = new Lz4Compressor("test"); + + private EmbeddedChannel channel; + + @Before + public void setup() { + channel = new EmbeddedChannel(); + } + + @Test + public void should_decode_regular_segment() { + channel.pipeline().addLast(newDecoder(Compressor.none())); + channel.writeInbound(Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); + Segment segment = channel.readInbound(); + assertThat(segment.isSelfContained).isTrue(); + assertThat(segment.payload).isEqualTo(REGULAR_PAYLOAD); + } + + @Test + public void should_decode_max_length_segment() { + channel.pipeline().addLast(newDecoder(Compressor.none())); + channel.writeInbound(Unpooled.wrappedBuffer(MAX_HEADER, MAX_PAYLOAD, MAX_TRAILER)); + Segment segment = channel.readInbound(); + assertThat(segment.isSelfContained).isTrue(); + assertThat(segment.payload).isEqualTo(MAX_PAYLOAD); + } + + @Test + public void should_decode_segment_from_multiple_incoming_chunks() { 
+ channel.pipeline().addLast(newDecoder(Compressor.none())); + // Send the header in two slices, to cover the case where the length can't be read the first + // time: + ByteBuf headerStart = REGULAR_HEADER.slice(0, 3); + ByteBuf headerEnd = REGULAR_HEADER.slice(3, 3); + channel.writeInbound(headerStart); + channel.writeInbound(headerEnd); + channel.writeInbound(REGULAR_PAYLOAD.duplicate()); + channel.writeInbound(REGULAR_TRAILER.duplicate()); + Segment segment = channel.readInbound(); + assertThat(segment.isSelfContained).isTrue(); + assertThat(segment.payload).isEqualTo(REGULAR_PAYLOAD); + } + + @Test + public void should_decode_compressed_segment() { + channel.pipeline().addLast(newDecoder(LZ4_COMPRESSOR)); + // We need a contiguous buffer for this one, because of how our decompressor operates + ByteBuf buffer = Unpooled.wrappedBuffer(LZ4_HEADER, LZ4_PAYLOAD_COMPRESSED, LZ4_TRAILER).copy(); + channel.writeInbound(buffer); + Segment segment = channel.readInbound(); + assertThat(segment.isSelfContained).isTrue(); + assertThat(segment.payload).isEqualTo(LZ4_PAYLOAD_UNCOMPRESSED); + } + + @Test + public void should_surface_header_crc_mismatch() { + try { + channel.pipeline().addLast(newDecoder(Compressor.none())); + channel.writeInbound( + Unpooled.wrappedBuffer(REGULAR_WRONG_HEADER, REGULAR_PAYLOAD, REGULAR_TRAILER)); + fail("Expected a " + DecoderException.class.getSimpleName()); + } catch (DecoderException exception) { + assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); + } + } + + @Test + public void should_surface_trailer_crc_mismatch() { + try { + channel.pipeline().addLast(newDecoder(Compressor.none())); + channel.writeInbound( + Unpooled.wrappedBuffer(REGULAR_HEADER, REGULAR_PAYLOAD, REGULAR_WRONG_TRAILER)); + fail("Expected a " + DecoderException.class.getSimpleName()); + } catch (DecoderException exception) { + assertThat(exception).hasCauseInstanceOf(CrcMismatchException.class); + } + } + + private BytesToSegmentDecoder newDecoder(Compressor compressor) { + return new BytesToSegmentDecoder( + new SegmentCodec<>( + new ByteBufPrimitiveCodec(UnpooledByteBufAllocator.DEFAULT), compressor)); + } + + private static ByteBuf byteBuf(String hex) { + return Unpooled.unreleasableBuffer( + Unpooled.wrappedBuffer(ByteBufUtil.decodeHexDump(hex)).asReadOnly()); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java index c223cb15462..0ab61771da0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/FrameDecoderTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java new file mode 100644 index 00000000000..2886adeab4e --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SegmentToFrameDecoderTest.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.protocol; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.shaded.guava.common.base.Strings; +import com.datastax.oss.protocol.internal.Compressor; +import com.datastax.oss.protocol.internal.Frame; +import com.datastax.oss.protocol.internal.FrameCodec; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.datastax.oss.protocol.internal.ProtocolV5ClientCodecs; +import com.datastax.oss.protocol.internal.ProtocolV5ServerCodecs; +import com.datastax.oss.protocol.internal.Segment; +import com.datastax.oss.protocol.internal.request.AuthResponse; +import com.datastax.oss.protocol.internal.response.result.Void; +import com.datastax.oss.protocol.internal.util.Bytes; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.embedded.EmbeddedChannel; +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; + +public class SegmentToFrameDecoderTest { + + private static final FrameCodec FRAME_CODEC = + new FrameCodec<>( + new ByteBufPrimitiveCodec(UnpooledByteBufAllocator.DEFAULT), + Compressor.none(), + new ProtocolV5ClientCodecs(), + new ProtocolV5ServerCodecs()); + + private EmbeddedChannel channel; + + @Before + public void setup() { + channel = new EmbeddedChannel(); + channel.pipeline().addLast(new SegmentToFrameDecoder(FRAME_CODEC, "test")); + } + + @Test + public void should_decode_self_contained() { + ByteBuf payload = UnpooledByteBufAllocator.DEFAULT.buffer(); + payload.writeBytes(encodeFrame(Void.INSTANCE)); + payload.writeBytes(encodeFrame(new AuthResponse(Bytes.fromHexString("0xabcdef")))); + + channel.writeInbound(new Segment<>(payload, true)); + + Frame frame1 = channel.readInbound(); + assertThat(frame1.message).isInstanceOf(Void.class); + Frame frame2 = channel.readInbound(); + assertThat(frame2.message).isInstanceOf(AuthResponse.class); + } + + @Test + public void 
should_decode_sequence_of_slices() { + ByteBuf encodedFrame = + encodeFrame(new AuthResponse(Bytes.fromHexString("0x" + Strings.repeat("aa", 1011)))); + int sliceLength = 100; + do { + ByteBuf payload = + encodedFrame.readRetainedSlice(Math.min(sliceLength, encodedFrame.readableBytes())); + channel.writeInbound(new Segment<>(payload, false)); + } while (encodedFrame.isReadable()); + + Frame frame = channel.readInbound(); + assertThat(frame.message).isInstanceOf(AuthResponse.class); + } + + private static ByteBuf encodeFrame(Message message) { + Frame frame = + Frame.forResponse( + ProtocolConstants.Version.V5, + 1, + null, + Collections.emptyMap(), + Collections.emptyList(), + message); + return FRAME_CODEC.encode(frame); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java new file mode 100644 index 00000000000..736bcb66d56 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/protocol/SliceWriteListenerTest.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
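The new protocol tests above (`BytesToSegmentDecoderTest` and `SegmentToFrameDecoderTest`) drive their handlers through Netty's `EmbeddedChannel`, which runs a pipeline synchronously in the calling thread with no sockets or event loop, so decoding can be asserted deterministically. A minimal sketch of the pattern, with an invented `UpperCaseDecoder` standing in for the driver's segment and frame decoders:

```java
import static org.assertj.core.api.Assertions.assertThat;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.MessageToMessageDecoder;
import java.util.List;

public class EmbeddedChannelExample {

  /** Invented handler, standing in for the driver's real decoders. */
  static class UpperCaseDecoder extends MessageToMessageDecoder<String> {
    @Override
    protected void decode(ChannelHandlerContext ctx, String msg, List<Object> out) {
      out.add(msg.toUpperCase());
    }
  }

  public static void main(String[] args) {
    // No real I/O: writeInbound() runs the pipeline synchronously in this thread.
    EmbeddedChannel channel = new EmbeddedChannel(new UpperCaseDecoder());

    channel.writeInbound("frame"); // push data through the pipeline
    String decoded = channel.readInbound(); // pop what the handlers produced

    assertThat(decoded).isEqualTo("FRAME");
  }
}
```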
+ */ +package com.datastax.oss.driver.internal.core.protocol; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import io.netty.channel.ChannelPromise; +import io.netty.channel.embedded.EmbeddedChannel; +import org.junit.Before; +import org.junit.Test; + +public class SliceWriteListenerTest { + + private final EmbeddedChannel channel = new EmbeddedChannel(); + + private ChannelPromise framePromise, slicePromise1, slicePromise2, slicePromise3; + + @Before + public void setup() { + framePromise = channel.newPromise(); + slicePromise1 = channel.newPromise(); + slicePromise2 = channel.newPromise(); + slicePromise3 = channel.newPromise(); + + ByteBufSegmentBuilder.SliceWriteListener listener = + new ByteBufSegmentBuilder.SliceWriteListener( + framePromise, ImmutableList.of(slicePromise1, slicePromise2, slicePromise3)); + slicePromise1.addListener(listener); + slicePromise2.addListener(listener); + slicePromise3.addListener(listener); + + assertThat(framePromise.isDone()).isFalse(); + } + + @Test + public void should_succeed_frame_if_all_slices_succeed() { + slicePromise1.setSuccess(); + assertThat(framePromise.isDone()).isFalse(); + slicePromise2.setSuccess(); + assertThat(framePromise.isDone()).isFalse(); + slicePromise3.setSuccess(); + + assertThat(framePromise.isSuccess()).isTrue(); + } + + @Test + public void should_fail_frame_and_cancel_remaining_slices_if_one_slice_fails() { + slicePromise1.setSuccess(); + assertThat(framePromise.isDone()).isFalse(); + Exception failure = new Exception("test"); + slicePromise2.setFailure(failure); + + assertThat(framePromise.isDone()).isTrue(); + assertThat(framePromise.isSuccess()).isFalse(); + assertThat(framePromise.cause()).isEqualTo(failure); + + assertThat(slicePromise3.isCancelled()).isTrue(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java index dfd616d1e54..58d1783038d 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/DefaultSessionPoolsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,7 @@ import static com.datastax.oss.driver.Assertions.assertThat; import static com.datastax.oss.driver.Assertions.assertThatStage; -import static org.assertj.core.api.Assertions.fail; +import static org.awaitility.Awaitility.await; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -60,10 +62,8 @@ import com.datastax.oss.driver.internal.core.pool.ChannelPoolFactory; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; import io.netty.channel.DefaultEventLoopGroup; import io.netty.util.concurrent.DefaultPromise; -import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GlobalEventExecutor; import java.time.Duration; import java.util.Collections; @@ -71,17 +71,18 @@ import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.mockito.verification.VerificationWithTimeout; public class DefaultSessionPoolsTest { private static final CqlIdentifier KEYSPACE = CqlIdentifier.fromInternal("ks"); + /** How long we wait when verifying mocks for async invocations */ + protected static final VerificationWithTimeout VERIFY_TIMEOUT = timeout(500); @Mock private InternalDriverContext context; @Mock private NettyOptions nettyOptions; @@ -106,14 +107,13 @@ public class DefaultSessionPoolsTest { private DefaultNode node1; private DefaultNode node2; private DefaultNode node3; - private DefaultEventLoopGroup adminEventLoopGroup; private EventBus eventBus; @Before public void setup() { MockitoAnnotations.initMocks(this); - adminEventLoopGroup = new DefaultEventLoopGroup(1); + DefaultEventLoopGroup adminEventLoopGroup = new DefaultEventLoopGroup(1); when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); when(context.getNettyOptions()).thenReturn(nettyOptions); @@ -130,7 +130,7 @@ public void setup() { // Init sequence: when(metadataManager.refreshNodes()).thenReturn(CompletableFuture.completedFuture(null)); - when(metadataManager.firstSchemaRefreshFuture()) + when(metadataManager.refreshSchema(null, false, true)) .thenReturn(CompletableFuture.completedFuture(null)); when(context.getMetadataManager()).thenReturn(metadataManager); @@ -154,6 +154,7 @@ public void setup() { node1 = mockLocalNode(1); node2 = mockLocalNode(2); node3 = mockLocalNode(3); + @SuppressWarnings("ConstantConditions") ImmutableMap nodes = ImmutableMap.of( node1.getHostId(), node1, @@ -213,14 +214,12 @@ public void should_initialize_pools_with_distances() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.REMOTE); - waitForPendingAdminTasks(); assertThatStage(initFuture).isNotDone(); pool1Future.complete(pool1); 
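The session-pool tests follow the same migration as the pool tests earlier in this patch: assertions on state mutated by background tasks move into Awaitility's `await().untilAsserted(...)`, which re-runs the assertion until it stops throwing, or fails once Awaitility's timeout (10 seconds by default) elapses. A minimal, self-contained sketch of the idiom, with a plain list standing in for the session's pool map:

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;

public class AwaitilityExample {

  public static void main(String[] args) {
    List<String> pools = new CopyOnWriteArrayList<>();

    // State is mutated asynchronously, like the session's pool map.
    CompletableFuture.runAsync(() -> pools.add("pool1"));

    // Retries the lambda until the assertion passes (or times out, 10s by default).
    await().untilAsserted(() -> assertThat(pools).containsOnly("pool1"));
  }
}
```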
pool2Future.complete(pool2); pool3Future.complete(pool3); - waitForPendingAdminTasks(); assertThatStage(initFuture) .isSuccess( @@ -246,7 +245,6 @@ public void should_not_connect_to_ignored_nodes() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture) .isSuccess( session -> @@ -270,7 +268,6 @@ public void should_not_connect_to_forced_down_nodes() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture) .isSuccess( session -> @@ -297,7 +294,6 @@ public void should_adjust_distance_if_changed_while_init() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isNotDone(); @@ -307,9 +303,8 @@ public void should_adjust_distance_if_changed_while_init() { pool1Future.complete(pool1); pool2Future.complete(pool2); pool3Future.complete(pool3); - waitForPendingAdminTasks(); - verify(pool2).resize(NodeDistance.REMOTE); + verify(pool2, VERIFY_TIMEOUT).resize(NodeDistance.REMOTE); assertThatStage(initFuture) .isSuccess( @@ -338,7 +333,6 @@ public void should_remove_pool_if_ignored_while_init() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isNotDone(); @@ -348,9 +342,8 @@ public void should_remove_pool_if_ignored_while_init() { pool1Future.complete(pool1); pool2Future.complete(pool2); pool3Future.complete(pool3); - waitForPendingAdminTasks(); - verify(pool2).closeAsync(); + verify(pool2, VERIFY_TIMEOUT).closeAsync(); assertThatStage(initFuture) .isSuccess( @@ -378,7 +371,6 @@ public void should_remove_pool_if_forced_down_while_init() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isNotDone(); @@ -388,9 +380,8 @@ public void should_remove_pool_if_forced_down_while_init() { pool1Future.complete(pool1); pool2Future.complete(pool2); pool3Future.complete(pool3); - waitForPendingAdminTasks(); - verify(pool2).closeAsync(); + verify(pool2, VERIFY_TIMEOUT).closeAsync(); assertThatStage(initFuture) .isSuccess( @@ -415,7 +406,6 @@ public void should_resize_pool_if_distance_changes() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); eventBus.fire(new DistanceEvent(NodeDistance.REMOTE, node2)); @@ -439,18 +429,20 @@ public void should_remove_pool_if_node_becomes_ignored() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); verify(pool2, timeout(500)).closeAsync(); Session session 
= CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); + await() + .untilAsserted( + () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); } @Test - public void should_do_nothing_if_node_becomes_ignored_but_was_already_ignored() { + public void should_do_nothing_if_node_becomes_ignored_but_was_already_ignored() + throws InterruptedException { ChannelPool pool1 = mockPool(node1); ChannelPool pool2 = mockPool(node2); ChannelPool pool3 = mockPool(node3); @@ -466,7 +458,6 @@ public void should_do_nothing_if_node_becomes_ignored_but_was_already_ignored() factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); @@ -477,7 +468,7 @@ public void should_do_nothing_if_node_becomes_ignored_but_was_already_ignored() // Fire the same event again, nothing should happen eventBus.fire(new DistanceEvent(NodeDistance.IGNORED, node2)); - waitForPendingAdminTasks(); + TimeUnit.MILLISECONDS.sleep(200); factoryHelper.verifyNoMoreCalls(); } @@ -501,7 +492,6 @@ public void should_recreate_pool_if_node_becomes_not_ignored() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); @@ -509,8 +499,11 @@ public void should_recreate_pool_if_node_becomes_not_ignored() { eventBus.fire(new DistanceEvent(NodeDistance.LOCAL, node2)); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool2, pool3); + await() + .untilAsserted( + () -> + assertThat(((DefaultSession) session).getPools()) + .containsValues(pool1, pool2, pool3)); } @Test @@ -530,14 +523,15 @@ public void should_remove_pool_if_node_is_forced_down() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); verify(pool2, timeout(500)).closeAsync(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); + await() + .untilAsserted( + () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); } @Test @@ -560,15 +554,17 @@ public void should_recreate_pool_if_node_is_forced_back_up() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); 
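Throughout DefaultSessionPoolsTest, this diff replaces the old pattern of draining the single-threaded admin executor with `waitForPendingAdminTasks()` and then asserting synchronously: verifications now poll until the expected interaction or state is observed, either through Mockito's timeout-based modes (`timeout(500)`, and the shared `VERIFY_TIMEOUT` constant, presumably a `Mockito.timeout(...)` value defined outside these hunks) or through Awaitility's `await().untilAsserted(...)`. A minimal, self-contained sketch of both idioms, assuming Mockito, Awaitility and AssertJ on the test classpath; the `Worker` interface and all other names are illustrative, not driver API:

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;

public class AsyncVerificationSketch {

  interface Worker {
    void close();
  }

  public static void main(String[] args) {
    Worker worker = mock(Worker.class);
    Queue<String> closed = new ConcurrentLinkedQueue<>();

    // The interesting work finishes on another thread, much like tasks
    // scheduled on the driver's admin executor.
    CompletableFuture.runAsync(
        () -> {
          worker.close();
          closed.add("worker1");
        });

    // Mockito polls for up to 500 ms until the interaction is observed.
    verify(worker, timeout(500)).close();

    // Awaitility retries the assertion until it passes or its timeout expires.
    await().untilAsserted(() -> assertThat(closed).contains("worker1"));
  }
}
```

The polling forms are less precise than the old drain, but more robust: the drain only worked because the admin event loop group was single-threaded, whereas timeout-based verification keeps passing even if the work hops between threads.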
factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool2, pool3); + await() + .untilAsserted( + () -> + assertThat(((DefaultSession) session).getPools()) + .containsValues(pool1, pool2, pool3)); } @Test @@ -577,7 +573,6 @@ public void should_not_recreate_pool_if_node_is_forced_back_up_but_ignored() { when(node2.getDistance()).thenReturn(NodeDistance.IGNORED); ChannelPool pool1 = mockPool(node1); - ChannelPool pool2 = mockPool(node2); ChannelPool pool3 = mockPool(node3); MockChannelPoolFactoryHelper factoryHelper = MockChannelPoolFactoryHelper.builder(channelPoolFactory) @@ -590,15 +585,15 @@ public void should_not_recreate_pool_if_node_is_forced_back_up_but_ignored() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); - waitForPendingAdminTasks(); + await() + .untilAsserted( + () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); factoryHelper.verifyNoMoreCalls(); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); } @Test @@ -622,7 +617,6 @@ public void should_adjust_distance_if_changed_while_recreating() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); @@ -636,12 +630,14 @@ public void should_adjust_distance_if_changed_while_recreating() { // Now pool init succeeds pool2Future.complete(pool2); - waitForPendingAdminTasks(); // Pool should have been adjusted - verify(pool2).resize(NodeDistance.REMOTE); - - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool2, pool3); + verify(pool2, VERIFY_TIMEOUT).resize(NodeDistance.REMOTE); + await() + .untilAsserted( + () -> + assertThat(((DefaultSession) session).getPools()) + .containsValues(pool1, pool2, pool3)); } @Test @@ -665,7 +661,6 @@ public void should_remove_pool_if_ignored_while_recreating() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); @@ -679,12 +674,13 @@ public void should_remove_pool_if_ignored_while_recreating() { // Now pool init succeeds pool2Future.complete(pool2); - waitForPendingAdminTasks(); // Pool should have been closed - verify(pool2).closeAsync(); + verify(pool2, VERIFY_TIMEOUT).closeAsync(); - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); + await() + .untilAsserted( + () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); } @Test @@ -708,7 +704,6 @@ public void should_remove_pool_if_forced_down_while_recreating() { 
factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); @@ -722,12 +717,12 @@ public void should_remove_pool_if_forced_down_while_recreating() { // Now pool init succeeds pool2Future.complete(pool2); - waitForPendingAdminTasks(); // Pool should have been closed - verify(pool2).closeAsync(); - - assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); + verify(pool2, VERIFY_TIMEOUT).closeAsync(); + await() + .untilAsserted( + () -> assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3)); } @Test @@ -747,17 +742,15 @@ public void should_close_all_pools_when_closing() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); CompletionStage closeFuture = session.closeAsync(); - waitForPendingAdminTasks(); assertThatStage(closeFuture).isSuccess(); - verify(pool1).closeAsync(); - verify(pool2).closeAsync(); - verify(pool3).closeAsync(); + verify(pool1, VERIFY_TIMEOUT).closeAsync(); + verify(pool2, VERIFY_TIMEOUT).closeAsync(); + verify(pool3, VERIFY_TIMEOUT).closeAsync(); } @Test @@ -777,17 +770,15 @@ public void should_force_close_all_pools_when_force_closing() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); CompletionStage closeFuture = session.forceCloseAsync(); - waitForPendingAdminTasks(); assertThatStage(closeFuture).isSuccess(); - verify(pool1).forceCloseAsync(); - verify(pool2).forceCloseAsync(); - verify(pool3).forceCloseAsync(); + verify(pool1, VERIFY_TIMEOUT).forceCloseAsync(); + verify(pool2, VERIFY_TIMEOUT).forceCloseAsync(); + verify(pool3, VERIFY_TIMEOUT).forceCloseAsync(); } @Test @@ -811,7 +802,6 @@ public void should_close_pool_if_recreated_while_closing() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); assertThat(((DefaultSession) session).getPools()).containsValues(pool1, pool3); @@ -822,15 +812,13 @@ public void should_close_pool_if_recreated_while_closing() { // but the session gets closed before pool init completes CompletionStage closeFuture = session.closeAsync(); - waitForPendingAdminTasks(); assertThatStage(closeFuture).isSuccess(); // now pool init completes pool2Future.complete(pool2); - waitForPendingAdminTasks(); // Pool should have been closed - verify(pool2).forceCloseAsync(); + verify(pool2, VERIFY_TIMEOUT).forceCloseAsync(); } @Test @@ -850,17 +838,15 @@ public void should_set_keyspace_on_all_pools() { factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); 
factoryHelper.waitForCall(node2, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); Session session = CompletableFutures.getCompleted(initFuture.toCompletableFuture()); CqlIdentifier newKeyspace = CqlIdentifier.fromInternal("newKeyspace"); ((DefaultSession) session).setKeyspace(newKeyspace); - waitForPendingAdminTasks(); - verify(pool1).setKeyspace(newKeyspace); - verify(pool2).setKeyspace(newKeyspace); - verify(pool3).setKeyspace(newKeyspace); + verify(pool1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); + verify(pool2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); + verify(pool3, VERIFY_TIMEOUT).setKeyspace(newKeyspace); } @Test @@ -884,7 +870,6 @@ public void should_set_keyspace_on_pool_if_recreated_while_switching_keyspace() factoryHelper.waitForCall(node1, KEYSPACE, NodeDistance.LOCAL); factoryHelper.waitForCall(node3, KEYSPACE, NodeDistance.LOCAL); - waitForPendingAdminTasks(); assertThatStage(initFuture).isSuccess(); DefaultSession session = (DefaultSession) CompletableFutures.getCompleted(initFuture.toCompletableFuture()); @@ -897,16 +882,14 @@ public void should_set_keyspace_on_pool_if_recreated_while_switching_keyspace() // Keyspace gets changed on the session in the meantime, node2's pool will miss it CqlIdentifier newKeyspace = CqlIdentifier.fromInternal("newKeyspace"); session.setKeyspace(newKeyspace); - waitForPendingAdminTasks(); - verify(pool1).setKeyspace(newKeyspace); - verify(pool3).setKeyspace(newKeyspace); + verify(pool1, VERIFY_TIMEOUT).setKeyspace(newKeyspace); + verify(pool3, VERIFY_TIMEOUT).setKeyspace(newKeyspace); // now pool init completes pool2Future.complete(pool2); - waitForPendingAdminTasks(); // Pool should have been switched to the new keyspace - verify(pool2).setKeyspace(newKeyspace); + verify(pool2, VERIFY_TIMEOUT).setKeyspace(newKeyspace); } private ChannelPool mockPool(Node node) { @@ -946,17 +929,4 @@ private static DefaultNode mockLocalNode(int i) { when(node.toString()).thenReturn("node" + i); return node; } - - // Wait for all the tasks on the pool's admin executor to complete. - private void waitForPendingAdminTasks() { - // This works because the event loop group is single-threaded - Future f = adminEventLoopGroup.schedule(() -> null, 5, TimeUnit.NANOSECONDS); - try { - Uninterruptibles.getUninterruptibly(f, 250, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - fail("unexpected error", e.getCause()); - } catch (TimeoutException e) { - fail("timed out while waiting for admin tasks to complete", e); - } - } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java index 4aa7e414939..6c3dc7f3689 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/MockChannelPoolFactoryHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java new file mode 100644 index 00000000000..60483da4c72 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/PoolManagerTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.session; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.context.EventBus; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.context.NettyOptions; +import io.netty.channel.DefaultEventLoopGroup; +import java.util.concurrent.ConcurrentHashMap; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class PoolManagerTest { + @Mock private InternalDriverContext context; + @Mock private NettyOptions nettyOptions; + @Mock private DriverConfig config; + @Mock private DriverExecutionProfile defaultProfile; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + + DefaultEventLoopGroup adminEventLoopGroup = new DefaultEventLoopGroup(1); + when(nettyOptions.adminEventExecutorGroup()).thenReturn(adminEventLoopGroup); + when(context.getNettyOptions()).thenReturn(nettyOptions); + when(context.getEventBus()).thenReturn(new EventBus("test")); + when(config.getDefaultProfile()).thenReturn(defaultProfile); + when(context.getConfig()).thenReturn(config); + } + + @Test + public void should_use_weak_values_if_config_is_true_or_undefined() { + when(defaultProfile.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) + .thenReturn(true); + // As weak values map class is MapMakerInternalMap + assertThat(new PoolManager(context).getRepreparePayloads()) + .isNotInstanceOf(ConcurrentHashMap.class); + } + + @Test + public void should_not_use_weak_values_if_config_is_false() { + 
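+    // With weak values disabled, getRepreparePayloads() falls back to a plain
+    // ConcurrentHashMap; the weak-values variant is Guava's internal
+    // MapMakerInternalMap, which is why the test above can only assert
+    // isNotInstanceOf(ConcurrentHashMap.class).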
when(defaultProfile.getBoolean(DefaultDriverOption.PREPARED_CACHE_WEAK_VALUES, true)) + .thenReturn(false); + assertThat(new PoolManager(context).getRepreparePayloads()) + .isInstanceOf(ConcurrentHashMap.class); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java index 6bb875d1dbd..555ed2e8806 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/ReprepareOnUpTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,7 +45,8 @@ import com.datastax.oss.protocol.internal.response.result.Rows; import com.datastax.oss.protocol.internal.response.result.RowsMetadata; import com.datastax.oss.protocol.internal.util.Bytes; -import io.netty.channel.EventLoop; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.ImmediateEventExecutor; import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayDeque; @@ -61,7 +64,6 @@ public class ReprepareOnUpTest { @Mock private ChannelPool pool; @Mock private DriverChannel channel; - @Mock private EventLoop eventLoop; @Mock private InternalDriverContext context; @Mock private DriverConfig config; @Mock private DriverExecutionProfile defaultProfile; @@ -76,8 +78,6 @@ public void setup() { MockitoAnnotations.initMocks(this); when(pool.next()).thenReturn(channel); - when(channel.eventLoop()).thenReturn(eventLoop); - when(eventLoop.inEventLoop()).thenReturn(true); when(config.getDefaultProfile()).thenReturn(defaultProfile); when(defaultProfile.getBoolean(DefaultDriverOption.REPREPARE_CHECK_SYSTEM_TABLE)) @@ -99,21 +99,13 @@ public void setup() { public void should_complete_immediately_if_no_prepared_statements() { // Given MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp("test", pool, getMockPayloads(/*none*/ ), context, whenPrepared); - - // When - reprepareOnUp.start(); - - // Then - assertThatStage(done).isSuccess(v -> assertThat(reprepareOnUp.queries).isEmpty()); - } - - @Test - public void should_complete_immediately_if_pool_empty() { - // Given - when(pool.next()).thenReturn(null); - MockReprepareOnUp reprepareOnUp = - new MockReprepareOnUp("test", pool, getMockPayloads('a'), context, whenPrepared); + new MockReprepareOnUp( + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads(/*none*/ ), + context, + whenPrepared); // When reprepareOnUp.start(); @@ -126,11 +118,17 @@ public void should_complete_immediately_if_pool_empty() { public void should_reprepare_all_if_system_table_query_fails() { 
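The ReprepareOnUp hunks that follow make a related change: instead of stubbing `channel.eventLoop()` and claiming `inEventLoop()` is true, the admin executor is now passed in explicitly, and the tests supply Netty's `ImmediateEventExecutor.INSTANCE`. A short sketch of why that keeps the tests deterministic (the class name below is illustrative):

```java
import io.netty.util.concurrent.ImmediateEventExecutor;

public class ImmediateExecutorSketch {
  public static void main(String[] args) {
    StringBuilder order = new StringBuilder();
    // ImmediateEventExecutor runs every task synchronously on the calling
    // thread, so nothing is left pending once execute() returns.
    ImmediateEventExecutor.INSTANCE.execute(() -> order.append("task;"));
    order.append("after;");
    System.out.println(order); // always "task;after;" -- no race to wait out
  }
}
```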
MockReprepareOnUp reprepareOnUp = new MockReprepareOnUp( - "test", pool, getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), context, whenPrepared); + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), + context, + whenPrepared); reprepareOnUp.start(); MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Query.class); assertThat(((Query) adminQuery.request).query) .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); @@ -138,6 +136,7 @@ public void should_reprepare_all_if_system_table_query_fails() { for (char c = 'a'; c <= 'f'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -150,11 +149,17 @@ public void should_reprepare_all_if_system_table_query_fails() { public void should_reprepare_all_if_system_table_empty() { MockReprepareOnUp reprepareOnUp = new MockReprepareOnUp( - "test", pool, getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), context, whenPrepared); + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), + context, + whenPrepared); reprepareOnUp.start(); MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Query.class); assertThat(((Query) adminQuery.request).query) .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); @@ -164,6 +169,7 @@ public void should_reprepare_all_if_system_table_empty() { for (char c = 'a'; c <= 'f'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -179,13 +185,19 @@ public void should_reprepare_all_if_system_query_disabled() { MockReprepareOnUp reprepareOnUp = new MockReprepareOnUp( - "test", pool, getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), context, whenPrepared); + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), + context, + whenPrepared); reprepareOnUp.start(); MockAdminQuery adminQuery; for (char c = 'a'; c <= 'f'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -198,11 +210,17 @@ public void should_reprepare_all_if_system_query_disabled() { public void should_not_reprepare_already_known_statements() { MockReprepareOnUp reprepareOnUp = new MockReprepareOnUp( - "test", pool, getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), context, whenPrepared); + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), + context, + whenPrepared); reprepareOnUp.start(); MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Query.class); assertThat(((Query) adminQuery.request).query) .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); @@ -212,6 +230,7 @@ public void should_not_reprepare_already_known_statements() { for (char c = 'a'; 
c <= 'c'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -240,11 +259,17 @@ public void should_limit_number_of_statements_to_reprepare() { MockReprepareOnUp reprepareOnUp = new MockReprepareOnUp( - "test", pool, getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), context, whenPrepared); + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), + context, + whenPrepared); reprepareOnUp.start(); MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Query.class); assertThat(((Query) adminQuery.request).query) .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); @@ -254,6 +279,7 @@ public void should_limit_number_of_statements_to_reprepare() { for (char c = 'a'; c <= 'c'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -268,11 +294,17 @@ public void should_limit_number_of_statements_reprepared_in_parallel() { MockReprepareOnUp reprepareOnUp = new MockReprepareOnUp( - "test", pool, getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), context, whenPrepared); + "test", + pool, + ImmediateEventExecutor.INSTANCE, + getMockPayloads('a', 'b', 'c', 'd', 'e', 'f'), + context, + whenPrepared); reprepareOnUp.start(); MockAdminQuery adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Query.class); assertThat(((Query) adminQuery.request).query) .isEqualTo("SELECT prepared_id FROM system.prepared_statements"); @@ -286,6 +318,7 @@ public void should_limit_number_of_statements_reprepared_in_parallel() { // As we complete each statement, another one should enqueue: for (char c = 'a'; c <= 'c'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -295,6 +328,7 @@ public void should_limit_number_of_statements_reprepared_in_parallel() { // Complete the last 3: for (char c = 'd'; c <= 'f'; c++) { adminQuery = reprepareOnUp.queries.poll(); + assertThat(adminQuery).isNotNull(); assertThat(adminQuery.request).isInstanceOf(Prepare.class); assertThat(((Prepare) adminQuery.request).cqlQuery).isEqualTo("mock query " + c); adminQuery.resultFuture.complete(null); @@ -321,10 +355,11 @@ private static class MockReprepareOnUp extends ReprepareOnUp { MockReprepareOnUp( String logPrefix, ChannelPool pool, + EventExecutor adminExecutor, Map repreparePayloads, InternalDriverContext context, Runnable whenPrepared) { - super(logPrefix, pool, repreparePayloads, context, whenPrepared); + super(logPrefix, pool, adminExecutor, repreparePayloads, context, whenPrepared); } @Override @@ -334,15 +369,24 @@ protected CompletionStage queryAsync( queries.add(new MockAdminQuery(message, resultFuture)); return resultFuture; } + + @Override + protected CompletionStage prepareAsync( + Message message, Map customPayload) { + CompletableFuture resultFuture = new CompletableFuture<>(); + 
queries.add(new MockAdminQuery(message, resultFuture)); + return resultFuture; + } } private static class MockAdminQuery { private final Message request; - private final CompletableFuture resultFuture; + private final CompletableFuture resultFuture; - public MockAdminQuery(Message request, CompletableFuture resultFuture) { + @SuppressWarnings("unchecked") + public MockAdminQuery(Message request, CompletableFuture resultFuture) { this.request = request; - this.resultFuture = resultFuture; + this.resultFuture = (CompletableFuture) resultFuture; } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java index a9f4233513b..7eb682070cd 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/ConcurrencyLimitingRequestThrottlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,6 +29,7 @@ import com.datastax.oss.driver.api.core.session.throttling.Throttled; import com.datastax.oss.driver.shaded.guava.common.collect.Lists; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.function.Consumer; import org.junit.Before; import org.junit.Test; @@ -65,7 +68,7 @@ public void should_start_immediately_when_under_capacity() { throttler.register(request); // Then - assertThatStage(request.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); assertThat(throttler.getConcurrentRequests()).isEqualTo(1); assertThat(throttler.getQueue()).isEmpty(); } @@ -86,12 +89,17 @@ public void should_allow_new_request_when_active_one_times_out() { should_allow_new_request_when_active_one_completes(throttler::signalTimeout); } + @Test + public void should_allow_new_request_when_active_one_canceled() { + should_allow_new_request_when_active_one_completes(throttler::signalCancel); + } + private void should_allow_new_request_when_active_one_completes( Consumer completeCallback) { // Given MockThrottled first = new MockThrottled(); throttler.register(first); - assertThatStage(first.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); for (int i = 0; i < 4; i++) { // fill to capacity throttler.register(new MockThrottled()); } @@ -106,7 +114,7 @@ private void 
should_allow_new_request_when_active_one_completes( throttler.register(incoming); // Then - assertThatStage(incoming.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + assertThatStage(incoming.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); assertThat(throttler.getConcurrentRequests()).isEqualTo(5); assertThat(throttler.getQueue()).isEmpty(); } @@ -125,7 +133,7 @@ public void should_enqueue_when_over_capacity() { throttler.register(incoming); // Then - assertThatStage(incoming.started).isNotDone(); + assertThatStage(incoming.ended).isNotDone(); assertThat(throttler.getConcurrentRequests()).isEqualTo(5); assertThat(throttler.getQueue()).containsExactly(incoming); } @@ -150,20 +158,20 @@ private void should_dequeue_when_active_completes(Consumer completeCa // Given MockThrottled first = new MockThrottled(); throttler.register(first); - assertThatStage(first.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); for (int i = 0; i < 4; i++) { throttler.register(new MockThrottled()); } MockThrottled incoming = new MockThrottled(); throttler.register(incoming); - assertThatStage(incoming.started).isNotDone(); + assertThatStage(incoming.ended).isNotDone(); // When completeCallback.accept(first); // Then - assertThatStage(incoming.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); + assertThatStage(incoming.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); assertThat(throttler.getConcurrentRequests()).isEqualTo(5); assertThat(throttler.getQueue()).isEmpty(); } @@ -182,7 +190,7 @@ public void should_reject_when_queue_is_full() { throttler.register(incoming); // Then - assertThatStage(incoming.started) + assertThatStage(incoming.ended) .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); } @@ -201,7 +209,7 @@ public void should_remove_timed_out_request_from_queue() { throttler.signalTimeout(queued1); // Then - assertThatStage(queued2.started).isNotDone(); + assertThatStage(queued2.ended).isNotDone(); assertThat(throttler.getConcurrentRequests()).isEqualTo(5); assertThat(throttler.getQueue()).hasSize(1); } @@ -216,7 +224,7 @@ public void should_reject_enqueued_when_closing() { for (int i = 0; i < 10; i++) { MockThrottled request = new MockThrottled(); throttler.register(request); - assertThatStage(request.started).isNotDone(); + assertThatStage(request.ended).isNotDone(); enqueued.add(request); } @@ -225,7 +233,7 @@ // Then for (MockThrottled request : enqueued) { - assertThatStage(request.started) + assertThatStage(request.ended) .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); } @@ -234,7 +242,125 @@ public void should_reject_enqueued_when_closing() { throttler.register(request); // Then - assertThatStage(request.started) + assertThatStage(request.ended) .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); } + + @Test + public void should_run_throttle_callbacks_concurrently() throws InterruptedException { + // Given + + // a task is enqueued; once it enters onThrottleReady, it stalls until its latch is countDown()ed + // register() should automatically start onThrottleReady on the same thread + + // start a parallel thread + CountDownLatch firstRelease = new CountDownLatch(1); + MockThrottled first = new MockThrottled(firstRelease); + Runnable r = + () -> { + throttler.register(first); +
first.ended.toCompletableFuture().thenRun(() -> throttler.signalSuccess(first)); + }; + Thread t = new Thread(r); + t.start(); + + // wait for the registration threads to reach await state + assertThatStage(first.started).isSuccess(); + assertThatStage(first.ended).isNotDone(); + + // When + // we concurrently submit a second shorter task + MockThrottled second = new MockThrottled(); + // (on a second thread, so that we can join and force a timeout in case + // registration is delayed) + Thread t2 = new Thread(() -> throttler.register(second)); + t2.start(); + t2.join(1_000); + + // Then + // registration will trigger callback, should complete ~immediately + assertThatStage(second.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + // first should still be unfinished + assertThatStage(first.started).isDone(); + assertThatStage(first.ended).isNotDone(); + // now finish, and verify + firstRelease.countDown(); + assertThatStage(first.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + + t.join(1_000); + } + + @Test + public void should_enqueue_tasks_quickly_when_callbacks_blocked() throws InterruptedException { + // Given + + // Multiple tasks are registered, up to the limit, and proceed into their + // callback + + // start five parallel threads + final int THREADS = 5; + Thread[] threads = new Thread[THREADS]; + CountDownLatch[] latches = new CountDownLatch[THREADS]; + MockThrottled[] throttled = new MockThrottled[THREADS]; + for (int i = 0; i < threads.length; i++) { + latches[i] = new CountDownLatch(1); + final MockThrottled itThrottled = new MockThrottled(latches[i]); + throttled[i] = itThrottled; + threads[i] = + new Thread( + () -> { + throttler.register(itThrottled); + itThrottled + .ended + .toCompletableFuture() + .thenRun(() -> throttler.signalSuccess(itThrottled)); + }); + threads[i].start(); + } + + // wait for the registration threads to be launched + // they are all waiting now + for (int i = 0; i < throttled.length; i++) { + assertThatStage(throttled[i].started).isSuccess(); + assertThatStage(throttled[i].ended).isNotDone(); + } + + // When + // we concurrently submit another task + MockThrottled last = new MockThrottled(); + throttler.register(last); + + // Then + // registration will enqueue the callback, and it should not + // take any time to proceed (ie: we should not be blocked) + // and there should be an element in the queue + assertThatStage(last.started).isNotDone(); + assertThatStage(last.ended).isNotDone(); + assertThat(throttler.getQueue()).containsExactly(last); + + // we still have not released, so old throttled threads should be waiting + for (int i = 0; i < throttled.length; i++) { + assertThatStage(throttled[i].started).isDone(); + assertThatStage(throttled[i].ended).isNotDone(); + } + + // now let us release .. + for (int i = 0; i < latches.length; i++) { + latches[i].countDown(); + } + + // .. 
and check everything finished up OK + for (int i = 0; i < latches.length; i++) { + assertThatStage(throttled[i].started).isSuccess(); + assertThatStage(throttled[i].ended).isSuccess(); + } + + // for good measure, we will also wait for the enqueued to complete + assertThatStage(last.started).isSuccess(); + assertThatStage(last.ended).isSuccess(); + + for (int i = 0; i < threads.length; i++) { + threads[i].join(1_000); + } + } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java index ab723b150b0..9e54e3d511f 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/MockThrottled.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,21 +19,45 @@ import com.datastax.oss.driver.api.core.RequestThrottlingException; import com.datastax.oss.driver.api.core.session.throttling.Throttled; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; import edu.umd.cs.findbugs.annotations.NonNull; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import java.util.concurrent.CountDownLatch; class MockThrottled implements Throttled { + final CompletionStage started = new CompletableFuture<>(); + final CompletionStage ended = new CompletableFuture<>(); + final CountDownLatch canRelease; + + public MockThrottled() { + this(new CountDownLatch(0)); + } - final CompletionStage started = new CompletableFuture<>(); + /* + * The releaseLatch can be provided to add some delay before the + * task readiness/fail callbacks complete. This can be used, eg, to + * imitate a slow callback. 
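+   * A zero-count latch (the no-arg constructor) releases immediately, so the
+   * default behavior stays non-blocking.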
+ */ + public MockThrottled(CountDownLatch releaseLatch) { + this.canRelease = releaseLatch; + } @Override public void onThrottleReady(boolean wasDelayed) { - started.toCompletableFuture().complete(wasDelayed); + started.toCompletableFuture().complete(null); + awaitRelease(); + ended.toCompletableFuture().complete(wasDelayed); } @Override public void onThrottleFailure(@NonNull RequestThrottlingException error) { - started.toCompletableFuture().completeExceptionally(error); + started.toCompletableFuture().complete(null); + awaitRelease(); + ended.toCompletableFuture().completeExceptionally(error); + } + + private void awaitRelease() { + Uninterruptibles.awaitUninterruptibly(canRelease); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java index 26b52403e8f..1e15610bf7b 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/RateLimitingRequestThrottlerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +25,7 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfig; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.session.throttling.Throttled; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.context.NettyOptions; import com.datastax.oss.driver.internal.core.util.concurrent.ScheduledTaskCapturingEventLoop; @@ -31,6 +34,7 @@ import java.time.Duration; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -94,7 +98,7 @@ public void should_start_immediately_when_under_capacity() { throttler.register(request); // Then - assertThatStage(request.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); assertThat(throttler.getStoredPermits()).isEqualTo(4); assertThat(throttler.getQueue()).isEmpty(); } @@ -113,7 +117,7 @@ public void should_allow_new_request_when_under_rate() { throttler.register(request); // Then - assertThatStage(request.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); + assertThatStage(request.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isFalse()); assertThat(throttler.getStoredPermits()).isEqualTo(0); assertThat(throttler.getQueue()).isEmpty(); } @@ -132,7 +136,7 @@ public void should_enqueue_when_over_rate() { throttler.register(request); // Then - assertThatStage(request.started).isNotDone(); + assertThatStage(request.ended).isNotDone(); assertThat(throttler.getStoredPermits()).isEqualTo(0); assertThat(throttler.getQueue()).containsExactly(request); @@ -156,12 +160,21 @@ public void should_reject_when_queue_is_full() { throttler.register(request); // Then - assertThatStage(request.started) + assertThatStage(request.ended) .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); } @Test public void should_remove_timed_out_request_from_queue() { + testRemoveInvalidEventFromQueue(throttler::signalTimeout); + } + + @Test + public void should_remove_cancel_request_from_queue() { + testRemoveInvalidEventFromQueue(throttler::signalCancel); + } + + private void testRemoveInvalidEventFromQueue(Consumer completeCallback) { // Given for (int i = 0; i < 5; i++) { throttler.register(new MockThrottled()); @@ -172,10 +185,10 @@ public void should_remove_timed_out_request_from_queue() { throttler.register(queued2); // When - throttler.signalTimeout(queued1); + completeCallback.accept(queued1); // Then - assertThatStage(queued2.started).isNotDone(); + assertThatStage(queued2.ended).isNotDone(); assertThat(throttler.getStoredPermits()).isEqualTo(0); assertThat(throttler.getQueue()).containsExactly(queued2); } @@ -189,10 +202,10 @@ public void should_dequeue_when_draining_task_runs() { MockThrottled queued1 = new MockThrottled(); throttler.register(queued1); - assertThatStage(queued1.started).isNotDone(); + assertThatStage(queued1.ended).isNotDone(); MockThrottled queued2 = new MockThrottled(); 
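    // With the stored permits already spent, neither request can start; both sit in the queue until the draining task runs.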
throttler.register(queued2); - assertThatStage(queued2.started).isNotDone(); + assertThatStage(queued2.ended).isNotDone(); assertThat(throttler.getStoredPermits()).isEqualTo(0); assertThat(throttler.getQueue()).hasSize(2); @@ -217,8 +230,8 @@ public void should_dequeue_when_draining_task_runs() { task.run(); // Then - assertThatStage(queued1.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); - assertThatStage(queued2.started).isNotDone(); + assertThatStage(queued1.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); + assertThatStage(queued2.ended).isNotDone(); assertThat(throttler.getStoredPermits()).isEqualTo(0); assertThat(throttler.getQueue()).containsExactly(queued2); // task reschedules itself since it did not empty the queue @@ -231,7 +244,7 @@ public void should_dequeue_when_draining_task_runs() { task.run(); // Then - assertThatStage(queued2.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); + assertThatStage(queued2.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); assertThat(throttler.getStoredPermits()).isEqualTo(0); assertThat(throttler.getQueue()).isEmpty(); assertThat(adminExecutor.nextTask()).isNull(); @@ -273,14 +286,14 @@ public void should_keep_accumulating_time_if_no_permits_created() { // Then MockThrottled queued = new MockThrottled(); throttler.register(queued); - assertThatStage(queued.started).isNotDone(); + assertThatStage(queued.ended).isNotDone(); // When clock.add(ONE_HUNDRED_MILLISECONDS); adminExecutor.nextTask().run(); // Then - assertThatStage(queued.started).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); + assertThatStage(queued.ended).isSuccess(wasDelayed -> assertThat(wasDelayed).isTrue()); } @Test @@ -293,7 +306,7 @@ public void should_reject_enqueued_when_closing() { for (int i = 0; i < 10; i++) { MockThrottled request = new MockThrottled(); throttler.register(request); - assertThatStage(request.started).isNotDone(); + assertThatStage(request.ended).isNotDone(); enqueued.add(request); } @@ -302,7 +315,7 @@ public void should_reject_enqueued_when_closing() { // Then for (MockThrottled request : enqueued) { - assertThatStage(request.started) + assertThatStage(request.ended) .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); } @@ -311,7 +324,7 @@ public void should_reject_enqueued_when_closing() { throttler.register(request); // Then - assertThatStage(request.started) + assertThatStage(request.ended) .isFailed(error -> assertThat(error).isInstanceOf(RequestThrottlingException.class)); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java index b12fbf35582..1489d1da345 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/session/throttling/SettableNanoClock.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +27,7 @@ public long nanoTime() { } // This is racy, but in our tests it's never read concurrently - @SuppressWarnings("NonAtomicVolatileUpdate") + @SuppressWarnings({"NonAtomicVolatileUpdate", "NonAtomicOperationOnVolatileField"}) void add(long increment) { nanoTime += increment; } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java new file mode 100644 index 00000000000..d07b45c21df --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/ssl/ReloadingKeyManagerFactoryTest.java @@ -0,0 +1,270 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.ssl; + +import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +import java.io.IOException; +import java.io.InputStream; +import java.math.BigInteger; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.SocketException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.cert.X509Certificate; +import java.util.Optional; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Supplier; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLServerSocket; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.TrustManagerFactory; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ReloadingKeyManagerFactoryTest { + private static final Logger logger = + LoggerFactory.getLogger(ReloadingKeyManagerFactoryTest.class); + + static final Path CERT_BASE = + Paths.get( + ReloadingKeyManagerFactoryTest.class + .getResource( + String.format("/%s/certs/", ReloadingKeyManagerFactoryTest.class.getSimpleName())) + .getPath()); + static final Path SERVER_KEYSTORE_PATH = CERT_BASE.resolve("server.keystore"); + static final Path SERVER_TRUSTSTORE_PATH = CERT_BASE.resolve("server.truststore"); + + static final Path ORIGINAL_CLIENT_KEYSTORE_PATH = CERT_BASE.resolve("client-original.keystore"); + static final Path ALTERNATE_CLIENT_KEYSTORE_PATH = CERT_BASE.resolve("client-alternate.keystore"); + static final BigInteger ORIGINAL_CLIENT_KEYSTORE_CERT_SERIAL = + convertSerial("7372a966"); // 1936894310 + static final BigInteger ALTERNATE_CLIENT_KEYSTORE_CERT_SERIAL = + convertSerial("e50bf31"); // 240172849 + + // File at this path will change content + static final Path TMP_CLIENT_KEYSTORE_PATH; + + static { + try { + TMP_CLIENT_KEYSTORE_PATH = + Files.createTempFile(ReloadingKeyManagerFactoryTest.class.getSimpleName(), null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + static final Path CLIENT_TRUSTSTORE_PATH = CERT_BASE.resolve("client.truststore"); + static final String CERTSTORE_PASSWORD = "changeit"; + + private static TrustManagerFactory buildTrustManagerFactory() { + TrustManagerFactory tmf; + try (InputStream tsf = Files.newInputStream(CLIENT_TRUSTSTORE_PATH)) { + KeyStore ts = KeyStore.getInstance("JKS"); + char[] password = CERTSTORE_PASSWORD.toCharArray(); + ts.load(tsf, password); + tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ts); + } catch (Exception e) { + throw new RuntimeException(e); + } + return tmf; + } + + private static SSLContext buildServerSslContext() { + try { + SSLContext context = SSLContext.getInstance("SSL"); + + TrustManagerFactory tmf; + try (InputStream tsf = Files.newInputStream(SERVER_TRUSTSTORE_PATH)) { + KeyStore ts = KeyStore.getInstance("JKS"); + char[] password = CERTSTORE_PASSWORD.toCharArray(); + ts.load(tsf, password); + tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ts); + } 
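+      // The truststore above decides which client certificates this test server
+      // accepts; the keystore below supplies the server's own certificate chain
+      // and private key, presented to clients during the TLS handshake.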
+ + KeyManagerFactory kmf; + try (InputStream ksf = Files.newInputStream(SERVER_KEYSTORE_PATH)) { + KeyStore ks = KeyStore.getInstance("JKS"); + char[] password = CERTSTORE_PASSWORD.toCharArray(); + ks.load(ksf, password); + kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(ks, password); + } + + context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + return context; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Test + public void client_certificates_should_reload() throws Exception { + Files.copy( + ORIGINAL_CLIENT_KEYSTORE_PATH, TMP_CLIENT_KEYSTORE_PATH, REPLACE_EXISTING, COPY_ATTRIBUTES); + + final BlockingQueue> peerCertificates = + new LinkedBlockingQueue<>(1); + + // Create a listening socket. Make sure there's no backlog so each accept is in order. + SSLContext serverSslContext = buildServerSslContext(); + final SSLServerSocket server = + (SSLServerSocket) serverSslContext.getServerSocketFactory().createServerSocket(); + server.bind(new InetSocketAddress(0), 1); + server.setUseClientMode(false); + server.setNeedClientAuth(true); + Thread serverThread = + new Thread( + () -> { + while (true) { + try { + logger.info("Server accepting client"); + final SSLSocket conn = (SSLSocket) server.accept(); + logger.info("Server accepted client {}", conn); + conn.addHandshakeCompletedListener( + event -> { + boolean offer; + try { + // Transfer certificates to client thread once handshake is complete, so + // it can safely close + // the socket + offer = + peerCertificates.offer( + Optional.of((X509Certificate[]) event.getPeerCertificates())); + } catch (SSLPeerUnverifiedException e) { + offer = peerCertificates.offer(Optional.empty()); + } + Assert.assertTrue(offer); + }); + logger.info("Server starting handshake"); + // Without this, client handshake blocks + conn.startHandshake(); + } catch (IOException e) { + // Not sure why I sometimes see ~thousands of these locally + if (e instanceof SocketException && e.getMessage().contains("Socket closed")) + return; + logger.info("Server accept error", e); + } + } + }); + serverThread.setName(String.format("%s-serverThread", this.getClass().getSimpleName())); + serverThread.setDaemon(true); + serverThread.start(); + + final ReloadingKeyManagerFactory kmf = + ReloadingKeyManagerFactory.create( + TMP_CLIENT_KEYSTORE_PATH, CERTSTORE_PASSWORD, Optional.empty()); + // Need a tmf that tells the server to send its certs + final TrustManagerFactory tmf = buildTrustManagerFactory(); + + // Check original client certificate + testClientCertificates( + kmf, + tmf, + server.getLocalSocketAddress(), + () -> { + try { + return peerCertificates.poll(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }, + certs -> { + Assert.assertEquals(1, certs.length); + X509Certificate cert = certs[0]; + Assert.assertEquals(ORIGINAL_CLIENT_KEYSTORE_CERT_SERIAL, cert.getSerialNumber()); + }); + + // Update keystore content + logger.info("Updating keystore file with new content"); + Files.copy( + ALTERNATE_CLIENT_KEYSTORE_PATH, + TMP_CLIENT_KEYSTORE_PATH, + REPLACE_EXISTING, + COPY_ATTRIBUTES); + kmf.reload(); + + // Check that alternate client certificate was applied + testClientCertificates( + kmf, + tmf, + server.getLocalSocketAddress(), + () -> { + try { + return peerCertificates.poll(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }, + certs -> { + Assert.assertEquals(1, certs.length); + 
+          X509Certificate cert = certs[0];
+          Assert.assertEquals(ALTERNATE_CLIENT_KEYSTORE_CERT_SERIAL, cert.getSerialNumber());
+        });
+
+    kmf.close();
+    server.close();
+  }
+
+  private static void testClientCertificates(
+      KeyManagerFactory kmf,
+      TrustManagerFactory tmf,
+      SocketAddress serverAddress,
+      Supplier<Optional<X509Certificate[]>> certsSupplier,
+      Consumer<X509Certificate[]> certsConsumer)
+      throws NoSuchAlgorithmException, KeyManagementException, IOException {
+    SSLContext clientSslContext = SSLContext.getInstance("TLS");
+    clientSslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
+    final SSLSocket client = (SSLSocket) clientSslContext.getSocketFactory().createSocket();
+    logger.info("Client connecting");
+    client.connect(serverAddress);
+    logger.info("Client doing handshake");
+    client.startHandshake();
+
+    final Optional<X509Certificate[]> lastCertificate = certsSupplier.get();
+    logger.info("Client got its certificate back from the server; closing socket");
+    client.close();
+    Assert.assertNotNull(lastCertificate);
+    Assert.assertTrue(lastCertificate.isPresent());
+    logger.info("Client got its certificate back from server: {}", lastCertificate);
+
+    certsConsumer.accept(lastCertificate.get());
+  }
+
+  private static BigInteger convertSerial(String hex) {
+    final BigInteger serial = new BigInteger(Integer.valueOf(hex, 16).toString());
+    logger.info("Serial hex {} is {}", hex, serial);
+    return serial;
+  }
+}
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java
index fa4adec9e6c..f1827eb8a86 100644
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/time/AtomicTimestampGeneratorTest.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java
index 59324205872..7074dd4ccc2 100644
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/time/MonotonicTimestampGeneratorTestBase.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java index 6de3a2b5e41..5d9ed8b2ceb 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/time/ThreadLocalTimestampGeneratorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java new file mode 100644 index 00000000000..8dcad99b459 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/MultiplexingRequestTrackerTest.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.internal.core.tracker;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.willThrow;
+import static org.mockito.Mockito.verify;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.Appender;
+import com.datastax.oss.driver.api.core.DriverExecutionException;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.session.Request;
+import com.datastax.oss.driver.api.core.session.Session;
+import com.datastax.oss.driver.api.core.tracker.RequestTracker;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.slf4j.LoggerFactory;
+
+@RunWith(MockitoJUnitRunner.Strict.class)
+public class MultiplexingRequestTrackerTest {
+
+  @Mock private RequestTracker child1;
+  @Mock private RequestTracker child2;
+  @Mock private Request request;
+  @Mock private DriverExecutionProfile profile;
+  @Mock private Node node;
+  @Mock private Session session;
+
+  @Mock private Appender<ILoggingEvent> appender;
+  @Captor private ArgumentCaptor<ILoggingEvent> loggingEventCaptor;
+
+  private Logger logger;
+  private Level initialLogLevel;
+
+  private final Exception error = new DriverExecutionException(new NullPointerException());
+
+  @Before
+  public void addAppenders() {
+    logger = (Logger) LoggerFactory.getLogger(MultiplexingRequestTracker.class);
+    initialLogLevel = logger.getLevel();
+    logger.setLevel(Level.WARN);
+    logger.addAppender(appender);
+  }
+
+  @After
+  public void removeAppenders() {
+    logger.detachAppender(appender);
+    logger.setLevel(initialLogLevel);
+  }
+
+  @Test
+  public void should_register() {
+    // given
+    MultiplexingRequestTracker tracker = new MultiplexingRequestTracker();
+    // when
+    tracker.register(child1);
+    tracker.register(child2);
+    // then
+    assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2);
+  }
+
+  @Test
+  public void should_flatten_child_multiplexing_tracker_via_constructor() {
+    // given
+    MultiplexingRequestTracker tracker =
+        new MultiplexingRequestTracker(new MultiplexingRequestTracker(child1, child2));
+    // when
+    // then
+    assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2);
+  }
+
+  @Test
+  public void should_flatten_child_multiplexing_tracker_via_register() {
+    // given
+    MultiplexingRequestTracker tracker = new MultiplexingRequestTracker();
+    // when
+    tracker.register(new MultiplexingRequestTracker(child1, child2));
+    // then
+    assertThat(tracker).extracting("trackers").asList().hasSize(2).contains(child1, child2);
+  }
+
+  @Test
+  public void should_notify_onSuccess() {
+    // given
+    MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2);
+    willThrow(new NullPointerException())
+        .given(child1)
+        .onSuccess(request, 123456L, profile, node, "test");
+    // when
+    tracker.onSuccess(request, 123456L, profile, node, "test");
+    // then
+    verify(child1).onSuccess(request, 123456L, profile, node, "test");
+    verify(child2).onSuccess(request, 123456L, profile, node, "test");
+    verify(appender).doAppend(loggingEventCaptor.capture());
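+    // child1's failure is swallowed and logged; child2 must still have been notified above.
+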
assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "[test] Unexpected error while notifying request tracker child1 of an onSuccess event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onError() { + // given + MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); + willThrow(new NullPointerException()) + .given(child1) + .onError(request, error, 123456L, profile, node, "test"); + // when + tracker.onError(request, error, 123456L, profile, node, "test"); + // then + verify(child1).onError(request, error, 123456L, profile, node, "test"); + verify(child2).onError(request, error, 123456L, profile, node, "test"); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "[test] Unexpected error while notifying request tracker child1 of an onError event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onNodeSuccess() { + // given + MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); + willThrow(new NullPointerException()) + .given(child1) + .onNodeSuccess(request, 123456L, profile, node, "test"); + // when + tracker.onNodeSuccess(request, 123456L, profile, node, "test"); + // then + verify(child1).onNodeSuccess(request, 123456L, profile, node, "test"); + verify(child2).onNodeSuccess(request, 123456L, profile, node, "test"); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "[test] Unexpected error while notifying request tracker child1 of an onNodeSuccess event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onNodeError() { + // given + MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); + willThrow(new NullPointerException()) + .given(child1) + .onNodeError(request, error, 123456L, profile, node, "test"); + // when + tracker.onNodeError(request, error, 123456L, profile, node, "test"); + // then + verify(child1).onNodeError(request, error, 123456L, profile, node, "test"); + verify(child2).onNodeError(request, error, 123456L, profile, node, "test"); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "[test] Unexpected error while notifying request tracker child1 of an onNodeError event. (NullPointerException: null)"); + } + + @Test + public void should_notify_onSessionReady() { + // given + MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); + willThrow(new NullPointerException()).given(child1).onSessionReady(session); + given(session.getName()).willReturn("test"); + // when + tracker.onSessionReady(session); + // then + verify(child1).onSessionReady(session); + verify(child2).onSessionReady(session); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "[test] Unexpected error while notifying request tracker child1 of an onSessionReady event. 
(NullPointerException: null)"); + } + + @Test + public void should_notify_close() throws Exception { + // given + MultiplexingRequestTracker tracker = new MultiplexingRequestTracker(child1, child2); + Exception child1Error = new NullPointerException(); + willThrow(child1Error).given(child1).close(); + // when + tracker.close(); + // then + verify(child1).close(); + verify(child2).close(); + verify(appender).doAppend(loggingEventCaptor.capture()); + assertThat(loggingEventCaptor.getAllValues().stream().map(ILoggingEvent::getFormattedMessage)) + .contains( + "Unexpected error while closing request tracker child1. (NullPointerException: null)"); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java new file mode 100644 index 00000000000..fb1883e125f --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestIdGeneratorTest.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.tracker; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.Strict.class) +public class RequestIdGeneratorTest { + @Mock private InternalDriverContext context; + @Mock private Statement statement; + + @Test + public void uuid_generator_should_generate() { + // given + UuidRequestIdGenerator generator = new UuidRequestIdGenerator(context); + // when + String parentId = generator.getSessionRequestId(); + String requestId = generator.getNodeRequestId(statement, parentId); + // then + // e.g. "550e8400-e29b-41d4-a716-446655440000", which is 36 characters long + assertThat(parentId.length()).isEqualTo(36); + // e.g. "550e8400-e29b-41d4-a716-446655440000-550e8400-e29b-41d4-a716-446655440000", which is 73 + // characters long + assertThat(requestId.length()).isEqualTo(73); + } + + @Test + public void w3c_generator_should_generate() { + // given + W3CContextRequestIdGenerator generator = new W3CContextRequestIdGenerator(context); + // when + String parentId = generator.getSessionRequestId(); + String requestId = generator.getNodeRequestId(statement, parentId); + // then + // e.g. 
"4bf92f3577b34da6a3ce929d0e0e4736", which is 32 characters long + assertThat(parentId.length()).isEqualTo(32); + // According to W3C "traceparent" spec, + // https://www.w3.org/TR/trace-context/#traceparent-header-field-values + // e.g. "00-4bf92f3577b34da6a3ce929d0e0e4736-a3ce929d0e0e4736-01", which 55 characters long + assertThat(requestId.length()).isEqualTo(55); + } + + @Test + public void w3c_generator_default_payloadkey() { + W3CContextRequestIdGenerator w3cGenerator = new W3CContextRequestIdGenerator(context); + assertThat(w3cGenerator.getCustomPayloadKey()) + .isEqualTo(RequestIdGenerator.DEFAULT_PAYLOAD_KEY); + } + + @Test + public void w3c_generator_provided_payloadkey() { + String someString = RandomStringUtils.random(12); + W3CContextRequestIdGenerator w3cGenerator = new W3CContextRequestIdGenerator(someString); + assertThat(w3cGenerator.getCustomPayloadKey()).isEqualTo(someString); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java index 160e5d04dd9..e9fb518b51f 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/tracker/RequestLogFormatterTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java index e8dac19237b..d798df8d191 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeDetachableTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java index ed53b0b4e65..ccf53dd3a65 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/DataTypeSerializationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java new file mode 100644 index 00000000000..f9ae1d24f77 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/PrimitiveTypeTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.TestDataProviders; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.protocol.internal.ProtocolConstants; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Locale; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class PrimitiveTypeTest { + + @Test + public void should_report_protocol_code() { + assertThat(DataTypes.ASCII.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.ASCII); + assertThat(DataTypes.BIGINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BIGINT); + assertThat(DataTypes.BLOB.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BLOB); + assertThat(DataTypes.BOOLEAN.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.BOOLEAN); + assertThat(DataTypes.COUNTER.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.COUNTER); + assertThat(DataTypes.DECIMAL.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DECIMAL); + assertThat(DataTypes.DOUBLE.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DOUBLE); + assertThat(DataTypes.FLOAT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.FLOAT); + assertThat(DataTypes.INT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.INT); + assertThat(DataTypes.TIMESTAMP.getProtocolCode()) + .isEqualTo(ProtocolConstants.DataType.TIMESTAMP); + assertThat(DataTypes.UUID.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.UUID); + assertThat(DataTypes.VARINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.VARINT); + assertThat(DataTypes.TIMEUUID.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TIMEUUID); + assertThat(DataTypes.INET.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.INET); + assertThat(DataTypes.DATE.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DATE); + assertThat(DataTypes.TEXT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.VARCHAR); + assertThat(DataTypes.TIME.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TIME); + assertThat(DataTypes.SMALLINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.SMALLINT); + assertThat(DataTypes.TINYINT.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.TINYINT); + assertThat(DataTypes.DURATION.getProtocolCode()).isEqualTo(ProtocolConstants.DataType.DURATION); + } + + @Test + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_format_as_cql(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(DataTypes.ASCII.asCql(true, true)).isEqualTo("ascii"); + assertThat(DataTypes.BIGINT.asCql(true, true)).isEqualTo("bigint"); + assertThat(DataTypes.BLOB.asCql(true, true)).isEqualTo("blob"); + assertThat(DataTypes.BOOLEAN.asCql(true, true)).isEqualTo("boolean"); + assertThat(DataTypes.COUNTER.asCql(true, true)).isEqualTo("counter"); + assertThat(DataTypes.DECIMAL.asCql(true, true)).isEqualTo("decimal"); + assertThat(DataTypes.DOUBLE.asCql(true, true)).isEqualTo("double"); + assertThat(DataTypes.FLOAT.asCql(true, true)).isEqualTo("float"); + assertThat(DataTypes.INT.asCql(true, true)).isEqualTo("int"); + assertThat(DataTypes.TIMESTAMP.asCql(true, true)).isEqualTo("timestamp"); + assertThat(DataTypes.UUID.asCql(true, true)).isEqualTo("uuid"); + assertThat(DataTypes.VARINT.asCql(true, 
true)).isEqualTo("varint"); + assertThat(DataTypes.TIMEUUID.asCql(true, true)).isEqualTo("timeuuid"); + assertThat(DataTypes.INET.asCql(true, true)).isEqualTo("inet"); + assertThat(DataTypes.DATE.asCql(true, true)).isEqualTo("date"); + assertThat(DataTypes.TEXT.asCql(true, true)).isEqualTo("text"); + assertThat(DataTypes.TIME.asCql(true, true)).isEqualTo("time"); + assertThat(DataTypes.SMALLINT.asCql(true, true)).isEqualTo("smallint"); + assertThat(DataTypes.TINYINT.asCql(true, true)).isEqualTo("tinyint"); + assertThat(DataTypes.DURATION.asCql(true, true)).isEqualTo("duration"); + } finally { + Locale.setDefault(def); + } + } + + @Test + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_format_as_string(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + assertThat(DataTypes.ASCII.toString()).isEqualTo("ASCII"); + assertThat(DataTypes.BIGINT.toString()).isEqualTo("BIGINT"); + assertThat(DataTypes.BLOB.toString()).isEqualTo("BLOB"); + assertThat(DataTypes.BOOLEAN.toString()).isEqualTo("BOOLEAN"); + assertThat(DataTypes.COUNTER.toString()).isEqualTo("COUNTER"); + assertThat(DataTypes.DECIMAL.toString()).isEqualTo("DECIMAL"); + assertThat(DataTypes.DOUBLE.toString()).isEqualTo("DOUBLE"); + assertThat(DataTypes.FLOAT.toString()).isEqualTo("FLOAT"); + assertThat(DataTypes.INT.toString()).isEqualTo("INT"); + assertThat(DataTypes.TIMESTAMP.toString()).isEqualTo("TIMESTAMP"); + assertThat(DataTypes.UUID.toString()).isEqualTo("UUID"); + assertThat(DataTypes.VARINT.toString()).isEqualTo("VARINT"); + assertThat(DataTypes.TIMEUUID.toString()).isEqualTo("TIMEUUID"); + assertThat(DataTypes.INET.toString()).isEqualTo("INET"); + assertThat(DataTypes.DATE.toString()).isEqualTo("DATE"); + assertThat(DataTypes.TEXT.toString()).isEqualTo("TEXT"); + assertThat(DataTypes.TIME.toString()).isEqualTo("TIME"); + assertThat(DataTypes.SMALLINT.toString()).isEqualTo("SMALLINT"); + assertThat(DataTypes.TINYINT.toString()).isEqualTo("TINYINT"); + assertThat(DataTypes.DURATION.toString()).isEqualTo("DURATION"); + } finally { + Locale.setDefault(def); + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java new file mode 100644 index 00000000000..43c01ea35dc --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/AsciiCodecTest.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.internal.core.type.codec;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import org.junit.Test;
+
+public class AsciiCodecTest extends CodecTestBase<String> {
+  public AsciiCodecTest() {
+    this.codec = TypeCodecs.ASCII;
+  }
+
+  @Test
+  public void should_encode() {
+    assertThat(encode("hello")).isEqualTo("0x68656c6c6f");
+    assertThat(encode(null)).isNull();
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void should_fail_to_encode_non_ascii() {
+    encode("hëllo");
+  }
+
+  @Test
+  public void should_decode() {
+    assertThat(decode("0x68656c6c6f")).isEqualTo("hello");
+    assertThat(decode("0x")).isEmpty();
+    assertThat(decode(null)).isNull();
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void should_fail_to_decode_non_ascii() {
+    decode("0x68c3ab6c6c6f");
+  }
+}
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java
index a2d7fd91ee0..c5360c90a7b 100644
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BigIntCodecTest.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -67,7 +69,7 @@ public void should_fail_to_parse_invalid_input() {
 
   @Test(expected = IllegalArgumentException.class)
   public void should_fail_to_parse_if_out_of_range() {
-    parse(Long.toString(Long.MAX_VALUE) + "0");
+    parse(Long.MAX_VALUE + "0");
   }
 
   @Test
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java
index c0448e38dbd..ec1ab294911 100644
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BlobCodecTest.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java index 9433984011f..57fcef1235d 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/BooleanCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java index 5fba391f94f..8a00cceda09 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CodecTestBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java index 70dbd91c305..c18c6e76d7c 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CounterCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -67,7 +69,7 @@ public void should_fail_to_parse_invalid_input() { @Test(expected = IllegalArgumentException.class) public void should_fail_to_parse_if_out_of_range() { - parse(Long.toString(Long.MAX_VALUE) + "0"); + parse(Long.MAX_VALUE + "0"); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java index 3a8e36d8c42..43526f72e57 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlDurationCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java index f52d139f1b4..4f04f3defec 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,56 +17,31 @@
  */
 package com.datastax.oss.driver.internal.core.type.codec;
 
-import com.datastax.oss.driver.api.core.ProtocolVersion;
-import com.datastax.oss.driver.api.core.type.DataType;
-import com.datastax.oss.driver.api.core.type.DataTypes;
-import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
+import com.datastax.oss.driver.api.core.type.codec.MappingCodec;
 import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
 import com.datastax.oss.driver.api.core.type.reflect.GenericType;
-import edu.umd.cs.findbugs.annotations.NonNull;
-import java.nio.ByteBuffer;
+import edu.umd.cs.findbugs.annotations.Nullable;
 
 /**
  * A sample user codec implementation that we use in our tests.
 *
 * <p>It maps a CQL int to a Java string containing its textual representation.
 */
-public class CqlIntToStringCodec implements TypeCodec<String> {
-
-  @NonNull
-  @Override
-  public GenericType<String> getJavaType() {
-    return GenericType.STRING;
-  }
+public class CqlIntToStringCodec extends MappingCodec<Integer, String> {
 
-  @NonNull
-  @Override
-  public DataType getCqlType() {
-    return DataTypes.INT;
-  }
-
-  @Override
-  public ByteBuffer encode(String value, @NonNull ProtocolVersion protocolVersion) {
-    if (value == null) {
-      return null;
-    } else {
-      return TypeCodecs.INT.encode(Integer.parseInt(value), protocolVersion);
-    }
-  }
-
-  @Override
-  public String decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) {
-    return TypeCodecs.INT.decode(bytes, protocolVersion).toString();
+  public CqlIntToStringCodec() {
+    super(TypeCodecs.INT, GenericType.STRING);
   }
 
-  @NonNull
+  @Nullable
   @Override
-  public String format(String value) {
-    throw new UnsupportedOperationException("Not implemented for this test");
+  protected String innerToOuter(@Nullable Integer value) {
+    return value == null ? null : value.toString();
   }
 
+  @Nullable
   @Override
-  public String parse(String value) {
-    throw new UnsupportedOperationException("Not implemented for this test");
+  protected Integer outerToInner(@Nullable String value) {
+    return value == null ? null : Integer.parseInt(value);
   }
 }
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java
index 545b03c1f4f..a832b51cfec 100644
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CustomCodecTest.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java
index 55fcf4c78a9..48388fbc692 100644
--- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DateCodecTest.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java index b0c7e2bec79..eac360fdcc5 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DecimalCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java index 2c72249b597..f27081aa784 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/DoubleCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java index 6864e3a788a..62d5b549153 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/FloatCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java index e47c74ba8c1..e10fa695ba0 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/InetCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java index 931934e3f55..b5268a7e844 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/IntCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java index 7260a2ee3ac..975aa3a1428 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ListCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -93,6 +95,20 @@ public void should_decode_non_empty_list() { .containsExactly(1, 2, 3); } + @Test + public void should_decode_list_with_null_elements() { + when(elementCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2); + assertThat( + decode( + "0x" + + "00000002" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + + "00000002" // size of element 2 + + "0002" // contents of element 2 + )) + .containsExactly(null, 2); + } + @Test public void should_format_null_list() { assertThat(format(null)).isEqualTo("NULL"); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java index 96de17f75e8..94cb33a5a99 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MapCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -118,6 +120,16 @@ public void should_decode_non_empty_map() {
         .containsEntry("c", 3);
   }
 
+  @Test
+  public void should_decode_map_with_null_elements() {
+    when(keyCodec.decode(Bytes.fromHexString("0x10"), ProtocolVersion.DEFAULT)).thenReturn("a");
+    when(valueCodec.decode(Bytes.fromHexString("0x0002"), ProtocolVersion.DEFAULT)).thenReturn(2);
+    assertThat(decode("0x" + "00000002" + "0000000110" + "FFFFFFFF" + "FFFFFFFF" + "000000020002"))
+        .containsOnlyKeys("a", null)
+        .containsEntry("a", null)
+        .containsEntry(null, 2);
+  }
+
   @Test
   public void should_format_null_map() {
     assertThat(format(null)).isEqualTo("NULL");
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java
new file mode 100644
index 00000000000..f78dc774f62
--- /dev/null
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/MappingCodecTest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.oss.driver.api.core.type.codec.MappingCodec;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import org.junit.Test;
+
+public class MappingCodecTest extends CodecTestBase<String> {
+
+  public MappingCodecTest() {
+    this.codec = new CqlIntToStringCodec();
+  }
+
+  @Test
+  public void should_encode() {
+    // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try
+    // a thousand different values.
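+    // "0" is first parsed to the int 0 by the mapping codec, hence the four zero bytes below.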
+ assertThat(encode("0")).isEqualTo("0x00000000"); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + assertThat(decode("0x00000000")).isEqualTo("0"); + assertThat(decode("0x")).isNull(); + assertThat(decode(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_not_enough_bytes() { + decode("0x0000"); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_too_many_bytes() { + decode("0x0000000000000000"); + } + + @Test + public void should_format() { + assertThat(format("0")).isEqualTo("0"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_parse() { + assertThat(parse("0")).isEqualTo("0"); + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_invalid_input() { + parse("not an int"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.of(String.class))).isTrue(); + assertThat(codec.accepts(GenericType.of(int.class))).isFalse(); + assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(String.class)).isTrue(); + assertThat(codec.accepts(int.class)).isFalse(); + assertThat(codec.accepts(Integer.class)).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts("123")).isTrue(); + // codec accepts any String, even if it can't be encoded + assertThat(codec.accepts("not an int")).isTrue(); + assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); + } + + @Test + public void should_expose_inner_and_outer_java_types() { + assertThat(((MappingCodec) codec).getInnerJavaType()).isEqualTo(GenericType.INTEGER); + assertThat(codec.getJavaType()).isEqualTo(GenericType.STRING); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java index 9e6b590d2f4..a302357c9f3 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SetCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -93,6 +95,12 @@ public void should_decode_non_empty_set() { .containsExactly(1, 2, 3); } + @Test + public void should_decode_set_with_null_elements() { + when(elementCodec.decode(Bytes.fromHexString("0x01"), ProtocolVersion.DEFAULT)).thenReturn(1); + assertThat(decode("0x" + "00000002" + "0000000101" + "FFFFFFFF")).containsExactly(1, null); + } + @Test public void should_format_null_set() { assertThat(format(null)).isEqualTo("NULL"); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java new file mode 100644 index 00000000000..3f40efb16ac --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SimpleBlobCodecTest.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.protocol.internal.util.Bytes; +import java.nio.ByteBuffer; +import org.junit.Test; + +public class SimpleBlobCodecTest extends CodecTestBase { + + private static final ByteBuffer BUFFER = Bytes.fromHexString("0xcafebabe"); + private static final byte[] ARRAY = Bytes.getArray(Bytes.fromHexString("0xcafebabe")); + + public SimpleBlobCodecTest() { + this.codec = ExtraTypeCodecs.BLOB_TO_ARRAY; + } + + @Test + public void should_encode() { + assertThat(encode(ARRAY)).isEqualTo("0xcafebabe"); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_not_share_position_between_input_and_encoded() { + ByteBuffer encoded = codec.encode(ARRAY, ProtocolVersion.DEFAULT); + assertThat(encoded).isNotNull(); + assertThat(ARRAY).isEqualTo(Bytes.getArray(encoded)); + } + + @Test + public void should_decode() { + assertThat(decode("0xcafebabe")).isEqualTo(ARRAY); + assertThat(decode("0x")).hasSize(0); + assertThat(decode(null)).isNull(); + } + + @Test + public void should_not_share_position_between_decoded_and_input() { + byte[] decoded = codec.decode(BUFFER, ProtocolVersion.DEFAULT); + assertThat(decoded).isEqualTo(ARRAY); + } + + @Test + public void should_format() { + assertThat(format(ARRAY)).isEqualTo("0xcafebabe"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_parse() { + assertThat(parse("0xcafebabe")).isEqualTo(ARRAY); + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_invalid_input() { + parse("not a blob"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.of(byte[].class))).isTrue(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(byte[].class)).isTrue(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(ARRAY)).isTrue(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java index 75c436e0475..483dd0b65bd 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/SmallIntCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/StringCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java similarity index 76% rename from core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/StringCodecTest.java rename to core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java index 77f33c1ae93..a42178544d4 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/StringCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TextCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,10 +23,10 @@ import com.datastax.oss.driver.api.core.type.reflect.GenericType; import org.junit.Test; -public class StringCodecTest extends CodecTestBase { +public class TextCodecTest extends CodecTestBase { - public StringCodecTest() { - // We don't test ASCII, since it only differs by the encoding used + public TextCodecTest() { + // We will test edge cases of ASCII in AsciiCodecTest this.codec = TypeCodecs.TEXT; } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java index 6c346b145aa..6d77efd396a 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java index ae89ce5d9a8..416bee8e4df 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimeUuidCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java index 97a57c2fabc..5cfd17da622 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TimestampCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java index ae31be9dc42..358c36e9386 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TinyIntCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java index f7d609ea967..c51eea20c2e 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/TupleCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +18,10 @@ package com.datastax.oss.driver.internal.core.type.codec; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; @@ -122,6 +126,26 @@ public void should_decode_tuple() { verify(textCodec).decode(Bytes.fromHexString("0x61"), ProtocolVersion.DEFAULT); } + /** Test for JAVA-2557. Ensures that the codec can decode null fields with any negative length. 
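The native protocol writes -1 for a null field, but the decoder is expected to treat any negative length the same way, hence the -2 and Integer.MIN_VALUE cases below.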
*/ + @Test + public void should_decode_negative_element_length_as_null_field() { + TupleValue tuple = + decode( + "0x" + + "ffffffff" // field1 has length -1 + + "fffffffe" // field2 has length -2 + + "80000000" // field3 has length Integer.MIN_VALUE (-2147483648) + ); + + assertThat(tuple.isNull(0)).isTrue(); + assertThat(tuple.isNull(1)).isTrue(); + assertThat(tuple.isNull(2)).isTrue(); + + verifyZeroInteractions(intCodec); + verifyZeroInteractions(doubleCodec); + verifyZeroInteractions(textCodec); + } + @Test public void should_format_null_tuple() { assertThat(format(null)).isEqualTo("NULL"); @@ -149,7 +173,33 @@ public void should_parse_null_tuple() { } @Test - public void should_parse_tuple() { + public void should_parse_empty_tuple() { + TupleValue tuple = parse("()"); + + assertThat(tuple.isNull(0)).isTrue(); + assertThat(tuple.isNull(1)).isTrue(); + assertThat(tuple.isNull(2)).isTrue(); + + verifyNoMoreInteractions(intCodec); + verifyNoMoreInteractions(doubleCodec); + verifyNoMoreInteractions(textCodec); + } + + @Test + public void should_parse_partial_tuple() { + TupleValue tuple = parse("(1,NULL)"); + + assertThat(tuple.getInt(0)).isEqualTo(1); + assertThat(tuple.isNull(1)).isTrue(); + assertThat(tuple.isNull(2)).isTrue(); + + verify(intCodec).parse("1"); + verify(doubleCodec).parse("NULL"); + verifyNoMoreInteractions(textCodec); + } + + @Test + public void should_parse_full_tuple() { TupleValue tuple = parse("(1,NULL,'a')"); assertThat(tuple.getInt(0)).isEqualTo(1); @@ -161,9 +211,80 @@ public void should_parse_tuple() { verify(textCodec).parse("'a'"); } - @Test(expected = IllegalArgumentException.class) + @Test + public void should_parse_tuple_with_extra_whitespace() { + TupleValue tuple = parse(" ( 1 , NULL , 'a' ) "); + + assertThat(tuple.getInt(0)).isEqualTo(1); + assertThat(tuple.isNull(1)).isTrue(); + assertThat(tuple.getString(2)).isEqualTo("a"); + + verify(intCodec).parse("1"); + verify(doubleCodec).parse("NULL"); + verify(textCodec).parse("'a'"); + } + + @Test public void should_fail_to_parse_invalid_input() { - parse("not a tuple"); + // general tuple structure invalid + assertThatThrownBy(() -> parse("not a tuple")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"not a tuple\", at character 0 expecting '(' but got 'n'"); + assertThatThrownBy(() -> parse(" ( ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \" ( \", at field 0 (character 3) expecting CQL value or ')', got EOF"); + assertThatThrownBy(() -> parse("( [")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"( [\", invalid CQL value at field 0 (character 2)"); + assertThatThrownBy(() -> parse("( 12 , ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"( 12 , \", at field 1 (character 7) expecting CQL value or ')', got EOF"); + assertThatThrownBy(() -> parse("( 12 12.34 ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"( 12 12.34 \", at field 0 (character 5) expecting ',' but got '1'"); + assertThatThrownBy(() -> parse("(1234,12.34,'text'")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"(1234,12.34,'text'\", at field 2 (character 18) expecting ',' or ')', but got EOF"); + assertThatThrownBy(() -> parse("(1234,12.34,'text'))")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple 
value from \"(1234,12.34,'text'))\", at character 19 expecting EOF or blank, but got \")\""); + assertThatThrownBy(() -> parse("())")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"())\", at character 2 expecting EOF or blank, but got \")\""); + assertThatThrownBy(() -> parse("(1234,12.34,'text') extra")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"(1234,12.34,'text') extra\", at character 20 expecting EOF or blank, but got \"extra\""); + // element syntax invalid + assertThatThrownBy(() -> parse("(not a valid int,12.34,'text')")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"(not a valid int,12.34,'text')\", " + + "invalid CQL value at field 0 (character 1): " + + "Cannot parse 32-bits int value from \"not\"") + .hasRootCauseInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> parse("(1234,not a valid double,'text')")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"(1234,not a valid double,'text')\", " + + "invalid CQL value at field 1 (character 6): " + + "Cannot parse 64-bits double value from \"not\"") + .hasRootCauseInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> parse("(1234,12.34,not a valid text)")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse tuple value from \"(1234,12.34,not a valid text)\", " + + "invalid CQL value at field 2 (character 12): " + + "text or varchar values must be enclosed by single quotes") + .hasRootCauseInstanceOf(IllegalArgumentException.class); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java index 5947cfffef3..af94247f937 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +18,10 @@ package com.datastax.oss.driver.internal.core.type.codec; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; @@ -131,6 +135,54 @@ public void should_decode_udt() { verify(textCodec).decode(Bytes.fromHexString("0x61"), ProtocolVersion.DEFAULT); } + @Test + public void should_decode_udt_when_too_many_fields() { + UdtValue udt = + decode( + "0x" + + ("00000004" + "00000001") + + "ffffffff" + + ("00000001" + "61") + // extra contents + + "ffffffff"); + assertThat(udt.getInt(0)).isEqualTo(1); + assertThat(udt.isNull(1)).isTrue(); + assertThat(udt.getString(2)).isEqualTo("a"); + } + + /** Test for JAVA-2557. Ensures that the codec can decode null fields with any negative length. */ + @Test + public void should_decode_negative_element_length_as_null_field() { + UdtValue udt = + decode( + "0x" + + "ffffffff" // field1 has length -1 + + "fffffffe" // field2 has length -2 + + "80000000" // field3 has length Integer.MIN_VALUE (-2147483648) + ); + + assertThat(udt.isNull(0)).isTrue(); + assertThat(udt.isNull(1)).isTrue(); + assertThat(udt.isNull(2)).isTrue(); + + verifyZeroInteractions(intCodec); + verifyZeroInteractions(doubleCodec); + verifyZeroInteractions(textCodec); + } + + @Test + public void should_decode_absent_element_as_null_field() { + UdtValue udt = decode("0x"); + + assertThat(udt.isNull(0)).isTrue(); + assertThat(udt.isNull(1)).isTrue(); + assertThat(udt.isNull(2)).isTrue(); + + verifyZeroInteractions(intCodec); + verifyZeroInteractions(doubleCodec); + verifyZeroInteractions(textCodec); + } + @Test public void should_format_null_udt() { assertThat(format(null)).isEqualTo("NULL"); @@ -158,7 +210,33 @@ public void should_parse_null_udt() { } @Test - public void should_parse_udt() { + public void should_parse_empty_udt() { + UdtValue udt = parse("{}"); + + assertThat(udt.isNull(0)).isTrue(); + assertThat(udt.isNull(1)).isTrue(); + assertThat(udt.isNull(2)).isTrue(); + + verifyNoMoreInteractions(intCodec); + verifyNoMoreInteractions(doubleCodec); + verifyNoMoreInteractions(textCodec); + } + + @Test + public void should_parse_partial_udt() { + UdtValue udt = parse("{field1:1,field2:NULL}"); + + assertThat(udt.getInt(0)).isEqualTo(1); + assertThat(udt.isNull(1)).isTrue(); + assertThat(udt.isNull(2)).isTrue(); + + verify(intCodec).parse("1"); + verify(doubleCodec).parse("NULL"); + verifyNoMoreInteractions(textCodec); + } + + @Test + public void should_parse_full_udt() { UdtValue udt = parse("{field1:1,field2:NULL,field3:'a'}"); assertThat(udt.getInt(0)).isEqualTo(1); @@ -170,9 +248,96 @@ public void should_parse_udt() { verify(textCodec).parse("'a'"); } - @Test(expected = IllegalArgumentException.class) + @Test + public void should_parse_udt_with_extra_whitespace() { + UdtValue udt = parse(" { field1 : 1 , field2 : NULL , field3 : 'a' } "); + + assertThat(udt.getInt(0)).isEqualTo(1); + assertThat(udt.isNull(1)).isTrue(); + assertThat(udt.getString(2)).isEqualTo("a"); + + 
verify(intCodec).parse("1"); + verify(doubleCodec).parse("NULL"); + verify(textCodec).parse("'a'"); + } + + @Test public void should_fail_to_parse_invalid_input() { - parse("not a udt"); + // general UDT structure invalid + assertThatThrownBy(() -> parse("not a udt")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"not a udt\" at character 0: expecting '{' but got 'n'"); + assertThatThrownBy(() -> parse(" { ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \" { \" at character 3: expecting CQL identifier or '}', got EOF"); + assertThatThrownBy(() -> parse("{ [ ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ [ \", cannot parse a CQL identifier at character 2"); + assertThatThrownBy(() -> parse("{ field1 ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ field1 \", at field field1 (character 9) expecting ':', but got EOF"); + assertThatThrownBy(() -> parse("{ field1 ,")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ field1 ,\", at field field1 (character 9) expecting ':', but got ','"); + assertThatThrownBy(() -> parse("{nonExistentField:NULL}")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{nonExistentField:NULL}\", unknown CQL identifier at character 17: \"nonExistentField\""); + assertThatThrownBy(() -> parse("{ field1 : ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ field1 : \", invalid CQL value at field field1 (character 11)"); + assertThatThrownBy(() -> parse("{ field1 : [")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ field1 : [\", invalid CQL value at field field1 (character 11)"); + assertThatThrownBy(() -> parse("{ field1 : 1 , ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ field1 : 1 , \" at field field1 (character 15): expecting CQL identifier or '}', got EOF"); + assertThatThrownBy(() -> parse("{ field1 : 1 field2 ")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{ field1 : 1 field2 \", at field field1 (character 13) expecting ',' but got 'f'"); + assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'\", at field field3 (character 33) expecting ',' or '}', but got EOF"); + assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'}}")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'}}\", at character 34 expecting EOF or blank, but got \"}\""); + assertThatThrownBy(() -> parse("{field1:1,field2:12.34,field3:'a'} extra")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{field1:1,field2:12.34,field3:'a'} extra\", at character 35 expecting EOF or blank, but got \"extra\""); + // element syntax invalid + assertThatThrownBy(() -> parse("{field1:not a valid int,field2:NULL,field3:'a'}")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{field1:not a valid int,field2:NULL,field3:'a'}\", " + + "invalid CQL value at field field1 (character 8): " + + "Cannot 
parse 32-bits int value from \"not\"") + .hasRootCauseInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> parse("{field1:1,field2:not a valid double,field3:'a'}")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{field1:1,field2:not a valid double,field3:'a'}\", " + + "invalid CQL value at field field2 (character 17): " + + "Cannot parse 64-bits double value from \"not\"") + .hasRootCauseInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> parse("{field1:1,field2:NULL,field3:not a valid text}")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage( + "Cannot parse UDT value from \"{field1:1,field2:NULL,field3:not a valid text}\", " + + "invalid CQL value at field field3 (character 29): " + + "text or varchar values must be enclosed by single quotes") + .hasRootCauseInstanceOf(IllegalArgumentException.class); } @Test diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java index 16baada6810..e62fb4af15b 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UuidCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java index e52dd93919c..a3472d4b8ce 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VarintCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java
new file mode 100644
index 00000000000..17c78514127
--- /dev/null
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/VectorCodecTest.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.data.CqlVector;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
+import com.datastax.oss.driver.internal.core.type.DefaultVectorType;
+import com.datastax.oss.protocol.internal.util.Bytes;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import java.nio.ByteBuffer;
+import java.time.LocalTime;
+import java.util.HashMap;
+import org.apache.commons.lang3.ArrayUtils;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public class VectorCodecTest {
+
+  @DataProvider
+  public static Object[] dataProvider() {
+    HashMap<Integer, String> map1 = new HashMap<>();
+    map1.put(1, "a");
+    HashMap<Integer, String> map2 = new HashMap<>();
+    map2.put(2, "b");
+    return new TestDataContainer[] {
+      new TestDataContainer(
+          DataTypes.FLOAT,
+          new Float[] {1.0f, 2.5f},
+          "[1.0, 2.5]",
+          Bytes.fromHexString("0x3f80000040200000")),
+      new TestDataContainer(
+          DataTypes.ASCII,
+          new String[] {"ab", "cde"},
+          "['ab', 'cde']",
+          Bytes.fromHexString("0x02616203636465")),
+      new TestDataContainer(
+          DataTypes.BIGINT,
+          new Long[] {1L, 2L},
+          "[1, 2]",
+          Bytes.fromHexString("0x00000000000000010000000000000002")),
+      new TestDataContainer(
+          DataTypes.BLOB,
+          new ByteBuffer[] {Bytes.fromHexString("0xCAFE"), Bytes.fromHexString("0xABCD")},
+          "[0xcafe, 0xabcd]",
+          Bytes.fromHexString("0x02cafe02abcd")),
+      new TestDataContainer(
+          DataTypes.BOOLEAN,
+          new Boolean[] {true, false},
+          "[true, false]",
+          Bytes.fromHexString("0x0100")),
+      new TestDataContainer(
+          DataTypes.TIME,
+          new LocalTime[] {LocalTime.ofNanoOfDay(1), LocalTime.ofNanoOfDay(2)},
+          "['00:00:00.000000001', '00:00:00.000000002']",
+          Bytes.fromHexString("0x080000000000000001080000000000000002")),
+      new TestDataContainer(
+          DataTypes.mapOf(DataTypes.INT, DataTypes.ASCII),
+          new HashMap[] {map1, map2},
+          "[{1:'a'}, {2:'b'}]",
+          Bytes.fromHexString(
+              "0x110000000100000004000000010000000161110000000100000004000000020000000162")),
+      new TestDataContainer(
+          DataTypes.vectorOf(DataTypes.INT, 1),
+          new CqlVector[] {CqlVector.newInstance(1), CqlVector.newInstance(2)},
+          "[[1], [2]]",
+          Bytes.fromHexString("0x0000000100000002")),
+      new TestDataContainer(
+          DataTypes.vectorOf(DataTypes.TEXT, 1),
+          new CqlVector[] {CqlVector.newInstance("ab"), CqlVector.newInstance("cdef")},
+          "[['ab'], ['cdef']]",
+          Bytes.fromHexString("0x03026162050463646566")),
+      new TestDataContainer(
+          DataTypes.vectorOf(DataTypes.vectorOf(DataTypes.FLOAT, 2), 1),
+          new CqlVector[] {
+            CqlVector.newInstance(CqlVector.newInstance(1.0f, 2.5f)),
+            CqlVector.newInstance(CqlVector.newInstance(3.0f, 4.5f))
+          },
+          "[[[1.0, 2.5]], [[3.0, 4.5]]]",
+          Bytes.fromHexString("0x3f800000402000004040000040900000"))
+    };
+  }
+
+  @UseDataProvider("dataProvider")
+  @Test
+  public void should_encode(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    CqlVector<Object> vector = CqlVector.newInstance(testData.getValues());
+    assertThat(codec.encode(vector, ProtocolVersion.DEFAULT)).isEqualTo(testData.getBytes());
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_throw_on_encode_with_too_few_elements(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThatThrownBy(
+            () ->
+                codec.encode(
+                    CqlVector.newInstance(testData.getValues()[0]), ProtocolVersion.DEFAULT))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_throw_on_encode_with_too_many_elements(TestDataContainer testData) {
+    Object[] doubled = ArrayUtils.addAll(testData.getValues(), testData.getValues());
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThatThrownBy(() -> codec.encode(CqlVector.newInstance(doubled), ProtocolVersion.DEFAULT))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_decode(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThat(codec.decode(testData.getBytes(), ProtocolVersion.DEFAULT))
+        .isEqualTo(CqlVector.newInstance(testData.getValues()));
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_throw_on_decode_if_too_few_bytes(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    int lastIndex = testData.getBytes().remaining() - 1;
+    assertThatThrownBy(
+            () ->
+                codec.decode(
+                    (ByteBuffer) testData.getBytes().duplicate().limit(lastIndex),
+                    ProtocolVersion.DEFAULT))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_throw_on_decode_if_too_many_bytes(TestDataContainer testData) {
+    ByteBuffer doubled = ByteBuffer.allocate(testData.getBytes().remaining() * 2);
+    doubled.put(testData.getBytes().duplicate()).put(testData.getBytes().duplicate()).flip();
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThatThrownBy(() -> codec.decode(doubled, ProtocolVersion.DEFAULT))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_format(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    CqlVector<Object> vector = CqlVector.newInstance(testData.getValues());
+    assertThat(codec.format(vector)).isEqualTo(testData.getFormatted());
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_parse(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThat(codec.parse(testData.getFormatted()))
+        .isEqualTo(CqlVector.newInstance(testData.getValues()));
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_accept_data_type(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 2))).isTrue();
+    assertThat(codec.accepts(new DefaultVectorType(DataTypes.custom("non-existent"), 2))).isFalse();
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_accept_vector_type_correct_dimension_only(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 0))).isFalse();
+    assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 1))).isFalse();
+    assertThat(codec.accepts(new DefaultVectorType(testData.getDataType(), 3))).isFalse();
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_accept_generic_type(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThat(codec.accepts(codec.getJavaType())).isTrue();
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_accept_raw_type(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    assertThat(codec.accepts(CqlVector.class)).isTrue();
+    assertThat(codec.accepts(Integer.class)).isFalse();
+  }
+
+  @Test
+  @UseDataProvider("dataProvider")
+  public void should_accept_object(TestDataContainer testData) {
+    TypeCodec<CqlVector<Object>> codec = getCodec(testData.getDataType());
+    CqlVector<Object> vector = CqlVector.newInstance(testData.getValues());
+    assertThat(codec.accepts(vector)).isTrue();
+    assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse();
+  }
+
+  @Test
+  public void should_handle_null_and_empty() {
+    TypeCodec<CqlVector<Object>> codec = getCodec(DataTypes.FLOAT);
+    assertThat(codec.encode(null, ProtocolVersion.DEFAULT)).isNull();
+    assertThat(codec.decode(Bytes.fromHexString("0x"), ProtocolVersion.DEFAULT)).isNull();
+    assertThat(codec.format(null)).isEqualTo("NULL");
+    assertThat(codec.parse("NULL")).isNull();
+    assertThat(codec.parse("null")).isNull();
+    assertThat(codec.parse("")).isNull();
+    assertThat(codec.parse(null)).isNull();
+    assertThatThrownBy(() -> codec.encode(CqlVector.newInstance(), ProtocolVersion.DEFAULT))
+        .isInstanceOf(IllegalArgumentException.class);
+  }
+
+  private static TypeCodec<CqlVector<Object>> getCodec(DataType dataType) {
+    return TypeCodecs.vectorOf(
+        DataTypes.vectorOf(dataType, 2), CodecRegistry.DEFAULT.codecFor(dataType));
+  }
+
+  private static class TestDataContainer {
+    private final DataType dataType;
+    private final Object[] values;
+    private final String formatted;
+    private final ByteBuffer bytes;
+
+    public TestDataContainer(
+        DataType dataType, Object[] values, String formatted, ByteBuffer bytes) {
+      this.dataType = dataType;
+      this.values = values;
+      this.formatted = formatted;
+      this.bytes = bytes;
+    }
+
+    public DataType getDataType() {
+      return dataType;
+    }
+
+    public Object[] getValues() {
+      return values;
+    }
+
+    public String getFormatted() {
+      return formatted;
+    }
+
+    public ByteBuffer getBytes() {
+      return bytes;
+    }
+  }
+}
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java
new file mode 100644
index 00000000000..745ba7a3aa8
--- /dev/null
+++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodecTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase;
+import java.util.Optional;
+import org.junit.Before;
+import org.junit.Test;
+
+public class OptionalCodecTest extends CodecTestBase<Optional<Integer>> {
+
+  @Before
+  public void setup() {
+    codec = ExtraTypeCodecs.optionalOf(TypeCodecs.INT);
+  }
+
+  @Test
+  public void should_encode() {
+    // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try
+    // a thousand different values.
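+    // An empty Optional is mapped to a CQL NULL, i.e. encoded as a null ByteBuffer.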
+ assertThat(encode(Optional.of(1))).isEqualTo("0x00000001"); + assertThat(encode(Optional.empty())).isNull(); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + assertThat(decode("0x00000001")).isPresent().contains(1); + assertThat(decode("0x")).isEmpty(); + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_format() { + assertThat(format(Optional.of(1))).isEqualTo("1"); + assertThat(format(Optional.empty())).isEqualTo("NULL"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_parse() { + assertThat(parse("1")).isPresent().contains(1); + assertThat(parse("NULL")).isEmpty(); + assertThat(parse("null")).isEmpty(); + assertThat(parse("")).isEmpty(); + assertThat(parse(null)).isEmpty(); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.optionalOf(Integer.class))).isTrue(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(Optional.class)).isTrue(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(Optional.of(1))).isTrue(); + assertThat(codec.accepts(Optional.empty())).isTrue(); + assertThat(codec.accepts(Optional.of("foo"))).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java new file mode 100644 index 00000000000..4a175cdf306 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/BooleanArrayCodecTest.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class BooleanArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new boolean[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new boolean[] {true, false})) + .isEqualTo( + "0x" + + "00000002" // number of elements + + "00000001" // size of element 1 + + "01" // contents of element 1 + + "00000001" // size of element 2 + + "00" // contents of element 2 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000002" // number of elements + + "00000001" // size of element 1 + + "01" // contents of element 1 + + "00000001" // size of element 2 + + "00" // contents of element 2 + )) + .containsExactly(true, false); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_format_empty_array() { + assertThat(format(new boolean[] {})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new boolean[] {true, false})).isEqualTo("[true,false]"); + } + + @Test + public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[true,false]")).containsExactly(true, false); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Boolean.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Boolean.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Boolean.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Boolean.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new boolean[] {true, false})).isTrue(); + assertThat(codec.accepts(new Boolean[] {true, false})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git 
a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java new file mode 100644 index 00000000000..761b568fcea --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ByteArrayCodecTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class ByteArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.BYTE_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new byte[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new byte[] {1, 2, 3})) + .isEqualTo( + "0x" + + "00000003" // number of elements + + "00000001" // size of element 1 + + "01" // contents of element 1 + + "00000001" // size of element 2 + + "02" // contents of element 2 + + "00000001" // size of element 3 + + "03" // contents of element 3 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000003" // number of elements + + "00000001" // size of element 1 + + "01" // contents of element 1 + + "00000001" // size of element 2 + + "02" // contents of element 2 + + "00000001" // size of element 3 + + "03" // contents of element 3 + )) + .containsExactly((byte) 1, (byte) 2, (byte) 3); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_format_empty_array() { + assertThat(format(new byte[] {})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new byte[] {1, 2, 3})).isEqualTo("[1,2,3]"); + } + + @Test + public void 
should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[1,2,3]")).containsExactly((byte) 1, (byte) 2, (byte) 3); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Byte.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Byte.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Byte.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Byte.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new byte[] {1, 2, 3})).isTrue(); + assertThat(codec.accepts(new Byte[] {1, 2, 3})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java new file mode 100644 index 00000000000..8e951f8ed55 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/DoubleArrayCodecTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class DoubleArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new double[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new double[] {1.1d, 2.2d, 3.3d})) + .isEqualTo( + "0x" + + "00000003" // number of elements + + "00000008" // size of element 1 + + "3ff199999999999a" // contents of element 1 + + "00000008" // size of element 2 + + "400199999999999a" // contents of element 2 + + "00000008" // size of element 3 + + "400a666666666666" // contents of element 3 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000003" // number of elements + + "00000008" // size of element 1 + + "3ff199999999999a" // contents of element 1 + + "00000008" // size of element 2 + + "400199999999999a" // contents of element 2 + + "00000008" // size of element 3 + + "400a666666666666" // contents of element 3 + )) + .containsExactly(1.1d, 2.2d, 3.3d); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_format_empty_array() { + assertThat(format(new double[] {})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new double[] {1.1d, 2.2d, 3.3d})).isEqualTo("[1.1,2.2,3.3]"); + } + + @Test + public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[1.1,2.2,3.3]")).containsExactly(1.1d, 2.2d, 3.3d); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Double.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Double.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Double.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Double.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + 
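+    // The codec matches only the primitive double[] form; boxed Double[] and other
+    // array types are rejected, as asserted below.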
assertThat(codec.accepts(new double[] {1.1d, 2.2d, 3.3d})).isTrue(); + assertThat(codec.accepts(new Double[] {1.1d, 2.2d, 3.3d})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java new file mode 100644 index 00000000000..77f3eafdcd7 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/FloatArrayCodecTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class FloatArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new float[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new float[] {1.1f, 2.2f, 3.3f})) + .isEqualTo( + "0x" + + "00000003" // number of elements + + "00000004" // size of element 1 + + "3f8ccccd" // contents of element 1 + + "00000004" // size of element 2 + + "400ccccd" // contents of element 2 + + "00000004" // size of element 3 + + "40533333" // contents of element 3 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000003" // number of elements + + "00000004" // size of element 1 + + "3f8ccccd" // contents of element 1 + + "00000004" // size of element 2 + + "400ccccd" // contents of element 2 + + "00000004" // size of element 3 + + "40533333" // contents of element 3 + )) + .containsExactly(1.1f, 2.2f, 3.3f); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + 
@Test + public void should_format_empty_array() { + assertThat(format(new float[] {})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new float[] {1.1f, 2.2f, 3.3f})).isEqualTo("[1.1,2.2,3.3]"); + } + + @Test + public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[1.1,2.2,3.3]")).containsExactly(1.1f, 2.2f, 3.3f); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Float.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Float.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Float.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Float.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new float[] {1.1f, 2.2f, 3.3f})).isTrue(); + assertThat(codec.accepts(new Float[] {1.1f, 2.2f, 3.3f})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java new file mode 100644 index 00000000000..ac00f1f8e1c --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/IntArrayCodecTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class IntArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.INT_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new int[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new int[] {1, 2, 3})) + .isEqualTo( + "0x" + + "00000003" // number of elements + + "00000004" // size of element 1 + + "00000001" // contents of element 1 + + "00000004" // size of element 2 + + "00000002" // contents of element 2 + + "00000004" // size of element 3 + + "00000003" // contents of element 3 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000003" // number of elements + + "00000004" // size of element 1 + + "00000001" // contents of element 1 + + "00000004" // size of element 2 + + "00000002" // contents of element 2 + + "00000004" // size of element 3 + + "00000003" // contents of element 3 + )) + .containsExactly(1, 2, 3); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_format_empty_array() { + assertThat(format(new int[] {})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new int[] {1, 2, 3})).isEqualTo("[1,2,3]"); + } + + @Test + public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[1,2,3]")).containsExactly(1, 2, 3); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Integer.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Integer.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Integer.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Integer.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new int[] {1, 2, 3})).isTrue(); + assertThat(codec.accepts(new Integer[] {1, 2, 
3})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java new file mode 100644 index 00000000000..737dcfae3c0 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/LongArrayCodecTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class LongArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.LONG_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new long[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new long[] {1, 2, 3})) + .isEqualTo( + "0x" + + "00000003" // number of elements + + "00000008" // size of element 1 + + "0000000000000001" // contents of element 1 + + "00000008" // size of element 2 + + "0000000000000002" // contents of element 2 + + "00000008" // size of element 3 + + "0000000000000003" // contents of element 3 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000003" // number of elements + + "00000008" // size of element 1 + + "0000000000000001" // contents of element 1 + + "00000008" // size of element 2 + + "0000000000000002" // contents of element 2 + + "00000008" // size of element 3 + + "0000000000000003" // contents of element 3 + )) + .containsExactly(1L, 2L, 3L); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_format_empty_array() { + assertThat(format(new long[] 
{})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new long[] {1, 2, 3})).isEqualTo("[1,2,3]"); + } + + @Test + public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[1,2,3]")).containsExactly(1L, 2L, 3L); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Long.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Long.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Long.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Long.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new long[] {1, 2, 3})).isTrue(); + assertThat(codec.accepts(new Long[] {1L, 2L, 3L})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java new file mode 100644 index 00000000000..a2afc652002 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ObjectArrayCodecTest.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.array;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.when;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase;
+import com.datastax.oss.protocol.internal.util.Bytes;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+public class ObjectArrayCodecTest extends CodecTestBase<String[]> {
+
+  @Mock private TypeCodec<String> elementCodec;
+
+  @Before
+  public void setup() {
+    MockitoAnnotations.initMocks(this);
+    when(elementCodec.getCqlType()).thenReturn(DataTypes.TEXT);
+    when(elementCodec.getJavaType()).thenReturn(GenericType.STRING);
+    codec = ExtraTypeCodecs.listToArrayOf(elementCodec);
+  }
+
+  @Test
+  public void should_encode_null() {
+    assertThat(encode(null)).isNull();
+  }
+
+  @Test
+  public void should_encode_empty_array() {
+    assertThat(encode(new String[] {})).isEqualTo("0x00000000");
+  }
+
+  @Test
+  public void should_encode_non_empty_array() {
+    when(elementCodec.encode("hello", ProtocolVersion.DEFAULT))
+        .thenReturn(Bytes.fromHexString("0x68656c6c6f"));
+    when(elementCodec.encode("world", ProtocolVersion.DEFAULT))
+        .thenReturn(Bytes.fromHexString("0x776f726c64"));
+    assertThat(encode(new String[] {"hello", "world"}))
+        .isEqualTo(
+            "0x"
+                + "00000002" // number of elements
+                + "00000005" // size of element 1
+                + "68656c6c6f" // contents of element 1
+                + "00000005" // size of element 2
+                + "776f726c64" // contents of element 2
+            );
+  }
+
+  @Test
+  public void should_decode_null_as_empty_array() {
+    assertThat(decode(null)).isEmpty();
+  }
+
+  @Test
+  public void should_decode_empty_array() {
+    assertThat(decode("0x00000000")).isEmpty();
+  }
+
+  @Test
+  public void should_decode_non_empty_array() {
+    when(elementCodec.decode(Bytes.fromHexString("0x68656c6c6f"), ProtocolVersion.DEFAULT))
+        .thenReturn("hello");
+    when(elementCodec.decode(Bytes.fromHexString("0x776f726c64"), ProtocolVersion.DEFAULT))
+        .thenReturn("world");
+    assertThat(
+            decode(
+                "0x"
+                    + "00000002" // number of elements
+                    + "00000005" // size of element 1
+                    + "68656c6c6f" // contents of element 1
+                    + "00000005" // size of element 2
+                    + "776f726c64" // contents of element 2
+                ))
+        .containsExactly("hello", "world");
+  }
+
+  @Test
+  public void should_decode_array_with_null_elements() {
+    when(elementCodec.decode(Bytes.fromHexString("0x68656c6c6f"), ProtocolVersion.DEFAULT))
+        .thenReturn("hello");
+    assertThat(
+            decode(
+                "0x"
+                    + "00000002" // number of elements
+                    + "FFFFFFFF" // size of element 1 (-1 for null)
+                    + "00000005" // size of element 2
+                    + "68656c6c6f" // contents of element 2
+                ))
+        .containsExactly(null, "hello");
+  }
+
+  @Test
+  public void should_format_null_array() {
+    assertThat(format(null)).isEqualTo("NULL");
+  }
+
+  @Test
+  public void should_format_empty_array() {
+    assertThat(format(new String[] {})).isEqualTo("[]");
+  }
+
+  @Test
+  public void should_format_non_empty_array() {
+    when(elementCodec.format("hello")).thenReturn("'hello'");
+    when(elementCodec.format("world")).thenReturn("'world'");
+    assertThat(format(new String[] {"hello", "world"})).isEqualTo("['hello','world']");
+  }
+
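+  // A minimal usage sketch (illustrative only, not part of this test; assumes a CqlSession
+  // and a row fetched elsewhere): registering this codec lets a CQL list<text> column be
+  // read directly as String[]:
+  //   CqlSession session = CqlSession.builder()
+  //       .addTypeCodecs(ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT))
+  //       .build();
+  //   String[] words = row.get("words", String[].class);
+
+  @Test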
+ public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + when(elementCodec.parse("'hello'")).thenReturn("hello"); + when(elementCodec.parse("'world'")).thenReturn("world"); + assertThat(parse("['hello','world']")).containsExactly("hello", "world"); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Integer.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Integer.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new String[] {"hello", "world"})).isTrue(); + assertThat(codec.accepts(new Integer[] {1, 2, 3})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java new file mode 100644 index 00000000000..3d489ada38f --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/array/ShortArrayCodecTest.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.array; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class ShortArrayCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.SHORT_LIST_TO_ARRAY; + } + + @Test + public void should_encode_null() { + assertThat(encode(null)).isNull(); + } + + @Test + public void should_encode_empty_array() { + assertThat(encode(new short[] {})).isEqualTo("0x00000000"); + } + + @Test + public void should_encode_non_empty_array() { + assertThat(encode(new short[] {1, 2, 3})) + .isEqualTo( + "0x" + + "00000003" // number of elements + + "00000002" // size of element 1 + + "0001" // contents of element 1 + + "00000002" // size of element 2 + + "0002" // contents of element 2 + + "00000002" // size of element 3 + + "0003" // contents of element 3 + ); + } + + @Test + public void should_decode_null_as_empty_array() { + assertThat(decode(null)).isEmpty(); + } + + @Test + public void should_decode_empty_array() { + assertThat(decode("0x00000000")).isEmpty(); + } + + @Test + public void should_decode_non_empty_array() { + assertThat( + decode( + "0x" + + "00000003" // number of elements + + "00000002" // size of element 1 + + "0001" // contents of element 1 + + "00000002" // size of element 2 + + "0002" // contents of element 2 + + "00000002" // size of element 3 + + "0003" // contents of element 3 + )) + .containsExactly((short) 1, (short) 2, (short) 3); + } + + @Test(expected = NullPointerException.class) + public void should_not_decode_array_with_null_elements() { + decode( + "0x" + + "00000001" // number of elements + + "FFFFFFFF" // size of element 1 (-1 for null) + ); + } + + @Test + public void should_format_null_array() { + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_format_empty_array() { + assertThat(format(new short[] {})).isEqualTo("[]"); + } + + @Test + public void should_format_non_empty_array() { + assertThat(format(new short[] {1, 2, 3})).isEqualTo("[1,2,3]"); + } + + @Test + public void should_parse_null_or_empty_string() { + assertThat(parse(null)).isNull(); + assertThat(parse("")).isNull(); + } + + @Test + public void should_parse_empty_array() { + assertThat(parse("[]")).isEmpty(); + } + + @Test + public void should_parse_non_empty_array() { + assertThat(parse("[1,2,3]")).containsExactly((short) 1, (short) 2, (short) 3); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_malformed_array() { + parse("not an array"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.arrayOf(Short.TYPE))).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Short.class))).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(GenericType.arrayOf(Short.TYPE).getRawType())).isTrue(); + assertThat(codec.accepts(GenericType.arrayOf(Short.class).getRawType())).isFalse(); + assertThat(codec.accepts(GenericType.arrayOf(String.class).getRawType())).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(new short[] {1, 2, 3})).isTrue(); + assertThat(codec.accepts(new 
Short[] {1, 2, 3})).isFalse(); + assertThat(codec.accepts(new String[] {"hello", "world"})).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java new file mode 100644 index 00000000000..093ec8a0be8 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumNameCodecTest.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.enums; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class EnumNameCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.enumNamesOf(DefaultProtocolVersion.class); + } + + @Test + public void should_encode() { + // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try + // a thousand different values. 
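+    // enumNamesOf stores the constant's name as text: "V3" in UTF-8 is 0x56 0x33.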
+ assertThat(encode(DefaultProtocolVersion.V3)).isEqualTo("0x5633"); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + assertThat(decode("0x5633")).isEqualTo(DefaultProtocolVersion.V3); + assertThat(decode("0x")).isNull(); + assertThat(decode(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_invalid_name() { + decode("0x1234"); + } + + @Test + public void should_format() { + assertThat(format(DefaultProtocolVersion.V3)).isEqualTo("'V3'"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_parse() { + assertThat(parse("'V3'")).isEqualTo(DefaultProtocolVersion.V3); + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_invalid_input() { + parse("not a valid enum constant"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.of(DefaultProtocolVersion.class))).isTrue(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(DefaultProtocolVersion.class)).isTrue(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(DefaultProtocolVersion.V3)).isTrue(); + assertThat(codec.accepts(DseProtocolVersion.DSE_V1)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java new file mode 100644 index 00000000000..7162bc51ff2 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/enums/EnumOrdinalCodecTest.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.enums; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import org.junit.Before; +import org.junit.Test; + +public class EnumOrdinalCodecTest extends CodecTestBase { + + @Before + public void setup() { + codec = ExtraTypeCodecs.enumOrdinalsOf(DefaultProtocolVersion.class); + } + + @Test + public void should_encode() { + // Our codec relies on the JDK's ByteBuffer API. We're not testing the JDK, so no need to try + // a thousand different values. 
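+    // enumOrdinalsOf stores the constant's ordinal as a 4-byte CQL int, hence ordinal 0
+    // encodes to 0x00000000.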
+ assertThat(encode(DefaultProtocolVersion.values()[0])).isEqualTo("0x00000000"); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + assertThat(decode("0x00000000")).isEqualTo(DefaultProtocolVersion.values()[0]); + assertThat(decode("0x")).isNull(); + assertThat(decode(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_not_enough_bytes() { + decode("0x0000"); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_too_many_bytes() { + decode("0x0000000000000000"); + } + + @Test + public void should_format() { + assertThat(format(DefaultProtocolVersion.values()[0])).isEqualTo("0"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_parse() { + assertThat(parse("0")).isEqualTo(DefaultProtocolVersion.values()[0]); + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_parse_invalid_input() { + parse("not an int"); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.of(DefaultProtocolVersion.class))).isTrue(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(DefaultProtocolVersion.class)).isTrue(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(DefaultProtocolVersion.V3)).isTrue(); + assertThat(codec.accepts(DseProtocolVersion.DSE_V1)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java new file mode 100644 index 00000000000..f9c37075b36 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/json/JsonCodecTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.internal.core.type.codec.extras.json;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet;
+import com.datastax.oss.protocol.internal.util.Bytes;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+
+public class JsonCodecTest extends CodecTestBase<Set<InetAddress>> {
+
+  private static final InetAddress V4_ADDRESS;
+  private static final InetAddress V6_ADDRESS;
+  private static final Set<InetAddress> SET_OF_ADDRESSES;
+
+  static {
+    try {
+      V4_ADDRESS = InetAddress.getByName("127.0.0.1");
+      V6_ADDRESS = InetAddress.getByName("::1");
+      SET_OF_ADDRESSES = ImmutableSet.of(V4_ADDRESS, V6_ADDRESS);
+    } catch (UnknownHostException e) {
+      fail("unexpected error", e);
+      throw new AssertionError(); // never reached
+    }
+  }
+
+  @Before
+  public void setup() {
+    this.codec = ExtraTypeCodecs.json(GenericType.setOf(GenericType.INET_ADDRESS));
+  }
+
+  @Test
+  public void should_encode() {
+    assertThat(encode(SET_OF_ADDRESSES))
+        .isEqualTo(encodeJson("[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]"));
+    assertThat(encode(Collections.emptySet())).isEqualTo(encodeJson("[]"));
+    assertThat(encode(null)).isNull();
+  }
+
+  @Test
+  public void should_decode() {
+    assertThat(decode(encodeJson("[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]")))
+        .isEqualTo(SET_OF_ADDRESSES);
+    assertThat(decode(encodeJson("[]"))).isEqualTo(Collections.emptySet());
+    assertThat(decode(null)).isNull();
+  }
+
+  @Test
+  public void should_format() {
+    assertThat(format(SET_OF_ADDRESSES)).isEqualTo("'[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]'");
+    assertThat(format(Collections.emptySet())).isEqualTo("'[]'");
+    assertThat(format(null)).isEqualTo("NULL");
+  }
+
+  @Test
+  public void should_parse() {
+    assertThat(parse("'[\"127.0.0.1\",\"0:0:0:0:0:0:0:1\"]'")).isEqualTo(SET_OF_ADDRESSES);
+    assertThat(parse("'[]'")).isEqualTo(Collections.emptySet());
+    assertThat(parse("NULL")).isNull();
+    assertThat(parse("null")).isNull();
+    assertThat(parse("")).isNull();
+    assertThat(parse(null)).isNull();
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void should_fail_to_parse_invalid_input() {
+    parse("not a JSON string");
+  }
+
+  @Test
+  public void should_accept_generic_type() {
+    assertThat(codec.accepts(GenericType.setOf(GenericType.INET_ADDRESS))).isTrue();
+  }
+
+  @Test
+  public void should_accept_raw_type() {
+    assertThat(codec.accepts(Set.class)).isTrue();
+  }
+
+  @Test
+  public void should_accept_object() {
+    assertThat(codec.accepts(SET_OF_ADDRESSES)).isTrue();
+    assertThat(codec.accepts(Collections.emptySet())).isTrue();
+    assertThat(codec.accepts(Collections.singletonList(V4_ADDRESS))).isFalse();
+  }
+
+  private String encodeJson(String json) {
+    return Bytes.toHexString(TypeCodecs.TEXT.encode(json, ProtocolVersion.DEFAULT));
+  }
+}
diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java
b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java new file mode 100644 index 00000000000..7d87cbbba9f --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/LocalTimestampCodecTest.java @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.extras.time; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class LocalTimestampCodecTest extends CodecTestBase { + + @Test + public void should_encode() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC; + assertThat(encode(Instant.EPOCH.atZone(ZoneOffset.UTC).toLocalDateTime())) + .isEqualTo("0x0000000000000000"); + assertThat(encode(Instant.ofEpochMilli(128).atZone(ZoneOffset.UTC).toLocalDateTime())) + .isEqualTo("0x0000000000000080"); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC; + assertThat(decode("0x0000000000000000")) + .isEqualTo(Instant.EPOCH.atZone(ZoneOffset.UTC).toLocalDateTime()); + assertThat(decode("0x0000000000000080")) + .isEqualTo(Instant.ofEpochMilli(128).atZone(ZoneOffset.UTC).toLocalDateTime()); + assertThat(decode(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_not_enough_bytes() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; + decode("0x0000"); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_too_many_bytes() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; + decode("0x0000000000000000" + "0000"); + } + + @Test + public void should_format() { + codec = ExtraTypeCodecs.localTimestampAt(ZoneOffset.ofHours(2)); + // No need to test various values because the codec delegates directly to SimpleDateFormat, + // which we assume does its job correctly. 
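+    // The codec was created at +02:00, so the formatted literal carries that offset while
+    // the local date-time fields are rendered unchanged.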
+ assertThat(format(LocalDateTime.parse("2018-08-16T16:59:34.123"))) + .isEqualTo("'2018-08-16T16:59:34.123+02:00'"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) + public void should_parse(ZoneId defaultTimeZone) { + codec = ExtraTypeCodecs.localTimestampAt(defaultTimeZone); + + // Raw numbers + assertThat(parse("'0'")).isEqualTo(Instant.EPOCH.atZone(defaultTimeZone).toLocalDateTime()); + assertThat(parse("'-1'")) + .isEqualTo(Instant.EPOCH.minusMillis(1).atZone(defaultTimeZone).toLocalDateTime()); + assertThat(parse("1534463100000")) + .isEqualTo(Instant.ofEpochMilli(1534463100000L).atZone(defaultTimeZone).toLocalDateTime()); + + // Date formats + LocalDateTime expected; + + // date without time, without time zone + expected = LocalDate.parse("2017-01-01").atStartOfDay(); + assertThat(parse("'2017-01-01'")).isEqualTo(expected); + + // date without time, with time zone + expected = + ZonedDateTime.parse("2018-08-16T00:00:00+02:00") + .withZoneSameInstant(defaultTimeZone) + .toLocalDateTime(); + assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); + assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); + assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); + assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); + + // date with time, without time zone + expected = LocalDateTime.parse("2018-08-16T23:45"); + assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); + assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); + + // date with time + seconds, without time zone + expected = LocalDateTime.parse("2019-12-31T16:08:38"); + assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); + assertThat(parse("'2019-12-31 16:08:38'")).isEqualTo(expected); + + // date with time + seconds + milliseconds, without time zone + expected = LocalDateTime.parse("1950-02-28T12:00:59.230"); + assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); + assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); + + // date with time, with time zone + expected = + ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00") + .withZoneSameInstant(defaultTimeZone) + .toLocalDateTime(); + assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); + assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); + assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); + assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); + + // date with time + seconds, with time zone + expected = + ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00") + .withZoneSameInstant(defaultTimeZone) + .toLocalDateTime(); + assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); + assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); + assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); + assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); + + // date with time + 
seconds + milliseconds, with time zone + expected = + ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00") + .withZoneSameInstant(defaultTimeZone) + .toLocalDateTime(); + assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); + assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); + + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test + public void should_fail_to_parse_invalid_input() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; + assertThatThrownBy(() -> parse("not a timestamp")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); + assertThatThrownBy(() -> parse("'not a timestamp'")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); + } + + @Test + public void should_accept_generic_type() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; + assertThat(codec.accepts(GenericType.LOCAL_DATE_TIME)).isTrue(); + assertThat(codec.accepts(GenericType.INSTANT)).isFalse(); + } + + @Test + public void should_accept_raw_type() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; + assertThat(codec.accepts(LocalDateTime.class)).isTrue(); + assertThat(codec.accepts(Instant.class)).isFalse(); + } + + @Test + public void should_accept_object() { + codec = ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM; + assertThat(codec.accepts(LocalDateTime.now(ZoneId.systemDefault()))).isTrue(); + assertThat(codec.accepts(Instant.EPOCH)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java new file mode 100644 index 00000000000..9bf1cac1007 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/PersistentZonedTimestampCodecTest.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.time; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import org.junit.Before; +import org.junit.Test; + +public class PersistentZonedTimestampCodecTest extends CodecTestBase { + + private static final ZonedDateTime EPOCH_UTC = Instant.EPOCH.atZone(ZoneOffset.UTC); + + private static final ZonedDateTime EPOCH_MILLIS_CET = + Instant.ofEpochMilli(128).atZone(ZoneId.of("CET")); + + private static final ZonedDateTime EPOCH_MILLIS_OFFSET = + Instant.ofEpochMilli(128).atZone(ZoneOffset.ofHours(2)); + + private static final ZonedDateTime EPOCH_MILLIS_EUROPE_PARIS = + Instant.ofEpochMilli(-128).atZone(ZoneId.of("Europe/Paris")); + + private static final String EPOCH_UTC_ENCODED = + "0x" + + ("00000008" + "0000000000000000") // size and contents of timestamp + + ("00000001" + "5a"); // size and contents of zone ID + + private static final String EPOCH_MILLIS_CET_ENCODED = + "0x" + + ("00000008" + "0000000000000080") // size and contents of timestamp + + ("00000003" + "434554"); // size and contents of zone ID + + private static final String EPOCH_MILLIS_OFFSET_ENCODED = + "0x" + + ("00000008" + "0000000000000080") // size and contents of timestamp + + ("00000006" + "2b30323a3030"); // size and contents of zone ID + + private static final String EPOCH_MILLIS_EUROPE_PARIS_ENCODED = + "0x" + + ("00000008" + "ffffffffffffff80") // size and contents of timestamp + + ("0000000c" + "4575726f70652f5061726973"); // size and contents of zone ID + + private static final String EPOCH_UTC_FORMATTED = "('1970-01-01T00:00:00.000Z','Z')"; + + private static final String EPOCH_MILLIS_CET_FORMATTED = "('1970-01-01T00:00:00.128Z','CET')"; + + private static final String EPOCH_MILLIS_OFFSET_FORMATTED = + "('1970-01-01T00:00:00.128Z','+02:00')"; + + private static final String EPOCH_MILLIS_EUROPE_PARIS_FORMATTED = + "('1969-12-31T23:59:59.872Z','Europe/Paris')"; + + @Before + public void setup() { + codec = ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED; + } + + @Test + public void should_encode() { + assertThat(encode(EPOCH_UTC)).isEqualTo(EPOCH_UTC_ENCODED); + assertThat(encode(EPOCH_MILLIS_CET)).isEqualTo(EPOCH_MILLIS_CET_ENCODED); + assertThat(encode(EPOCH_MILLIS_OFFSET)).isEqualTo(EPOCH_MILLIS_OFFSET_ENCODED); + assertThat(encode(EPOCH_MILLIS_EUROPE_PARIS)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS_ENCODED); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + assertThat(decode(EPOCH_UTC_ENCODED)).isEqualTo(EPOCH_UTC); + assertThat(decode(EPOCH_MILLIS_CET_ENCODED)).isEqualTo(EPOCH_MILLIS_CET); + assertThat(decode(EPOCH_MILLIS_OFFSET_ENCODED)).isEqualTo(EPOCH_MILLIS_OFFSET); + assertThat(decode(EPOCH_MILLIS_EUROPE_PARIS_ENCODED)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS); + assertThat(decode(null)).isNull(); + } + + @Test + public void should_format() { + assertThat(format(EPOCH_UTC)).isEqualTo(EPOCH_UTC_FORMATTED); + assertThat(format(EPOCH_MILLIS_CET)).isEqualTo(EPOCH_MILLIS_CET_FORMATTED); + assertThat(format(EPOCH_MILLIS_OFFSET)).isEqualTo(EPOCH_MILLIS_OFFSET_FORMATTED); + assertThat(format(EPOCH_MILLIS_EUROPE_PARIS)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS_FORMATTED); + 
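+    // The instant component is always rendered in UTC ('...Z'); the original zone appears
+    // as the second component of the tuple.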
assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + public void should_parse() { + assertThat(parse(EPOCH_UTC_FORMATTED)).isEqualTo(EPOCH_UTC); + assertThat(parse(EPOCH_MILLIS_CET_FORMATTED)).isEqualTo(EPOCH_MILLIS_CET); + assertThat(parse(EPOCH_MILLIS_OFFSET_FORMATTED)).isEqualTo(EPOCH_MILLIS_OFFSET); + assertThat(parse(EPOCH_MILLIS_EUROPE_PARIS_FORMATTED)).isEqualTo(EPOCH_MILLIS_EUROPE_PARIS); + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test + public void should_accept_generic_type() { + assertThat(codec.accepts(GenericType.of(ZonedDateTime.class))).isTrue(); + assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); + } + + @Test + public void should_accept_raw_type() { + assertThat(codec.accepts(ZonedDateTime.class)).isTrue(); + assertThat(codec.accepts(Integer.class)).isFalse(); + } + + @Test + public void should_accept_object() { + assertThat(codec.accepts(ZonedDateTime.now(ZoneOffset.systemDefault()))).isTrue(); + assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java new file mode 100644 index 00000000000..36ee71eebe6 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/TimestampMillisCodecTest.java @@ -0,0 +1,205 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec.extras.time; + +import static java.time.ZoneOffset.ofHours; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class TimestampMillisCodecTest extends CodecTestBase<Long> { + + @Test + public void should_encode() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; + assertThat(encode(0L)).isEqualTo("0x0000000000000000"); + assertThat(encode(128L)).isEqualTo("0x0000000000000080"); + assertThat(encode(null)).isNull(); + } + + @Test + public void should_decode() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; + assertThat(decode("0x0000000000000000")).isEqualTo(0L); + assertThat(decode("0x0000000000000080")).isEqualTo(128L); + assertThat(decode(null)).isNull(); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_not_enough_bytes() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; + decode("0x0000"); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_to_decode_if_too_many_bytes() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; + decode("0x0000000000000000" + "0000"); + } + + @Test + public void should_format() { + codec = ExtraTypeCodecs.timestampMillisAt(ZoneOffset.ofHours(2)); + // No need to test various values because the codec delegates directly to SimpleDateFormat, + // which we assume does its job correctly. 
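+ // Epoch millis 0, rendered at the codec's fixed UTC+02:00 zone, shifts to 02:00 local time: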
+ assertThat(format(0L)).isEqualTo("'1970-01-01T02:00:00.000+02:00'"); + assertThat(format(1534435174123L)).isEqualTo("'2018-08-16T17:59:34.123+02:00'"); + assertThat(format(null)).isEqualTo("NULL"); + } + + @Test + @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) + public void should_parse(ZoneId defaultTimeZone) { + codec = ExtraTypeCodecs.timestampMillisAt(defaultTimeZone); + + // Raw numbers + assertThat(parse("'0'")).isEqualTo(0L); + assertThat(parse("'-1'")).isEqualTo(-1L); + assertThat(parse("1534463100000")).isEqualTo(1534463100000L); + + // Date formats + long expected; + + // date without time, without time zone + expected = + LocalDate.parse("2017-01-01") + .atStartOfDay() + .atZone(defaultTimeZone) + .toInstant() + .toEpochMilli(); + assertThat(parse("'2017-01-01'")).isEqualTo(expected); + + // date without time, with time zone + expected = + LocalDate.parse("2018-08-16").atStartOfDay().atZone(ofHours(2)).toInstant().toEpochMilli(); + assertThat(parse("'2018-08-16+02'")).isEqualTo(expected); + assertThat(parse("'2018-08-16+0200'")).isEqualTo(expected); + assertThat(parse("'2018-08-16+02:00'")).isEqualTo(expected); + assertThat(parse("'2018-08-16 CEST'")).isEqualTo(expected); + + // date with time, without time zone + expected = + LocalDateTime.parse("2018-08-16T23:45").atZone(defaultTimeZone).toInstant().toEpochMilli(); + assertThat(parse("'2018-08-16T23:45'")).isEqualTo(expected); + assertThat(parse("'2018-08-16 23:45'")).isEqualTo(expected); + + // date with time + seconds, without time zone + expected = + LocalDateTime.parse("2019-12-31T16:08:38") + .atZone(defaultTimeZone) + .toInstant() + .toEpochMilli(); + assertThat(parse("'2019-12-31T16:08:38'")).isEqualTo(expected); + assertThat(parse("'2019-12-31 16:08:38'")).isEqualTo(expected); + + // date with time + seconds + milliseconds, without time zone + expected = + LocalDateTime.parse("1950-02-28T12:00:59.230") + .atZone(defaultTimeZone) + .toInstant() + .toEpochMilli(); + assertThat(parse("'1950-02-28T12:00:59.230'")).isEqualTo(expected); + assertThat(parse("'1950-02-28 12:00:59.230'")).isEqualTo(expected); + + // date with time, with time zone + expected = ZonedDateTime.parse("1973-06-23T23:59:00.000+01:00").toInstant().toEpochMilli(); + assertThat(parse("'1973-06-23T23:59+01'")).isEqualTo(expected); + assertThat(parse("'1973-06-23T23:59+0100'")).isEqualTo(expected); + assertThat(parse("'1973-06-23T23:59+01:00'")).isEqualTo(expected); + assertThat(parse("'1973-06-23T23:59 CET'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59+01'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59+0100'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59+01:00'")).isEqualTo(expected); + assertThat(parse("'1973-06-23 23:59 CET'")).isEqualTo(expected); + + // date with time + seconds, with time zone + expected = ZonedDateTime.parse("1980-01-01T23:59:59.000-08:00").toInstant().toEpochMilli(); + assertThat(parse("'1980-01-01T23:59:59-08'")).isEqualTo(expected); + assertThat(parse("'1980-01-01T23:59:59-0800'")).isEqualTo(expected); + assertThat(parse("'1980-01-01T23:59:59-08:00'")).isEqualTo(expected); + assertThat(parse("'1980-01-01T23:59:59 PST'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59-08'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59-0800'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59-08:00'")).isEqualTo(expected); + assertThat(parse("'1980-01-01 23:59:59 PST'")).isEqualTo(expected); + + // date with time + seconds + 
milliseconds, with time zone + expected = ZonedDateTime.parse("1999-12-31T23:59:59.999+00:00").toInstant().toEpochMilli(); + assertThat(parse("'1999-12-31T23:59:59.999+00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31T23:59:59.999+0000'")).isEqualTo(expected); + assertThat(parse("'1999-12-31T23:59:59.999+00:00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31T23:59:59.999 UTC'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999+00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999+0000'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999+00:00'")).isEqualTo(expected); + assertThat(parse("'1999-12-31 23:59:59.999 UTC'")).isEqualTo(expected); + + assertThat(parse("NULL")).isNull(); + assertThat(parse("null")).isNull(); + assertThat(parse("")).isNull(); + assertThat(parse(null)).isNull(); + } + + @Test + public void should_fail_to_parse_invalid_input() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM; + assertThatThrownBy(() -> parse("not a timestamp")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Alphanumeric timestamp literal must be quoted: \"not a timestamp\""); + assertThatThrownBy(() -> parse("'not a timestamp'")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Cannot parse timestamp value from \"'not a timestamp'\""); + } + + @Test + public void should_accept_generic_type() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; + assertThat(codec.accepts(GenericType.LONG)).isTrue(); + assertThat(codec.accepts(GenericType.INSTANT)).isFalse(); + } + + @Test + public void should_accept_raw_type() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; + assertThat(codec.accepts(Long.class)).isTrue(); + assertThat(codec.accepts(Long.TYPE)).isTrue(); + assertThat(codec.accepts(Instant.class)).isFalse(); + } + + @Test + public void should_accept_object() { + codec = ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC; + assertThat(codec.accepts(Long.MIN_VALUE)).isTrue(); + assertThat(codec.accepts(Instant.EPOCH)).isFalse(); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ZonedTimestampCodecTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java similarity index 86% rename from core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ZonedTimestampCodecTest.java rename to core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java index 5fb73d0ec76..cd31d13d5ca 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/ZonedTimestampCodecTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/extras/time/ZonedTimestampCodecTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,14 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.internal.core.type.codec; +package com.datastax.oss.driver.internal.core.type.codec.extras.time; import static java.time.ZoneOffset.ofHours; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.type.codec.CodecTestBase; +import com.datastax.oss.driver.internal.core.type.codec.TimestampCodecTest; import com.tngtech.java.junit.dataprovider.DataProviderRunner; import com.tngtech.java.junit.dataprovider.UseDataProvider; import java.time.Instant; @@ -38,7 +42,7 @@ public class ZonedTimestampCodecTest extends CodecTestBase<ZonedDateTime> { @Test @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) public void should_encode(ZoneId timeZone) { - codec = TypeCodecs.zonedTimestampAt(timeZone); + codec = ExtraTypeCodecs.zonedTimestampAt(timeZone); assertThat(encode(Instant.EPOCH.atZone(timeZone))).isEqualTo("0x0000000000000000"); assertThat(encode(Instant.ofEpochMilli(128).atZone(timeZone))).isEqualTo("0x0000000000000080"); assertThat(encode(null)).isNull(); @@ -46,7 +50,7 @@ public void should_encode(ZoneId timeZone) { @Test public void should_decode() { - codec = TypeCodecs.ZONED_TIMESTAMP_UTC; + codec = ExtraTypeCodecs.ZONED_TIMESTAMP_UTC; assertThat(decode("0x0000000000000000").toInstant().toEpochMilli()).isEqualTo(0); assertThat(decode("0x0000000000000080").toInstant().toEpochMilli()).isEqualTo(128); assertThat(decode(null)).isNull(); @@ -54,19 +58,19 @@ public void should_decode() { @Test(expected = IllegalArgumentException.class) public void should_fail_to_decode_if_not_enough_bytes() { - codec = TypeCodecs.ZONED_TIMESTAMP_SYSTEM; + codec = ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; decode("0x0000"); } @Test(expected = IllegalArgumentException.class) public void should_fail_to_decode_if_too_many_bytes() { - codec = TypeCodecs.ZONED_TIMESTAMP_SYSTEM; + codec = ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM; decode("0x0000000000000000" + "0000"); } @Test public void should_format() { - codec = TypeCodecs.zonedTimestampAt(ZoneOffset.ofHours(2)); + codec = ExtraTypeCodecs.zonedTimestampAt(ZoneOffset.ofHours(2)); // No need to test various values because the codec delegates directly to SimpleDateFormat, // which we assume does its job correctly. 
assertThat(format(Instant.EPOCH.atZone(ZoneOffset.UTC))) @@ -79,7 +83,7 @@ public void should_format() { @Test @UseDataProvider(value = "timeZones", location = TimestampCodecTest.class) public void should_parse(ZoneId timeZone) { - codec = TypeCodecs.zonedTimestampAt(timeZone); + codec = ExtraTypeCodecs.zonedTimestampAt(timeZone); // Raw numbers assertThat(parse("'0'")).isEqualTo(Instant.EPOCH.atZone(timeZone)); @@ -183,7 +187,7 @@ public void should_accept_raw_type() { @Test public void should_accept_object() { codec = new ZonedTimestampCodec(); - assertThat(codec.accepts(ZonedDateTime.now())).isTrue(); + assertThat(codec.accepts(ZonedDateTime.now(ZoneOffset.systemDefault()))).isTrue(); assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); } } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java index 271bb74a2be..231f67a93e7 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,56 +21,42 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.fail; import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; -import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.data.CqlDuration; import com.datastax.oss.driver.api.core.data.TupleValue; import com.datastax.oss.driver.api.core.data.UdtValue; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.ListType; -import com.datastax.oss.driver.api.core.type.MapType; -import com.datastax.oss.driver.api.core.type.SetType; -import com.datastax.oss.driver.api.core.type.TupleType; -import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; +import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; +import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; +import com.datastax.oss.driver.internal.core.type.codec.IntCodec; import com.datastax.oss.driver.internal.core.type.codec.ListCodec; import com.datastax.oss.driver.internal.core.type.codec.registry.CachingCodecRegistryTest.TestCachingCodecRegistry.MockCache; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; -import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.net.Inet4Address; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.nio.ByteBuffer; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; import java.time.Period; -import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +@RunWith(DataProviderRunner.class) public class CachingCodecRegistryTest { @Mock private MockCache mockCache; @@ -79,91 +67,40 @@ public void setup() { } @Test - public void should_find_primitive_codecs_for_types() { + @UseDataProvider( + value = "primitiveCodecs", + location = 
CachingCodecRegistryTestDataProviders.class) + public void should_find_primitive_codecs_for_types(TypeCodec<?> codec) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - checkPrimitiveMappings(registry, TypeCodecs.BOOLEAN); - checkPrimitiveMappings(registry, TypeCodecs.TINYINT); - checkPrimitiveMappings(registry, TypeCodecs.DOUBLE); - checkPrimitiveMappings(registry, TypeCodecs.COUNTER); - checkPrimitiveMappings(registry, TypeCodecs.FLOAT); - checkPrimitiveMappings(registry, TypeCodecs.INT); - checkPrimitiveMappings(registry, TypeCodecs.BIGINT); - checkPrimitiveMappings(registry, TypeCodecs.SMALLINT); - checkPrimitiveMappings(registry, TypeCodecs.TIMESTAMP); - checkPrimitiveMappings(registry, TypeCodecs.DATE); - checkPrimitiveMappings(registry, TypeCodecs.TIME); - checkPrimitiveMappings(registry, TypeCodecs.BLOB); - checkPrimitiveMappings(registry, TypeCodecs.TEXT); - checkPrimitiveMappings(registry, TypeCodecs.ASCII); - checkPrimitiveMappings(registry, TypeCodecs.VARINT); - checkPrimitiveMappings(registry, TypeCodecs.DECIMAL); - checkPrimitiveMappings(registry, TypeCodecs.UUID); - checkPrimitiveMappings(registry, TypeCodecs.TIMEUUID); - checkPrimitiveMappings(registry, TypeCodecs.INET); - checkPrimitiveMappings(registry, TypeCodecs.DURATION); - // Primitive mappings never hit the cache - verifyZeroInteractions(mockCache); - } - - private void checkPrimitiveMappings(TestCachingCodecRegistry registry, TypeCodec<?> codec) { DataType cqlType = codec.getCqlType(); GenericType<?> javaType = codec.getJavaType(); - assertThat(registry.codecFor(cqlType, javaType)).isSameAs(codec); assertThat(registry.codecFor(cqlType)).isSameAs(codec); - assertThat(javaType.__getToken().getType()).isInstanceOf(Class.class); Class<?> javaClass = (Class<?>) javaType.__getToken().getType(); assertThat(registry.codecFor(cqlType, javaClass)).isSameAs(codec); + // Primitive mappings never hit the cache + verifyZeroInteractions(mockCache); } @Test - public void should_find_primitive_codecs_for_value() throws Exception { + @UseDataProvider( + value = "primitiveCodecsWithValues", + location = CachingCodecRegistryTestDataProviders.class) + public void should_find_primitive_codecs_for_value(Object value, TypeCodec<?> codec) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThat(registry.codecFor(true)).isEqualTo(TypeCodecs.BOOLEAN); - assertThat(registry.codecFor((byte) 0)).isEqualTo(TypeCodecs.TINYINT); - assertThat(registry.codecFor(0.0)).isEqualTo(TypeCodecs.DOUBLE); - assertThat(registry.codecFor(0.0f)).isEqualTo(TypeCodecs.FLOAT); - assertThat(registry.codecFor(0)).isEqualTo(TypeCodecs.INT); - assertThat(registry.codecFor(0L)).isEqualTo(TypeCodecs.BIGINT); - assertThat(registry.codecFor((short) 0)).isEqualTo(TypeCodecs.SMALLINT); - assertThat(registry.codecFor(Instant.EPOCH)).isEqualTo(TypeCodecs.TIMESTAMP); - assertThat(registry.codecFor(LocalDate.MIN)).isEqualTo(TypeCodecs.DATE); - assertThat(registry.codecFor(LocalTime.MIDNIGHT)).isEqualTo(TypeCodecs.TIME); - assertThat(registry.codecFor(ByteBuffer.allocate(0))).isEqualTo(TypeCodecs.BLOB); - assertThat(registry.codecFor("")).isEqualTo(TypeCodecs.TEXT); - assertThat(registry.codecFor(BigInteger.ONE)).isEqualTo(TypeCodecs.VARINT); - assertThat(registry.codecFor(BigDecimal.ONE)).isEqualTo(TypeCodecs.DECIMAL); - assertThat(registry.codecFor(new UUID(2L, 1L))).isEqualTo(TypeCodecs.UUID); - assertThat(registry.codecFor(InetAddress.getByName("127.0.0.1"))).isEqualTo(TypeCodecs.INET); - 
assertThat(registry.codecFor(CqlDuration.newInstance(1, 2, 3))).isEqualTo(TypeCodecs.DURATION); + assertThat(registry.codecFor(value)).isEqualTo(codec); verifyZeroInteractions(mockCache); } @Test - public void should_find_primitive_codecs_for_cql_type_and_value() throws Exception { + @UseDataProvider( + value = "primitiveCodecsWithCqlTypesAndValues", + location = CachingCodecRegistryTestDataProviders.class) + public void should_find_primitive_codecs_for_cql_type_and_value( + DataType cqlType, Object value, TypeCodec<?> codec) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - assertThat(registry.codecFor(DataTypes.BOOLEAN, true)).isEqualTo(TypeCodecs.BOOLEAN); - assertThat(registry.codecFor(DataTypes.TINYINT, (byte) 0)).isEqualTo(TypeCodecs.TINYINT); - assertThat(registry.codecFor(DataTypes.DOUBLE, 0.0)).isEqualTo(TypeCodecs.DOUBLE); - assertThat(registry.codecFor(DataTypes.FLOAT, 0.0f)).isEqualTo(TypeCodecs.FLOAT); - assertThat(registry.codecFor(DataTypes.INT, 0)).isEqualTo(TypeCodecs.INT); - assertThat(registry.codecFor(DataTypes.BIGINT, 0L)).isEqualTo(TypeCodecs.BIGINT); - assertThat(registry.codecFor(DataTypes.SMALLINT, (short) 0)).isEqualTo(TypeCodecs.SMALLINT); - assertThat(registry.codecFor(DataTypes.TIMESTAMP, Instant.EPOCH)) - .isEqualTo(TypeCodecs.TIMESTAMP); - assertThat(registry.codecFor(DataTypes.DATE, LocalDate.MIN)).isEqualTo(TypeCodecs.DATE); - assertThat(registry.codecFor(DataTypes.TIME, LocalTime.MIDNIGHT)).isEqualTo(TypeCodecs.TIME); - assertThat(registry.codecFor(DataTypes.BLOB, ByteBuffer.allocate(0))) - .isEqualTo(TypeCodecs.BLOB); - assertThat(registry.codecFor(DataTypes.TEXT, "")).isEqualTo(TypeCodecs.TEXT); - assertThat(registry.codecFor(DataTypes.VARINT, BigInteger.ONE)).isEqualTo(TypeCodecs.VARINT); - assertThat(registry.codecFor(DataTypes.DECIMAL, BigDecimal.ONE)).isEqualTo(TypeCodecs.DECIMAL); - assertThat(registry.codecFor(DataTypes.UUID, new UUID(2L, 1L))).isEqualTo(TypeCodecs.UUID); - assertThat(registry.codecFor(DataTypes.INET, InetAddress.getByName("127.0.0.1"))) - .isEqualTo(TypeCodecs.INET); - assertThat(registry.codecFor(DataTypes.DURATION, CqlDuration.newInstance(1, 2, 3))) - .isEqualTo(TypeCodecs.DURATION); + assertThat(registry.codecFor(cqlType, value)).isEqualTo(codec); verifyZeroInteractions(mockCache); } @@ -173,17 +110,18 @@ public void should_find_user_codec_for_built_in_java_type() { CqlIntToStringCodec intToStringCodec1 = new CqlIntToStringCodec(); // register a second codec to also check that the first one is preferred CqlIntToStringCodec intToStringCodec2 = new CqlIntToStringCodec(); - TestCachingCodecRegistry registry = - new TestCachingCodecRegistry(mockCache, intToStringCodec1, intToStringCodec2); + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + registry.register(intToStringCodec1, intToStringCodec2); + verify(mockCache).lookup(DataTypes.INT, GenericType.STRING, false); // When the mapping is not ambiguous, the user type should be returned assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec1); assertThat(registry.codecFor(DataTypes.INT, String.class)).isSameAs(intToStringCodec1); - assertThat(registry.codecFor(DataTypes.INT, "")).isSameAs(intToStringCodec1); + assertThat(registry.codecFor(DataTypes.INT, "123")).isSameAs(intToStringCodec1); // When there is an ambiguity with a built-in codec, the built-in codec should have priority assertThat(registry.codecFor(DataTypes.INT)).isSameAs(TypeCodecs.INT); - 
assertThat(registry.codecFor("")).isSameAs(TypeCodecs.TEXT); + assertThat(registry.codecFor("123")).isSameAs(TypeCodecs.TEXT); verifyZeroInteractions(mockCache); } @@ -192,8 +130,9 @@ public void should_find_user_codec_for_built_in_java_type() { public void should_find_user_codec_for_custom_java_type() { TextToPeriodCodec textToPeriodCodec1 = new TextToPeriodCodec(); TextToPeriodCodec textToPeriodCodec2 = new TextToPeriodCodec(); - TestCachingCodecRegistry registry = - new TestCachingCodecRegistry(mockCache, textToPeriodCodec1, textToPeriodCodec2); + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + registry.register(textToPeriodCodec1, textToPeriodCodec2); + verify(mockCache).lookup(DataTypes.TEXT, GenericType.of(Period.class), false); assertThat(registry.codecFor(DataTypes.TEXT, GenericType.of(Period.class))) .isSameAs(textToPeriodCodec1); @@ -209,406 +148,85 @@ public void should_find_user_codec_for_custom_java_type() { } @Test - public void should_create_list_codec_for_cql_and_java_types() { - ListType cqlType = DataTypes.listOf(DataTypes.listOf(DataTypes.INT)); - GenericType>> javaType = new GenericType>>() {}; - List> value = ImmutableList.of(ImmutableList.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec>> codec = registry.codecFor(cqlType, javaType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - // Cache lookup for the codec, and recursively for its subcodec - inOrder.verify(mockCache).lookup(cqlType, javaType, false); - inOrder - .verify(mockCache) - .lookup(DataTypes.listOf(DataTypes.INT), GenericType.listOf(GenericType.INTEGER), false); - } - - @Test - public void should_create_list_codec_for_cql_type() { - ListType cqlType = DataTypes.listOf(DataTypes.listOf(DataTypes.INT)); - GenericType>> javaType = new GenericType>>() {}; - List> value = ImmutableList.of(ImmutableList.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec>> codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - inOrder.verify(mockCache).lookup(DataTypes.listOf(DataTypes.INT), null, false); - } - - @Test - public void should_create_list_codec_for_cql_type_and_java_value() { - ListType cqlType = DataTypes.listOf(DataTypes.listOf(DataTypes.INT)); - GenericType>> javaType = new GenericType>>() {}; - List> value = ImmutableList.of(ImmutableList.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec>> codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - inOrder - .verify(mockCache) - .lookup(DataTypes.listOf(DataTypes.INT), GenericType.listOf(GenericType.INTEGER), true); - } - - @Test - public void should_create_list_codec_for_java_value() { - ListType cqlType = DataTypes.listOf(DataTypes.listOf(DataTypes.INT)); - GenericType>> javaType = new GenericType>>() {}; - 
List<List<Integer>> value = ImmutableList.of(ImmutableList.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<List<List<Integer>>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(null, javaType, true); - inOrder.verify(mockCache).lookup(null, GenericType.listOf(GenericType.INTEGER), true); - } - - @Test - public void should_create_list_codec_for_empty_java_value() { - GenericType<List<List<Boolean>>> javaType = - GenericType.listOf(GenericType.listOf(Boolean.class)); - List<List<Boolean>> value = Collections.singletonList(Collections.emptyList()); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<List<List<Boolean>>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(value)).isTrue(); - - // Note that empty collections without CQL type are a corner case, in that the registry returns - // a codec that does not accept cqlType, nor the value's declared Java type. - // The only requirement is that it can encode the value, which holds true: - codec.encode(value, ProtocolVersion.DEFAULT); - - inOrder.verify(mockCache).lookup(null, javaType, true); - } - - @Test - public void should_create_list_codec_for_cql_type_and_empty_java_value() { - ListType cqlType = DataTypes.listOf(DataTypes.listOf(DataTypes.INT)); - GenericType<List<List<Integer>>> javaType = - GenericType.listOf(GenericType.listOf(GenericType.INTEGER)); - List<List<Integer>> value = Collections.singletonList(Collections.emptyList()); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<List<List<Integer>>> codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - // Verify that the codec can encode the value - codec.encode(value, ProtocolVersion.DEFAULT); - - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - } - - @Test - public void should_create_list_codec_for_java_value_when_first_element_is_a_subtype() - throws UnknownHostException { - ListType cqlType = DataTypes.listOf(DataTypes.INET); - GenericType<List<InetAddress>> javaType = new GenericType<List<InetAddress>>() {}; - InetAddress address = InetAddress.getByAddress(new byte[] {127, 0, 0, 1}); - // Because the actual implementation is a subclass, there is no exact match with the codec's - // declared type - assertThat(address).isInstanceOf(Inet4Address.class); - List<InetAddress> value = ImmutableList.of(address); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<List<InetAddress>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - - inOrder.verify(mockCache).lookup(null, GenericType.listOf(Inet4Address.class), true); - } - - @Test - public void should_create_set_codec_for_cql_and_java_types() { - SetType cqlType = DataTypes.setOf(DataTypes.setOf(DataTypes.INT)); - GenericType<Set<Set<Integer>>> javaType = new GenericType<Set<Set<Integer>>>() {}; - Set<Set<Integer>> value = ImmutableSet.of(ImmutableSet.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - 
- TypeCodec<Set<Set<Integer>>> codec = registry.codecFor(cqlType, javaType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - // Cache lookup for the codec, and recursively for its subcodec - inOrder.verify(mockCache).lookup(cqlType, javaType, false); - inOrder - .verify(mockCache) - .lookup(DataTypes.setOf(DataTypes.INT), GenericType.setOf(GenericType.INTEGER), false); - } - - @Test - public void should_create_set_codec_for_cql_type() { - SetType cqlType = DataTypes.setOf(DataTypes.setOf(DataTypes.INT)); - GenericType<Set<Set<Integer>>> javaType = new GenericType<Set<Set<Integer>>>() {}; - Set<Set<Integer>> value = ImmutableSet.of(ImmutableSet.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Set<Set<Integer>>> codec = registry.codecFor(cqlType); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, null, false); - inOrder.verify(mockCache).lookup(DataTypes.setOf(DataTypes.INT), null, false); - } - - @Test - public void should_create_set_codec_for_cql_type_and_java_value() { - SetType cqlType = DataTypes.setOf(DataTypes.setOf(DataTypes.INT)); - GenericType<Set<Set<Integer>>> javaType = new GenericType<Set<Set<Integer>>>() {}; - Set<Set<Integer>> value = ImmutableSet.of(ImmutableSet.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Set<Set<Integer>>> codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - inOrder - .verify(mockCache) - .lookup(DataTypes.setOf(DataTypes.INT), GenericType.setOf(GenericType.INTEGER), true); - } - - @Test - public void should_create_set_codec_for_java_value() { - SetType cqlType = DataTypes.setOf(DataTypes.setOf(DataTypes.INT)); - GenericType<Set<Set<Integer>>> javaType = new GenericType<Set<Set<Integer>>>() {}; - Set<Set<Integer>> value = ImmutableSet.of(ImmutableSet.of(1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Set<Set<Integer>>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(null, javaType, true); - inOrder.verify(mockCache).lookup(null, GenericType.setOf(GenericType.INTEGER), true); - } - - @Test - public void should_create_set_codec_for_empty_java_value() { - GenericType<Set<Set<Boolean>>> javaType = GenericType.setOf(GenericType.setOf(Boolean.class)); - Set<Set<Boolean>> value = Collections.singleton(Collections.emptySet()); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Set<Set<Boolean>>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(value)).isTrue(); - - // Note that empty collections without CQL type are a corner case, in that the registry returns - // a codec that does not accept cqlType, nor the value's declared Java type.
- // The only requirement is that it can encode the value, which holds true: - codec.encode(value, ProtocolVersion.DEFAULT); - - inOrder.verify(mockCache).lookup(null, javaType, true); - } - - @Test - public void should_create_set_codec_for_cql_type_and_empty_java_value() { - SetType cqlType = DataTypes.setOf(DataTypes.setOf(DataTypes.INT)); - GenericType<Set<Set<Integer>>> javaType = GenericType.setOf(GenericType.setOf(Integer.class)); - Set<Set<Integer>> value = Collections.emptySet(); - + @UseDataProvider( + value = "collectionsWithCqlAndJavaTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_collection_codec_for_cql_and_java_types( + DataType cqlType, GenericType<?> javaType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<Set<Set<Integer>>> codec = registry.codecFor(cqlType, value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(value)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - // Verify that the codec can encode the value - codec.encode(value, ProtocolVersion.DEFAULT); - - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - } - - @Test - public void should_create_set_codec_for_java_value_when_first_element_is_a_subtype() - throws UnknownHostException { - SetType cqlType = DataTypes.setOf(DataTypes.INET); - GenericType<Set<InetAddress>> javaType = new GenericType<Set<InetAddress>>() {}; - InetAddress address = InetAddress.getByAddress(new byte[] {127, 0, 0, 1}); - // Because the actual implementation is a subclass, there is no exact match with the codec's - // declared type - assertThat(address).isInstanceOf(Inet4Address.class); - Set<InetAddress> value = ImmutableSet.of(address); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Set<InetAddress>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - - inOrder.verify(mockCache).lookup(null, GenericType.setOf(Inet4Address.class), true); - } - - @Test - public void should_create_map_codec_for_cql_and_java_types() { - MapType cqlType = DataTypes.mapOf(DataTypes.INT, DataTypes.mapOf(DataTypes.INT, DataTypes.INT)); - GenericType<Map<Integer, Map<Integer, Integer>>> javaType = - new GenericType<Map<Integer, Map<Integer, Integer>>>() {}; - Map<Integer, Map<Integer, Integer>> value = ImmutableMap.of(1, ImmutableMap.of(1, 1)); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Integer, Integer>>> codec = registry.codecFor(cqlType, javaType); + TypeCodec<?> codec = registry.codecFor(cqlType, javaType); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(javaType)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - // Cache lookup for the codec, and recursively for its subcodec inOrder.verify(mockCache).lookup(cqlType, javaType, false); - inOrder - .verify(mockCache) - .lookup( - DataTypes.mapOf(DataTypes.INT, DataTypes.INT), - GenericType.mapOf(GenericType.INTEGER, GenericType.INTEGER), - false); } @Test - public void should_create_map_codec_for_cql_type() { - MapType cqlType = DataTypes.mapOf(DataTypes.INT, DataTypes.mapOf(DataTypes.INT, DataTypes.INT)); - GenericType<Map<Integer, Map<Integer, Integer>>> javaType = - new GenericType<Map<Integer, Map<Integer, Integer>>>() {}; - Map<Integer, Map<Integer, Integer>> value = ImmutableMap.of(1, ImmutableMap.of(1, 1)); - + @UseDataProvider( + value = "collectionsWithCqlAndJavaTypes", + location = CachingCodecRegistryTestDataProviders.class)
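+ // Each provider row supplies a CQL collection type, a compatible Java type, and a sample value.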
+ public void should_create_collection_codec_for_cql_type( + DataType cqlType, GenericType<?> javaType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Integer, Integer>>> codec = registry.codecFor(cqlType); + TypeCodec<?> codec = registry.codecFor(cqlType); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(javaType)).isTrue(); assertThat(codec.accepts(value)).isTrue(); inOrder.verify(mockCache).lookup(cqlType, null, false); - inOrder.verify(mockCache).lookup(DataTypes.mapOf(DataTypes.INT, DataTypes.INT), null, false); } @Test - public void should_create_map_codec_for_java_type() { - MapType cqlType = DataTypes.mapOf(DataTypes.INT, DataTypes.mapOf(DataTypes.INT, DataTypes.INT)); - GenericType<Map<Integer, Map<Integer, Integer>>> javaType = - new GenericType<Map<Integer, Map<Integer, Integer>>>() {}; - Map<Integer, Map<Integer, Integer>> value = ImmutableMap.of(1, ImmutableMap.of(1, 1)); - + @UseDataProvider( + value = "collectionsWithCqlAndJavaTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_collection_codec_for_cql_type_and_java_value( + DataType cqlType, GenericType<?> javaType, GenericType<?> javaTypeLookup, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Integer, Integer>>> codec = registry.codecFor(javaType); + TypeCodec<Object> codec = registry.codecFor(cqlType, value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(javaType)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(null, javaType, false); - inOrder.verify(mockCache).lookup(null, new GenericType<Map<Integer, Integer>>() {}, false); + inOrder.verify(mockCache).lookup(cqlType, javaTypeLookup, true); } @Test - public void should_create_map_codec_for_cql_type_and_java_value() { - MapType cqlType = DataTypes.mapOf(DataTypes.INT, DataTypes.mapOf(DataTypes.INT, DataTypes.INT)); - GenericType<Map<Integer, Map<Integer, Integer>>> javaType = - new GenericType<Map<Integer, Map<Integer, Integer>>>() {}; - Map<Integer, Map<Integer, Integer>> value = ImmutableMap.of(1, ImmutableMap.of(1, 1)); - + @UseDataProvider( + value = "collectionsWithCqlAndJavaTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_collection_codec_for_java_value( + DataType cqlType, GenericType<?> javaType, GenericType<?> javaTypeLookup, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Integer, Integer>>> codec = registry.codecFor(cqlType, value); + TypeCodec<Object> codec = registry.codecFor(value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(javaType)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, javaType, true); - inOrder - .verify(mockCache) - .lookup( - DataTypes.mapOf(DataTypes.INT, DataTypes.INT), - GenericType.mapOf(GenericType.INTEGER, GenericType.INTEGER), - true); + inOrder.verify(mockCache).lookup(cqlType, javaTypeLookup, true); } @Test - public void should_create_map_codec_for_java_value() { - MapType cqlType = DataTypes.mapOf(DataTypes.INT, DataTypes.mapOf(DataTypes.INT, DataTypes.INT)); - GenericType<Map<Integer, Map<Integer, Integer>>> javaType = - new GenericType<Map<Integer, Map<Integer, Integer>>>() {}; - Map<Integer, Map<Integer, Integer>> value = ImmutableMap.of(1, ImmutableMap.of(1, 1)); - + @UseDataProvider( + value = "emptyCollectionsWithCqlAndJavaTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_collection_codec_for_empty_java_value( + DataType cqlType, + GenericType<?> javaType, + DataType 
cqlTypeLookup, + GenericType<?> javaTypeLookup, + Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Integer, Integer>>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(null, javaType, true); - inOrder - .verify(mockCache) - .lookup(null, GenericType.mapOf(GenericType.INTEGER, GenericType.INTEGER), true); - } - - @Test - public void should_create_map_codec_for_empty_java_value() { - GenericType<Map<Boolean, Boolean>> javaType = - GenericType.mapOf(GenericType.BOOLEAN, GenericType.BOOLEAN); - Map<Integer, Map<Boolean, Boolean>> value = ImmutableMap.of(1, Collections.emptyMap()); - - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Boolean, Boolean>>> codec = registry.codecFor(value); + TypeCodec<Object> codec = registry.codecFor(value); assertThat(codec).isNotNull(); + assertThat(codec.accepts(cqlType)).isFalse(); + assertThat(codec.accepts(javaType)).isFalse(); assertThat(codec.accepts(value)).isTrue(); // Note that empty collections without CQL type are a corner case, in that the registry returns @@ -616,66 +234,45 @@ public void should_create_map_codec_for_empty_java_value() { // The only requirement is that it can encode the value, which holds true: codec.encode(value, ProtocolVersion.DEFAULT); - inOrder.verify(mockCache).lookup(null, javaType, true); + inOrder.verify(mockCache).lookup(cqlTypeLookup, javaTypeLookup, true); } @Test - public void should_create_map_codec_for_cql_type_and_empty_java_value() { - MapType cqlType = - DataTypes.mapOf(DataTypes.INT, DataTypes.mapOf(DataTypes.DOUBLE, DataTypes.TEXT)); - GenericType<Map<Integer, Map<Double, String>>> javaType = - GenericType.mapOf( - GenericType.INTEGER, GenericType.mapOf(GenericType.DOUBLE, GenericType.STRING)); - Map<Integer, Map<Double, String>> value = ImmutableMap.of(1, Collections.emptyMap()); - + @UseDataProvider( + value = "emptyCollectionsWithCqlAndJavaTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_collection_codec_for_cql_type_and_empty_java_value( + DataType cqlType, GenericType<?> javaType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<Integer, Map<Double, String>>> codec = registry.codecFor(cqlType, value); + TypeCodec<Object> codec = registry.codecFor(cqlType, value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(javaType)).isTrue(); assertThat(codec.accepts(value)).isTrue(); // Verify that the codec can encode the value codec.encode(value, ProtocolVersion.DEFAULT); - inOrder.verify(mockCache).lookup(cqlType, javaType, true); } @Test - public void should_create_map_codec_for_java_value_when_first_element_is_a_subtype() - throws UnknownHostException { - MapType cqlType = DataTypes.mapOf(DataTypes.INET, DataTypes.INET); - GenericType<Map<InetAddress, InetAddress>> javaType = - new GenericType<Map<InetAddress, InetAddress>>() {}; - InetAddress address = InetAddress.getByAddress(new byte[] {127, 0, 0, 1}); - // Because the actual implementation is a subclass, there is no exact match with the codec's - // declared type - assertThat(address).isInstanceOf(Inet4Address.class); - Map<InetAddress, InetAddress> value = ImmutableMap.of(address, address); - + @UseDataProvider( + value = "collectionsWithNullElements", + location = CachingCodecRegistryTestDataProviders.class) + public void should_throw_for_collection_containing_null_element(Object value, 
String expected) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); - InOrder inOrder = inOrder(mockCache); - - TypeCodec<Map<InetAddress, InetAddress>> codec = registry.codecFor(value); - assertThat(codec).isNotNull(); - assertThat(codec.accepts(cqlType)).isTrue(); - assertThat(codec.accepts(javaType)).isTrue(); - assertThat(codec.accepts(value)).isTrue(); - - inOrder - .verify(mockCache) - .lookup(null, GenericType.mapOf(Inet4Address.class, Inet4Address.class), true); + assertThatThrownBy(() -> registry.codecFor(value)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage(expected); } @Test - public void should_create_tuple_codec_for_cql_and_java_types() { - TupleType cqlType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleValue value = cqlType.newValue(); - + @UseDataProvider( + value = "tuplesWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_tuple_codec_for_cql_and_java_types(DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - TypeCodec<TupleValue> codec = registry.codecFor(cqlType, GenericType.TUPLE_VALUE); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); @@ -684,17 +281,15 @@ public void should_create_tuple_codec_for_cql_and_java_types() { assertThat(codec.accepts(value)).isTrue(); inOrder.verify(mockCache).lookup(cqlType, GenericType.TUPLE_VALUE, false); // field codecs are only looked up when fields are accessed, so no cache hit for list now - } @Test - public void should_create_tuple_codec_for_cql_type() { - TupleType cqlType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleValue value = cqlType.newValue(); - + @UseDataProvider( + value = "tuplesWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_tuple_codec_for_cql_type(DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - TypeCodec<TupleValue> codec = registry.codecFor(cqlType); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); @@ -705,58 +300,47 @@ public void should_create_tuple_codec_for_cql_type() { } @Test - public void should_create_tuple_codec_for_cql_type_and_java_value() { - TupleType cqlType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleValue value = cqlType.newValue(); - + @UseDataProvider( + value = "tuplesWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_tuple_codec_for_cql_type_and_java_value( + DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<TupleValue> codec = registry.codecFor(cqlType, value); + TypeCodec<Object> codec = registry.codecFor(cqlType, value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); assertThat(codec.accepts(TupleValue.class)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.TUPLE_VALUE, false); - + inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultTupleValue.class), true); inOrder.verifyNoMoreInteractions(); } @Test - public void should_create_tuple_codec_for_java_value() { - TupleType cqlType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); - TupleValue
value = cqlType.newValue(); - + @UseDataProvider( + value = "tuplesWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_tuple_codec_for_java_value(DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<TupleValue> codec = registry.codecFor(value); + TypeCodec<Object> codec = registry.codecFor(value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(GenericType.TUPLE_VALUE)).isTrue(); assertThat(codec.accepts(TupleValue.class)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - // UDTs know their CQL type, so the actual lookup is by CQL + Java type, and therefore not - // covariant. - inOrder.verify(mockCache).lookup(cqlType, GenericType.TUPLE_VALUE, false); - + inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultTupleValue.class), true); inOrder.verifyNoMoreInteractions(); } @Test - public void should_create_udt_codec_for_cql_and_java_types() { - UserDefinedType cqlType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UdtValue value = cqlType.newValue(); - + @UseDataProvider( + value = "udtsWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_udt_codec_for_cql_and_java_types(DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - TypeCodec<UdtValue> codec = registry.codecFor(cqlType, GenericType.UDT_VALUE); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); @@ -769,18 +353,12 @@ public void should_create_udt_codec_for_cql_and_java_types() { } @Test - public void should_create_udt_codec_for_cql_type() { - UserDefinedType cqlType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UdtValue value = cqlType.newValue(); - + @UseDataProvider( + value = "udtsWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_udt_codec_for_cql_type(DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - TypeCodec<UdtValue> codec = registry.codecFor(cqlType); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); @@ -791,52 +369,36 @@ public void should_create_udt_codec_for_cql_type() { }
inOrder = inOrder(mockCache); - - TypeCodec<UdtValue> codec = registry.codecFor(cqlType, value); + TypeCodec<Object> codec = registry.codecFor(cqlType, value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); assertThat(codec.accepts(UdtValue.class)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - inOrder.verify(mockCache).lookup(cqlType, GenericType.UDT_VALUE, false); - + inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultUdtValue.class), true); inOrder.verifyNoMoreInteractions(); } @Test - public void should_create_udt_codec_for_java_value() { - UserDefinedType cqlType = - new UserDefinedTypeBuilder( - CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) - .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) - .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) - .build(); - UdtValue value = cqlType.newValue(); - + @UseDataProvider( + value = "udtsWithCqlTypes", + location = CachingCodecRegistryTestDataProviders.class) + public void should_create_udt_codec_for_java_value(DataType cqlType, Object value) { TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); InOrder inOrder = inOrder(mockCache); - - TypeCodec<UdtValue> codec = registry.codecFor(value); + TypeCodec<Object> codec = registry.codecFor(value); assertThat(codec).isNotNull(); assertThat(codec.accepts(cqlType)).isTrue(); assertThat(codec.accepts(GenericType.UDT_VALUE)).isTrue(); assertThat(codec.accepts(UdtValue.class)).isTrue(); assertThat(codec.accepts(value)).isTrue(); - // UDTs know their CQL type, so the actual lookup is by CQL + Java type, and therefore not - // covariant. - inOrder.verify(mockCache).lookup(cqlType, GenericType.UDT_VALUE, false); - + inOrder.verify(mockCache).lookup(cqlType, GenericType.of(DefaultUdtValue.class), true); inOrder.verifyNoMoreInteractions(); } @@ -865,7 +427,8 @@ public void should_not_find_codec_if_java_type_unknown() { @Test public void should_not_allow_covariance_for_lookups_by_java_type() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache, new ACodec()); + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + registry.register(new ACodec()); InOrder inOrder = inOrder(mockCache); // covariance not allowed @@ -887,8 +450,10 @@ public void should_not_allow_covariance_for_lookups_by_java_type() { @Test public void should_allow_covariance_for_lookups_by_cql_type_and_value() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache, new ACodec()); + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + registry.register(new ACodec()); InOrder inOrder = inOrder(mockCache); + inOrder.verify(mockCache).lookup(DataTypes.INT, GenericType.of(A.class), false); // covariance allowed @@ -914,8 +479,10 @@ public void should_allow_covariance_for_lookups_by_cql_type_and_value() { @Test public void should_allow_covariance_for_lookups_by_value() { - TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache, new ACodec()); + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + registry.register(new ACodec()); InOrder inOrder = inOrder(mockCache); + inOrder.verify(mockCache).lookup(DataTypes.INT, GenericType.of(A.class), false); // covariance allowed @@ -937,13 +504,66 @@ public void should_allow_covariance_for_lookups_by_value() { inOrder.verifyNoMoreInteractions(); } + @Test + public void 
should_register_user_codec_at_runtime() { + CqlIntToStringCodec intToStringCodec = new CqlIntToStringCodec(); + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + registry.register(intToStringCodec); + // register checks the cache for collisions + verify(mockCache).lookup(DataTypes.INT, GenericType.STRING, false); + + // When the mapping is not ambiguous, the user type should be returned + assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec); + assertThat(registry.codecFor(DataTypes.INT, String.class)).isSameAs(intToStringCodec); + assertThat(registry.codecFor(DataTypes.INT, "123")).isSameAs(intToStringCodec); + + // When there is an ambiguity with a built-in codec, the built-in codec should have priority + assertThat(registry.codecFor(DataTypes.INT)).isSameAs(TypeCodecs.INT); + assertThat(registry.codecFor("123")).isSameAs(TypeCodecs.TEXT); + + verifyZeroInteractions(mockCache); + } + + @Test + public void should_ignore_user_codec_if_collides_with_builtin_codec() { + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + + IntCodec userIntCodec = new IntCodec(); + registry.register(userIntCodec); + + assertThat(registry.codecFor(DataTypes.INT, Integer.class)).isNotSameAs(userIntCodec); + } + + @Test + public void should_ignore_user_codec_if_collides_with_other_user_codec() { + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + CqlIntToStringCodec intToStringCodec1 = new CqlIntToStringCodec(); + CqlIntToStringCodec intToStringCodec2 = new CqlIntToStringCodec(); + + registry.register(intToStringCodec1, intToStringCodec2); + + assertThat(registry.codecFor(DataTypes.INT, GenericType.STRING)).isSameAs(intToStringCodec1); + } + + @Test + public void should_ignore_user_codec_if_collides_with_generated_codec() { + TestCachingCodecRegistry registry = new TestCachingCodecRegistry(mockCache); + + TypeCodec<List<Integer>> userListOfIntCodec = TypeCodecs.listOf(TypeCodecs.INT); + registry.register(userListOfIntCodec); + + assertThat( + registry.codecFor(DataTypes.listOf(DataTypes.INT), GenericType.listOf(Integer.class))) + .isNotSameAs(userListOfIntCodec); + } + + // Our intent is not to test Guava cache, so we don't need an actual cache here. + // The only thing we want to check in our tests is if getCachedCodec was called. + public static class TestCachingCodecRegistry extends CachingCodecRegistry { + private final MockCache cache; - public TestCachingCodecRegistry(MockCache cache, TypeCodec<?>... userCodecs) { - super("test", CodecRegistryConstants.PRIMITIVE_CODECS, userCodecs); + TestCachingCodecRegistry(MockCache cache) { + super("test", CodecRegistryConstants.PRIMITIVE_CODECS); this.cache = cache; } diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java new file mode 100644 index 00000000000..4c0298bafad --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/codec/registry/CachingCodecRegistryTestDataProviders.java @@ -0,0 +1,639 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.type.codec.registry; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.CqlVector; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.internal.core.data.DefaultTupleValue; +import com.datastax.oss.driver.internal.core.data.DefaultUdtValue; +import com.datastax.oss.driver.internal.core.type.UserDefinedTypeBuilder; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import com.tngtech.java.junit.dataprovider.DataProvider; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.Inet4Address; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.util.Collections; +import java.util.UUID; + +@SuppressWarnings("unused") +public class CachingCodecRegistryTestDataProviders { + + @DataProvider + public static Object[][] primitiveCodecs() { + return new Object[][] { + {TypeCodecs.BOOLEAN}, + {TypeCodecs.TINYINT}, + {TypeCodecs.DOUBLE}, + {TypeCodecs.COUNTER}, + {TypeCodecs.FLOAT}, + {TypeCodecs.INT}, + {TypeCodecs.BIGINT}, + {TypeCodecs.SMALLINT}, + {TypeCodecs.TIMESTAMP}, + {TypeCodecs.DATE}, + {TypeCodecs.TIME}, + {TypeCodecs.BLOB}, + {TypeCodecs.TEXT}, + {TypeCodecs.ASCII}, + {TypeCodecs.VARINT}, + {TypeCodecs.DECIMAL}, + {TypeCodecs.UUID}, + {TypeCodecs.TIMEUUID}, + {TypeCodecs.INET}, + {TypeCodecs.DURATION}, + }; + } + + @DataProvider + public static Object[][] primitiveCodecsWithValues() throws UnknownHostException { + return new Object[][] { + {true, TypeCodecs.BOOLEAN}, + {(byte) 0, TypeCodecs.TINYINT}, + {0.0, TypeCodecs.DOUBLE}, + {0.0f, TypeCodecs.FLOAT}, + {0, TypeCodecs.INT}, + {0L, TypeCodecs.BIGINT}, + {(short) 0, TypeCodecs.SMALLINT}, + {Instant.EPOCH, TypeCodecs.TIMESTAMP}, + {LocalDate.MIN, TypeCodecs.DATE}, + {LocalTime.MIDNIGHT, TypeCodecs.TIME}, + {ByteBuffer.allocate(0), TypeCodecs.BLOB}, + {"", TypeCodecs.TEXT}, + {BigInteger.ONE, TypeCodecs.VARINT}, + {BigDecimal.ONE, TypeCodecs.DECIMAL}, + {new UUID(2L, 1L), TypeCodecs.UUID}, + {InetAddress.getByName("127.0.0.1"), TypeCodecs.INET}, + {CqlDuration.newInstance(1, 2, 3), TypeCodecs.DURATION}, + }; + } + + @DataProvider + public static Object[][] 
primitiveCodecsWithCqlTypesAndValues() throws UnknownHostException { + return new Object[][] { + {DataTypes.BOOLEAN, true, TypeCodecs.BOOLEAN}, + {DataTypes.TINYINT, (byte) 0, TypeCodecs.TINYINT}, + {DataTypes.DOUBLE, 0.0, TypeCodecs.DOUBLE}, + {DataTypes.FLOAT, 0.0f, TypeCodecs.FLOAT}, + {DataTypes.INT, 0, TypeCodecs.INT}, + {DataTypes.BIGINT, 0L, TypeCodecs.BIGINT}, + {DataTypes.SMALLINT, (short) 0, TypeCodecs.SMALLINT}, + {DataTypes.TIMESTAMP, Instant.EPOCH, TypeCodecs.TIMESTAMP}, + {DataTypes.DATE, LocalDate.MIN, TypeCodecs.DATE}, + {DataTypes.TIME, LocalTime.MIDNIGHT, TypeCodecs.TIME}, + {DataTypes.BLOB, ByteBuffer.allocate(0), TypeCodecs.BLOB}, + {DataTypes.TEXT, "", TypeCodecs.TEXT}, + {DataTypes.VARINT, BigInteger.ONE, TypeCodecs.VARINT}, + {DataTypes.DECIMAL, BigDecimal.ONE, TypeCodecs.DECIMAL}, + {DataTypes.UUID, new UUID(2L, 1L), TypeCodecs.UUID}, + {DataTypes.INET, InetAddress.getByName("127.0.0.1"), TypeCodecs.INET}, + {DataTypes.DURATION, CqlDuration.newInstance(1, 2, 3), TypeCodecs.DURATION}, + }; + } + + @DataProvider + public static Object[][] collectionsWithCqlAndJavaTypes() + throws UnknownHostException, ClassNotFoundException { + TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); + TupleValue tupleValue = tupleType.newValue(); + UserDefinedType userType = + new UserDefinedTypeBuilder( + CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) + .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) + .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) + .build(); + UdtValue udtValue = userType.newValue(); + return new Object[][] { + // lists + { + DataTypes.listOf(DataTypes.INT), + GenericType.listOf(Integer.class), + GenericType.listOf(Integer.class), + ImmutableList.of(1) + }, + { + DataTypes.listOf(DataTypes.TEXT), + GenericType.listOf(String.class), + GenericType.listOf(String.class), + ImmutableList.of("foo") + }, + { + DataTypes.listOf(DataTypes.BLOB), + GenericType.listOf(ByteBuffer.class), + GenericType.listOf(Class.forName("java.nio.HeapByteBuffer")), + ImmutableList.of(ByteBuffer.wrap(new byte[] {127, 0, 0, 1})) + }, + { + DataTypes.listOf(DataTypes.INET), + GenericType.listOf(InetAddress.class), + GenericType.listOf(Inet4Address.class), + ImmutableList.of(InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) + }, + { + DataTypes.listOf(tupleType), + GenericType.listOf(TupleValue.class), + GenericType.listOf(DefaultTupleValue.class), + ImmutableList.of(tupleValue) + }, + { + DataTypes.listOf(userType), + GenericType.listOf(UdtValue.class), + GenericType.listOf(DefaultUdtValue.class), + ImmutableList.of(udtValue) + }, + { + DataTypes.listOf(DataTypes.listOf(DataTypes.INT)), + GenericType.listOf(GenericType.listOf(Integer.class)), + GenericType.listOf(GenericType.listOf(Integer.class)), + ImmutableList.of(ImmutableList.of(1)) + }, + { + DataTypes.listOf(DataTypes.listOf(tupleType)), + GenericType.listOf(GenericType.listOf(TupleValue.class)), + GenericType.listOf(GenericType.listOf(DefaultTupleValue.class)), + ImmutableList.of(ImmutableList.of(tupleValue)) + }, + { + DataTypes.listOf(DataTypes.listOf(userType)), + GenericType.listOf(GenericType.listOf(UdtValue.class)), + GenericType.listOf(GenericType.listOf(DefaultUdtValue.class)), + ImmutableList.of(ImmutableList.of(udtValue)) + }, + // sets + { + DataTypes.setOf(DataTypes.INT), + GenericType.setOf(Integer.class), + GenericType.setOf(Integer.class), + ImmutableSet.of(1) + }, + { + DataTypes.setOf(DataTypes.TEXT), + 
GenericType.setOf(String.class), + GenericType.setOf(String.class), + ImmutableSet.of("foo") + }, + { + DataTypes.setOf(DataTypes.BLOB), + GenericType.setOf(ByteBuffer.class), + GenericType.setOf(Class.forName("java.nio.HeapByteBuffer")), + ImmutableSet.of(ByteBuffer.wrap(new byte[] {127, 0, 0, 1})) + }, + { + DataTypes.setOf(DataTypes.INET), + GenericType.setOf(InetAddress.class), + GenericType.setOf(Inet4Address.class), + ImmutableSet.of(InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) + }, + { + DataTypes.setOf(tupleType), + GenericType.setOf(TupleValue.class), + GenericType.setOf(DefaultTupleValue.class), + ImmutableSet.of(tupleValue) + }, + { + DataTypes.setOf(userType), + GenericType.setOf(UdtValue.class), + GenericType.setOf(DefaultUdtValue.class), + ImmutableSet.of(udtValue) + }, + { + DataTypes.setOf(DataTypes.setOf(DataTypes.INT)), + GenericType.setOf(GenericType.setOf(Integer.class)), + GenericType.setOf(GenericType.setOf(Integer.class)), + ImmutableSet.of(ImmutableSet.of(1)) + }, + { + DataTypes.setOf(DataTypes.setOf(tupleType)), + GenericType.setOf(GenericType.setOf(TupleValue.class)), + GenericType.setOf(GenericType.setOf(DefaultTupleValue.class)), + ImmutableSet.of(ImmutableSet.of(tupleValue)) + }, + { + DataTypes.setOf(DataTypes.setOf(userType)), + GenericType.setOf(GenericType.setOf(UdtValue.class)), + GenericType.setOf(GenericType.setOf(DefaultUdtValue.class)), + ImmutableSet.of(ImmutableSet.of(udtValue)) + }, + // maps + { + DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), + GenericType.mapOf(Integer.class, String.class), + GenericType.mapOf(Integer.class, String.class), + ImmutableMap.of(1, "foo") + }, + { + DataTypes.mapOf(DataTypes.BLOB, DataTypes.INET), + GenericType.mapOf(ByteBuffer.class, InetAddress.class), + GenericType.mapOf(Class.forName("java.nio.HeapByteBuffer"), Inet4Address.class), + ImmutableMap.of( + ByteBuffer.wrap(new byte[] {127, 0, 0, 1}), + InetAddress.getByAddress(new byte[] {127, 0, 0, 1})) + }, + { + DataTypes.mapOf(tupleType, tupleType), + GenericType.mapOf(TupleValue.class, TupleValue.class), + GenericType.mapOf(DefaultTupleValue.class, DefaultTupleValue.class), + ImmutableMap.of(tupleValue, tupleValue) + }, + { + DataTypes.mapOf(userType, userType), + GenericType.mapOf(UdtValue.class, UdtValue.class), + GenericType.mapOf(DefaultUdtValue.class, DefaultUdtValue.class), + ImmutableMap.of(udtValue, udtValue) + }, + { + DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT)), + GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), + GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), + ImmutableMap.of(UUID.randomUUID(), ImmutableMap.of(1, "foo")) + }, + { + DataTypes.mapOf(DataTypes.mapOf(userType, userType), DataTypes.mapOf(tupleType, tupleType)), + GenericType.mapOf( + GenericType.mapOf(UdtValue.class, UdtValue.class), + GenericType.mapOf(TupleValue.class, TupleValue.class)), + GenericType.mapOf( + GenericType.mapOf(DefaultUdtValue.class, DefaultUdtValue.class), + GenericType.mapOf(DefaultTupleValue.class, DefaultTupleValue.class)), + ImmutableMap.of( + ImmutableMap.of(udtValue, udtValue), ImmutableMap.of(tupleValue, tupleValue)) + }, + // vectors + { + DataTypes.vectorOf(DataTypes.INT, 1), + GenericType.vectorOf(Integer.class), + GenericType.vectorOf(Integer.class), + CqlVector.newInstance(1) + }, + { + DataTypes.vectorOf(DataTypes.BIGINT, 1), + GenericType.vectorOf(Long.class), + GenericType.vectorOf(Long.class), + CqlVector.newInstance(1l) + }, + { + 
DataTypes.vectorOf(DataTypes.SMALLINT, 1), + GenericType.vectorOf(Short.class), + GenericType.vectorOf(Short.class), + CqlVector.newInstance((short) 1) + }, + { + DataTypes.vectorOf(DataTypes.TINYINT, 1), + GenericType.vectorOf(Byte.class), + GenericType.vectorOf(Byte.class), + CqlVector.newInstance((byte) 1) + }, + { + DataTypes.vectorOf(DataTypes.FLOAT, 1), + GenericType.vectorOf(Float.class), + GenericType.vectorOf(Float.class), + CqlVector.newInstance(1.0f) + }, + { + DataTypes.vectorOf(DataTypes.DOUBLE, 1), + GenericType.vectorOf(Double.class), + GenericType.vectorOf(Double.class), + CqlVector.newInstance(1.0d) + }, + { + DataTypes.vectorOf(DataTypes.DECIMAL, 1), + GenericType.vectorOf(BigDecimal.class), + GenericType.vectorOf(BigDecimal.class), + CqlVector.newInstance(BigDecimal.ONE) + }, + { + DataTypes.vectorOf(DataTypes.VARINT, 1), + GenericType.vectorOf(BigInteger.class), + GenericType.vectorOf(BigInteger.class), + CqlVector.newInstance(BigInteger.ONE) + }, + // vector with arbitrary types + { + DataTypes.vectorOf(DataTypes.TEXT, 2), + GenericType.vectorOf(String.class), + GenericType.vectorOf(String.class), + CqlVector.newInstance("abc", "de") + }, + { + DataTypes.vectorOf(DataTypes.TIME, 2), + GenericType.vectorOf(LocalTime.class), + GenericType.vectorOf(LocalTime.class), + CqlVector.newInstance(LocalTime.MIDNIGHT, LocalTime.NOON) + }, + { + DataTypes.vectorOf(DataTypes.vectorOf(DataTypes.TINYINT, 2), 2), + GenericType.vectorOf(GenericType.vectorOf(Byte.class)), + GenericType.vectorOf(GenericType.vectorOf(Byte.class)), + CqlVector.newInstance( + CqlVector.newInstance((byte) 1, (byte) 2), CqlVector.newInstance((byte) 3, (byte) 4)) + }, + }; + } + + @DataProvider + public static Object[][] emptyCollectionsWithCqlAndJavaTypes() { + TupleType tupleType = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); + UserDefinedType userType = + new UserDefinedTypeBuilder( + CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) + .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) + .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) + .build(); + return new Object[][] { + // lists + { + DataTypes.listOf(DataTypes.INT), + GenericType.listOf(Integer.class), + DataTypes.listOf(DataTypes.BOOLEAN), + GenericType.listOf(Boolean.class), + Collections.emptyList() + }, + { + DataTypes.listOf(DataTypes.TEXT), + GenericType.listOf(String.class), + DataTypes.listOf(DataTypes.BOOLEAN), + GenericType.listOf(Boolean.class), + Collections.emptyList() + }, + { + DataTypes.listOf(DataTypes.BLOB), + GenericType.listOf(ByteBuffer.class), + DataTypes.listOf(DataTypes.BOOLEAN), + GenericType.listOf(Boolean.class), + Collections.emptyList() + }, + { + DataTypes.listOf(DataTypes.INET), + GenericType.listOf(InetAddress.class), + DataTypes.listOf(DataTypes.BOOLEAN), + GenericType.listOf(Boolean.class), + Collections.emptyList() + }, + { + DataTypes.listOf(tupleType), + GenericType.listOf(TupleValue.class), + DataTypes.listOf(DataTypes.BOOLEAN), + GenericType.listOf(Boolean.class), + Collections.emptyList() + }, + { + DataTypes.listOf(userType), + GenericType.listOf(UdtValue.class), + DataTypes.listOf(DataTypes.BOOLEAN), + GenericType.listOf(Boolean.class), + Collections.emptyList() + }, + { + DataTypes.listOf(DataTypes.listOf(DataTypes.INT)), + GenericType.listOf(GenericType.listOf(Integer.class)), + DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), + GenericType.listOf(GenericType.listOf(Boolean.class)), + 
ImmutableList.of(Collections.emptyList()) + }, + { + DataTypes.listOf(DataTypes.listOf(tupleType)), + GenericType.listOf(GenericType.listOf(TupleValue.class)), + DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), + GenericType.listOf(GenericType.listOf(Boolean.class)), + ImmutableList.of(Collections.emptyList()) + }, + { + DataTypes.listOf(DataTypes.listOf(userType)), + GenericType.listOf(GenericType.listOf(UdtValue.class)), + DataTypes.listOf(DataTypes.listOf(DataTypes.BOOLEAN)), + GenericType.listOf(GenericType.listOf(Boolean.class)), + ImmutableList.of(Collections.emptyList()) + }, + // sets + { + DataTypes.setOf(DataTypes.INT), + GenericType.setOf(Integer.class), + DataTypes.setOf(DataTypes.BOOLEAN), + GenericType.setOf(Boolean.class), + Collections.emptySet() + }, + { + DataTypes.setOf(DataTypes.TEXT), + GenericType.setOf(String.class), + DataTypes.setOf(DataTypes.BOOLEAN), + GenericType.setOf(Boolean.class), + Collections.emptySet() + }, + { + DataTypes.setOf(DataTypes.BLOB), + GenericType.setOf(ByteBuffer.class), + DataTypes.setOf(DataTypes.BOOLEAN), + GenericType.setOf(Boolean.class), + Collections.emptySet() + }, + { + DataTypes.setOf(DataTypes.INET), + GenericType.setOf(InetAddress.class), + DataTypes.setOf(DataTypes.BOOLEAN), + GenericType.setOf(Boolean.class), + Collections.emptySet() + }, + { + DataTypes.setOf(tupleType), + GenericType.setOf(TupleValue.class), + DataTypes.setOf(DataTypes.BOOLEAN), + GenericType.setOf(Boolean.class), + Collections.emptySet() + }, + { + DataTypes.setOf(userType), + GenericType.setOf(UdtValue.class), + DataTypes.setOf(DataTypes.BOOLEAN), + GenericType.setOf(Boolean.class), + Collections.emptySet() + }, + { + DataTypes.setOf(DataTypes.setOf(DataTypes.INT)), + GenericType.setOf(GenericType.setOf(Integer.class)), + DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), + GenericType.setOf(GenericType.setOf(Boolean.class)), + ImmutableSet.of(Collections.emptySet()) + }, + { + DataTypes.setOf(DataTypes.setOf(tupleType)), + GenericType.setOf(GenericType.setOf(TupleValue.class)), + DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), + GenericType.setOf(GenericType.setOf(Boolean.class)), + ImmutableSet.of(Collections.emptySet()) + }, + { + DataTypes.setOf(DataTypes.setOf(userType)), + GenericType.setOf(GenericType.setOf(UdtValue.class)), + DataTypes.setOf(DataTypes.setOf(DataTypes.BOOLEAN)), + GenericType.setOf(GenericType.setOf(Boolean.class)), + ImmutableSet.of(Collections.emptySet()) + }, + // maps + { + DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), + GenericType.mapOf(Integer.class, String.class), + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), + GenericType.mapOf(Boolean.class, Boolean.class), + Collections.emptyMap() + }, + { + DataTypes.mapOf(DataTypes.BLOB, DataTypes.INET), + GenericType.mapOf(ByteBuffer.class, InetAddress.class), + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), + GenericType.mapOf(Boolean.class, Boolean.class), + Collections.emptyMap() + }, + { + DataTypes.mapOf(tupleType, tupleType), + GenericType.mapOf(TupleValue.class, TupleValue.class), + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), + GenericType.mapOf(Boolean.class, Boolean.class), + Collections.emptyMap() + }, + { + DataTypes.mapOf(userType, userType), + GenericType.mapOf(UdtValue.class, UdtValue.class), + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), + GenericType.mapOf(Boolean.class, Boolean.class), + Collections.emptyMap() + }, + { + DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT)), + 
GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Integer.class, String.class)), + DataTypes.mapOf(DataTypes.UUID, DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN)), + GenericType.mapOf(GenericType.UUID, GenericType.mapOf(Boolean.class, Boolean.class)), + ImmutableMap.of(UUID.randomUUID(), Collections.emptyMap()) + }, + { + DataTypes.mapOf(DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT), DataTypes.UUID), + GenericType.mapOf(GenericType.mapOf(Integer.class, String.class), GenericType.UUID), + DataTypes.mapOf(DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), DataTypes.UUID), + GenericType.mapOf(GenericType.mapOf(Boolean.class, Boolean.class), GenericType.UUID), + ImmutableMap.of(Collections.emptyMap(), UUID.randomUUID()) + }, + { + DataTypes.mapOf(DataTypes.mapOf(userType, userType), DataTypes.mapOf(tupleType, tupleType)), + GenericType.mapOf( + GenericType.mapOf(UdtValue.class, UdtValue.class), + GenericType.mapOf(TupleValue.class, TupleValue.class)), + DataTypes.mapOf( + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN), + DataTypes.mapOf(DataTypes.BOOLEAN, DataTypes.BOOLEAN)), + GenericType.mapOf( + GenericType.mapOf(Boolean.class, Boolean.class), + GenericType.mapOf(Boolean.class, Boolean.class)), + ImmutableMap.of(Collections.emptyMap(), Collections.emptyMap()) + }, + }; + } + + @DataProvider + public static Object[][] collectionsWithNullElements() { + return new Object[][] { + { + Collections.singletonList(null), + "Can't infer list codec because the first element is null " + + "(note that CQL does not allow null values in collections)" + }, + { + Collections.singleton(null), + "Can't infer set codec because the first element is null " + + "(note that CQL does not allow null values in collections)" + }, + { + Collections.singletonMap("foo", null), + "Can't infer map codec because the first key and/or value is null " + + "(note that CQL does not allow null values in collections)" + }, + { + Collections.singletonMap(null, "foo"), + "Can't infer map codec because the first key and/or value is null " + + "(note that CQL does not allow null values in collections)" + }, + { + Collections.singletonMap(null, null), + "Can't infer map codec because the first key and/or value is null " + + "(note that CQL does not allow null values in collections)" + }, + }; + } + + @DataProvider + public static Object[][] tuplesWithCqlTypes() { + TupleType tupleType1 = DataTypes.tupleOf(DataTypes.INT, DataTypes.TEXT); + TupleType tupleType2 = DataTypes.tupleOf(DataTypes.INT, DataTypes.listOf(DataTypes.TEXT)); + TupleType tupleType3 = DataTypes.tupleOf(DataTypes.mapOf(tupleType1, tupleType2)); + TupleValue tupleValue1 = tupleType1.newValue(42, "foo"); + TupleValue tupleValue2 = tupleType2.newValue(42, ImmutableList.of("foo", "bar")); + return new Object[][] { + {tupleType1, tupleType1.newValue()}, + {tupleType1, tupleValue1}, + {tupleType2, tupleType2.newValue()}, + {tupleType2, tupleValue2}, + {tupleType3, tupleType3.newValue()}, + {tupleType3, tupleType3.newValue(ImmutableMap.of(tupleValue1, tupleValue2))}, + }; + } + + @DataProvider + public static Object[][] udtsWithCqlTypes() { + UserDefinedType userType1 = + new UserDefinedTypeBuilder( + CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) + .withField(CqlIdentifier.fromInternal("field1"), DataTypes.INT) + .withField(CqlIdentifier.fromInternal("field2"), DataTypes.TEXT) + .build(); + UserDefinedType userType2 = + new UserDefinedTypeBuilder( + CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) + 
.withField(CqlIdentifier.fromInternal("field1"), DataTypes.setOf(DataTypes.BIGINT)) + .withField(CqlIdentifier.fromInternal("field2"), DataTypes.listOf(DataTypes.TEXT)) + .build(); + UserDefinedType userType3 = + new UserDefinedTypeBuilder( + CqlIdentifier.fromInternal("ks"), CqlIdentifier.fromInternal("type")) + .withField(CqlIdentifier.fromInternal("field1"), DataTypes.mapOf(userType1, userType2)) + .build(); + UdtValue userValue1 = userType1.newValue(42, "foo"); + UdtValue userValue2 = + userType2.newValue(ImmutableSet.of(24L, 43L), ImmutableList.of("foo", "bar")); + return new Object[][] { + {userType1, userType1.newValue()}, + {userType1, userValue1}, + {userType2, userType2.newValue()}, + {userType2, userValue2}, + {userType3, userType3.newValue()}, + {userType3, userType3.newValue(ImmutableMap.of(userValue1, userValue2))}, + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java new file mode 100644 index 00000000000..b85d6d66844 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/type/util/VIntCodingTest.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.util; + +import static org.junit.Assert.assertEquals; + +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.nio.ByteBuffer; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class VIntCodingTest { + @DataProvider + public static Object[] roundTripTestValues() { + return new Integer[] { + // note: MAX_VALUE + 1 and MIN_VALUE - 1 overflow and wrap to MIN_VALUE and MAX_VALUE + Integer.MAX_VALUE + 1, + Integer.MAX_VALUE, + Integer.MAX_VALUE - 1, + Integer.MIN_VALUE, + Integer.MIN_VALUE + 1, + Integer.MIN_VALUE - 1, + 0, + -1, + 1 + }; + } + + private static final long[] LONGS = + new long[] { + 53L, + 10201L, + 1097151L, + 168435455L, + 33251130335L, + 3281283447775L, + 417672546086779L, + 52057592037927932L, + 72057594037927937L + }; + + @Test + public void should_compute_unsigned_vint_size() { + for (int i = 0; i < LONGS.length; i++) { + long val = LONGS[i]; + assertEquals(i + 1, VIntCoding.computeUnsignedVIntSize(val)); + } + } + + @Test + @UseDataProvider("roundTripTestValues") + public void should_write_and_read_unsigned_vint_32(int value) { + ByteBuffer bb = ByteBuffer.allocate(9); + + VIntCoding.writeUnsignedVInt32(value, bb); + bb.flip(); + assertEquals(value, VIntCoding.getUnsignedVInt32(bb, 0)); + } + + @Test + @UseDataProvider("roundTripTestValues") + public void should_write_and_read_unsigned_vint(int value) { + ByteBuffer bb = ByteBuffer.allocate(9); + + VIntCoding.writeUnsignedVInt(value, bb); + bb.flip(); + assertEquals(value, VIntCoding.getUnsignedVInt(bb, 0)); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java index e6a878f7450..c2df6449fdb 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ArrayUtilsTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +22,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.util.concurrent.ThreadLocalRandom; +import java.util.Random; import org.junit.Test; public class ArrayUtilsTest { @@ -84,7 +86,7 @@ public void should_not_bubble_down_when_target_index_lower() { @Test public void should_shuffle_head() { String[] array = {"a", "b", "c", "d", "e"}; - ThreadLocalRandom random = mock(ThreadLocalRandom.class); + Random random = mock(Random.class); when(random.nextInt(anyInt())) .thenAnswer( (invocation) -> { diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java index 0983c1b8900..f526e2f12d4 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ByteBufs.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java new file mode 100644 index 00000000000..5a95e7f3b74 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/CollectionsUtilsTest.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.util; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.List; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class CollectionsUtilsTest { + @Test + @UseDataProvider("listsProvider") + public void should_combine_two_lists_by_index( + List<Integer> firstList, List<Integer> secondList, Map<Integer, Integer> expected) { + + // when + Map<Integer, Integer> result = + CollectionsUtils.combineListsIntoOrderedMap(firstList, secondList); + + // then + assertThat(result).isEqualTo(expected); + } + + @Test + public void should_throw_if_lists_have_not_matching_size() { + // given + List<Integer> list1 = ImmutableList.of(1); + List<Integer> list2 = ImmutableList.of(1, 2); + + // when + assertThatThrownBy(() -> CollectionsUtils.combineListsIntoOrderedMap(list1, list2)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageMatching("Cannot combine lists with not matching sizes"); + } + + @DataProvider + public static Object[][] listsProvider() { + + return new Object[][] { + {ImmutableList.of(1), ImmutableList.of(1), ImmutableMap.of(1, 1)}, + {ImmutableList.of(1, 10, 5), ImmutableList.of(1, 10, 5), ImmutableMap.of(1, 1, 10, 10, 5, 5)}, + {ImmutableList.of(1, 1), ImmutableList.of(1, 2), ImmutableMap.of(1, 2)} + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java index b673edcf1d2..1b37a5e5b19 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/DirectedGraphTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java new file mode 100644 index 00000000000..eec3669efca --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/LoggerTest.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util; + +import static org.mockito.Mockito.mock; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; +import org.mockito.ArgumentCaptor; +import org.slf4j.LoggerFactory; + +public class LoggerTest { + public static LoggerSetup setupTestLogger(Class<?> clazz, Level levelToCapture) { + @SuppressWarnings("unchecked") + Appender<ILoggingEvent> appender = (Appender<ILoggingEvent>) mock(Appender.class); + + ArgumentCaptor<ILoggingEvent> loggingEventCaptor = ArgumentCaptor.forClass(ILoggingEvent.class); + Logger logger = (Logger) LoggerFactory.getLogger(clazz); + Level originalLoggerLevel = logger.getLevel(); + logger.setLevel(levelToCapture); + logger.addAppender(appender); + return new LoggerSetup(appender, originalLoggerLevel, logger, loggingEventCaptor); + } + + public static class LoggerSetup { + + private final Level originalLoggerLevel; + public final Appender<ILoggingEvent> appender; + public final Logger logger; + public ArgumentCaptor<ILoggingEvent> loggingEventCaptor; + + private LoggerSetup( + Appender<ILoggingEvent> appender, + Level originalLoggerLevel, + Logger logger, + ArgumentCaptor<ILoggingEvent> loggingEventCaptor) { + this.appender = appender; + this.originalLoggerLevel = originalLoggerLevel; + this.logger = logger; + this.loggingEventCaptor = loggingEventCaptor; + } + + public void close() { + logger.detachAppender(appender); + logger.setLevel(originalLoggerLevel); + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java index a809e7b0c9b..f2614775be4 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/ReflectionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -63,6 +65,7 @@ public void should_build_policies_per_profile() { Map policies = Reflection.buildFromConfigProfiles( context, + DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY, SpeculativeExecutionPolicy.class, "com.datastax.oss.driver.internal.core.specex"); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java new file mode 100644 index 00000000000..d5cc9dae161 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/StringsTest.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.TestDataProviders; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Locale; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class StringsTest { + + @Test + @UseDataProvider(location = TestDataProviders.class, value = "locales") + public void should_report_cql_keyword(Locale locale) { + Locale def = Locale.getDefault(); + try { + Locale.setDefault(locale); + + assertThat(Strings.isReservedCqlKeyword(null)).isFalse(); + assertThat(Strings.isReservedCqlKeyword("NOT A RESERVED KEYWORD")).isFalse(); + + assertThat(Strings.isReservedCqlKeyword("add")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("allow")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("alter")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("and")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("apply")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("asc")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("authorize")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("batch")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("begin")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("by")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("columnfamily")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("create")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("default")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("delete")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("desc")).isTrue(); + 
assertThat(Strings.isReservedCqlKeyword("describe")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("drop")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("entries")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("execute")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("from")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("full")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("grant")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("if")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("in")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("index")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("infinity")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("insert")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("into")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("is")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("keyspace")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("limit")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("materialized")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("mbean")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("mbeans")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("modify")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("nan")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("norecursive")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("not")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("null")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("of")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("on")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("or")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("order")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("primary")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("rename")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("replace")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("revoke")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("schema")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("select")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("set")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("table")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("to")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("token")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("truncate")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("unlogged")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("unset")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("update")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("use")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("using")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("view")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("where")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("with")).isTrue(); + + assertThat(Strings.isReservedCqlKeyword("ALLOW")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("ALTER")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("AND")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("APPLY")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("ASC")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("AUTHORIZE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("BATCH")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("BEGIN")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("BY")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("COLUMNFAMILY")).isTrue(); + 
assertThat(Strings.isReservedCqlKeyword("CREATE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("DEFAULT")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("DELETE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("DESC")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("DESCRIBE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("DROP")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("ENTRIES")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("EXECUTE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("FROM")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("FULL")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("GRANT")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("IF")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("IN")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("INDEX")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("INFINITY")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("INSERT")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("INTO")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("IS")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("KEYSPACE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("LIMIT")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("MATERIALIZED")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("MBEAN")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("MBEANS")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("MODIFY")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("NAN")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("NORECURSIVE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("NOT")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("NULL")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("OF")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("ON")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("OR")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("ORDER")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("PRIMARY")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("RENAME")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("REPLACE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("REVOKE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("SCHEMA")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("SELECT")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("SET")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("TABLE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("TO")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("TOKEN")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("TRUNCATE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("UNLOGGED")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("UNSET")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("UPDATE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("USE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("USING")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("VIEW")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("WHERE")).isTrue(); + assertThat(Strings.isReservedCqlKeyword("WITH")).isTrue(); + } finally { + Locale.setDefault(def); + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java new file mode 100644 index 00000000000..1adc06a79d3 --- /dev/null +++ 
b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/CompositeQueryPlanTest.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class CompositeQueryPlanTest extends QueryPlanTestBase { + + @Override + protected QueryPlan newQueryPlan(Node... nodes) { + Object[] n1 = new Object[nodes.length / 2]; + Object[] n2 = new Object[nodes.length - n1.length]; + System.arraycopy(nodes, 0, n1, 0, n1.length); + System.arraycopy(nodes, n1.length, n2, 0, n2.length); + return new CompositeQueryPlan( + new SimpleQueryPlan(n1), + new LazyQueryPlan() { + @Override + protected Object[] computeNodes() { + return n2; + } + }); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java new file mode 100644 index 00000000000..99c72bace06 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/LazyQueryPlanTest.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class LazyQueryPlanTest extends QueryPlanTestBase { + + @Override + protected QueryPlan newQueryPlan(Node... 
nodes) { + return new LazyQueryPlan() { + @Override + protected Object[] computeNodes() { + return nodes; + } + }; + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTest.java deleted file mode 100644 index 8157a2662ee..00000000000 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.util.collection; - -import static com.datastax.oss.driver.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.metadata.Node; -import java.util.Iterator; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class QueryPlanTest { - - @Mock private Node node1; - @Mock private Node node2; - @Mock private Node node3; - - @Test - public void should_poll_elements() { - QueryPlan queryPlan = new QueryPlan(node1, node2, node3); - assertThat(queryPlan.poll()).isSameAs(node1); - assertThat(queryPlan.poll()).isSameAs(node2); - assertThat(queryPlan.poll()).isSameAs(node3); - assertThat(queryPlan.poll()).isNull(); - assertThat(queryPlan.poll()).isNull(); - } - - @Test - public void should_return_size() { - QueryPlan queryPlan = new QueryPlan(node1, node2, node3); - assertThat(queryPlan.size()).isEqualTo(3); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(2); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(1); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(0); - queryPlan.poll(); - assertThat(queryPlan.size()).isEqualTo(0); - } - - @Test - public void should_return_iterator() { - QueryPlan queryPlan = new QueryPlan(node1, node2, node3); - Iterator iterator3 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator2 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator1 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator0 = queryPlan.iterator(); - queryPlan.poll(); - Iterator iterator00 = queryPlan.iterator(); - - assertThat(iterator3).toIterable().containsExactly(node1, node2, node3); - assertThat(iterator2).toIterable().containsExactly(node2, node3); - assertThat(iterator1).toIterable().containsExactly(node3); - assertThat(iterator0).toIterable().isEmpty(); - assertThat(iterator00).toIterable().isEmpty(); - } -} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java new file mode 100644 index 00000000000..8689c282117 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/QueryPlanTestBase.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * 
or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import static com.datastax.oss.driver.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.metadata.Node; +import java.util.Comparator; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.ConcurrentSkipListSet; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public abstract class QueryPlanTestBase { + + @Mock private Node node1; + @Mock private Node node2; + @Mock private Node node3; + + @Test + public void should_poll_elements() { + QueryPlan queryPlan = newQueryPlan(node1, node2, node3); + assertThat(queryPlan.poll()).isSameAs(node1); + assertThat(queryPlan.poll()).isSameAs(node2); + assertThat(queryPlan.poll()).isSameAs(node3); + assertThat(queryPlan.poll()).isNull(); + assertThat(queryPlan.poll()).isNull(); + } + + @Test + public void should_poll_elements_concurrently() throws InterruptedException { + for (int runs = 0; runs < 5; runs++) { + Node[] nodes = new Node[1000]; + for (int i = 0; i < 1000; i++) { + nodes[i] = mock(Node.class, "node" + i); + when(nodes[i].getOpenConnections()).thenReturn(i); + } + QueryPlan queryPlan = newQueryPlan(nodes); + Set actual = + new ConcurrentSkipListSet<>(Comparator.comparingInt(Node::getOpenConnections)); + Thread[] threads = new Thread[5]; + for (int i = 0; i < 5; i++) { + threads[i] = + new Thread( + () -> { + while (true) { + Node node = queryPlan.poll(); + if (node == null) { + return; + } + actual.add(node); + } + }); + } + for (Thread thread : threads) { + thread.start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertThat(actual).hasSize(1000); + Iterator iterator = actual.iterator(); + for (int i = 0; iterator.hasNext(); i++) { + Node node = iterator.next(); + assertThat(node.getOpenConnections()).isEqualTo(i); + } + } + } + + @Test + public void should_return_size() { + QueryPlan queryPlan = newQueryPlan(node1, node2, node3); + assertThat(queryPlan.size()).isEqualTo(3); + queryPlan.poll(); + assertThat(queryPlan.size()).isEqualTo(2); + queryPlan.poll(); + assertThat(queryPlan.size()).isEqualTo(1); + queryPlan.poll(); + assertThat(queryPlan.size()).isEqualTo(0); + queryPlan.poll(); + assertThat(queryPlan.size()).isEqualTo(0); + } + + @Test + public void should_return_iterator() { + QueryPlan queryPlan = newQueryPlan(node1, node2, node3); + Iterator iterator3 = queryPlan.iterator(); + queryPlan.poll(); + Iterator iterator2 = queryPlan.iterator(); + queryPlan.poll(); + Iterator iterator1 = queryPlan.iterator(); + queryPlan.poll(); + Iterator iterator0 = queryPlan.iterator(); + 
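// the plan is now drained; one final poll, then verify each iterator still sees the nodes that remained when it was created +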
queryPlan.poll(); + Iterator iterator00 = queryPlan.iterator(); + + assertThat(iterator3).toIterable().containsExactly(node1, node2, node3); + assertThat(iterator2).toIterable().containsExactly(node2, node3); + assertThat(iterator1).toIterable().containsExactly(node3); + assertThat(iterator0).toIterable().isEmpty(); + assertThat(iterator00).toIterable().isEmpty(); + } + + protected abstract QueryPlan newQueryPlan(Node... nodes); +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java new file mode 100644 index 00000000000..31e3e1006d7 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/collection/SimpleQueryPlanTest.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.collection; + +import com.datastax.oss.driver.api.core.metadata.Node; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class SimpleQueryPlanTest extends QueryPlanTestBase { + + @Override + protected QueryPlan newQueryPlan(Node... nodes) { + return new SimpleQueryPlan((Object[]) nodes); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java index 3b2cda52b35..88cdfa80104 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CapturingTimer.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +18,6 @@ package com.datastax.oss.driver.internal.core.util.concurrent; -import static org.assertj.core.api.Assertions.fail; - import io.netty.util.Timeout; import io.netty.util.Timer; import io.netty.util.TimerTask; @@ -51,12 +51,7 @@ public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { * tell from the returned Timeout itself. */ public CapturedTimeout getNextTimeout() { - try { - return timeoutQueue.poll(100, TimeUnit.MILLISECONDS); - } catch (InterruptedException ie) { - fail("Unexpected interruption", ie); - throw new AssertionError(); - } + return timeoutQueue.poll(); } @Override diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java new file mode 100644 index 00000000000..04f96f185fd --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CompletableFuturesTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.util.concurrent; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.fail; + +import java.util.Arrays; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.Test; + +public class CompletableFuturesTest { + @Test + public void should_not_suppress_identical_exceptions() throws Exception { + RuntimeException error = new RuntimeException(); + CompletableFuture future1 = new CompletableFuture<>(); + future1.completeExceptionally(error); + CompletableFuture future2 = new CompletableFuture<>(); + future2.completeExceptionally(error); + try { + // if timeout exception is thrown, it indicates that CompletableFutures.allSuccessful() + // did not complete the returned future and potentially caller will wait infinitely + CompletableFutures.allSuccessful(Arrays.asList(future1, future2)) + .toCompletableFuture() + .get(1, TimeUnit.SECONDS); + fail(); + } catch (ExecutionException e) { + assertThat(e.getCause()).isEqualTo(error); + } + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java index d8de58564f5..74e0801ff61 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/CycleDetectorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java index 2cd6c9eed21..71c844e7051 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DebouncerTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java new file mode 100644 index 00000000000..45d0239b604 --- /dev/null +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/PromiseCombinerTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.concurrent; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.ImmediateEventExecutor; +import io.netty.util.concurrent.Promise; +import java.io.IOException; +import org.junit.Test; + +public class PromiseCombinerTest { + + private final EventExecutor executor = ImmediateEventExecutor.INSTANCE; + + @Test + public void should_complete_normally_if_all_parents_complete_normally() { + // given + Promise promise = executor.newPromise(); + Promise parent1 = executor.newPromise(); + Promise parent2 = executor.newPromise(); + // when + PromiseCombiner.combine(promise, parent1, parent2); + parent1.setSuccess(null); + parent2.setSuccess(null); + // then + assertThat(promise.isSuccess()).isTrue(); + } + + @Test + public void should_complete_exceptionally_if_any_parent_completes_exceptionally() { + // given + Promise promise = executor.newPromise(); + Promise parent1 = executor.newPromise(); + Promise parent2 = executor.newPromise(); + Promise parent3 = executor.newPromise(); + NullPointerException npe = new NullPointerException(); + IOException ioe = new IOException(); + // when + PromiseCombiner.combine(promise, parent1, parent2, parent3); + parent1.setSuccess(null); + parent2.setFailure(npe); + parent3.setFailure(ioe); + // then + assertThat(promise.isSuccess()).isFalse(); + assertThat(promise.cause()).isSameAs(npe).hasSuppressedException(ioe); + } +} diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java index 24bd02b1b73..0e541c13f92 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReconnectionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java index 6dcda9119f1..65a2ee69b76 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ReplayingEventFilterTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java index 79f56fb3215..295fa545c76 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoop.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,7 +49,7 @@ @SuppressWarnings("FunctionalInterfaceClash") // does not matter for test code public class ScheduledTaskCapturingEventLoop extends DefaultEventLoop { - private final BlockingQueue capturedTasks = new ArrayBlockingQueue<>(100); + private final BlockingQueue> capturedTasks = new ArrayBlockingQueue<>(100); public ScheduledTaskCapturingEventLoop(EventLoopGroup parent) { super(parent); diff --git a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java index 61bc52c99e1..cf0314cc335 100644 --- a/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java +++ b/core/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/ScheduledTaskCapturingEventLoopTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md b/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md new file mode 100644 index 00000000000..9ff9b622e5b --- /dev/null +++ b/core/src/test/resources/ReloadingKeyManagerFactoryTest/README.md @@ -0,0 +1,39 @@ +# How to create cert stores for ReloadingKeyManagerFactoryTest + +Need the following cert stores: +- `server.keystore` +- `client-original.keystore` +- `client-alternate.keystore` +- `server.truststore`: trusts `client-original.keystore` and `client-alternate.keystore` +- `client.truststore`: trusts `server.keystore` + +We shouldn't need any signing requests or chains of trust, since truststores are just including certs directly. + +First create the three keystores: +``` +$ keytool -genkeypair -keyalg RSA -alias server -keystore server.keystore -dname "CN=server" -storepass changeit -keypass changeit +$ keytool -genkeypair -keyalg RSA -alias client-original -keystore client-original.keystore -dname "CN=client-original" -storepass changeit -keypass changeit +$ keytool -genkeypair -keyalg RSA -alias client-alternate -keystore client-alternate.keystore -dname "CN=client-alternate" -storepass changeit -keypass changeit +``` + +Note that we need to use `-keyalg RSA` because keytool's default keyalg is DSA, which TLS 1.3 doesn't support. 
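To sanity-check which algorithm a generated key actually uses (a quick verification step, assuming the keystores and passwords created above), inspect the entry with `keytool -list -v` and look at the key/signature algorithm fields:
```
$ keytool -list -v -keystore client-original.keystore -storepass changeit | grep -i algorithm
```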
If DSA is +used, the handshake will fail due to the server not being able to find any authentication schemes compatible with its +x509 certificate ("Unavailable authentication scheme"). + +Then export all the certs: +``` +$ keytool -exportcert -keystore server.keystore -alias server -file server.cert -storepass changeit +$ keytool -exportcert -keystore client-original.keystore -alias client-original -file client-original.cert -storepass changeit +$ keytool -exportcert -keystore client-alternate.keystore -alias client-alternate -file client-alternate.cert -storepass changeit +``` + +Then create the server.truststore that trusts the two client certs: +``` +$ keytool -import -file client-original.cert -alias client-original -keystore server.truststore -storepass changeit +$ keytool -import -file client-alternate.cert -alias client-alternate -keystore server.truststore -storepass changeit +``` + +Then create the client.truststore that trusts the server cert: +``` +$ keytool -import -file server.cert -alias server -keystore client.truststore -storepass changeit +``` diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore new file mode 100644 index 00000000000..91cee636a0b Binary files /dev/null and b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-alternate.keystore differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore new file mode 100644 index 00000000000..74e31f7bc6f Binary files /dev/null and b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client-original.keystore differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore new file mode 100644 index 00000000000..3ce9a720dbc Binary files /dev/null and b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/client.truststore differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore new file mode 100644 index 00000000000..7d279638a34 Binary files /dev/null and b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.keystore differ diff --git a/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore new file mode 100644 index 00000000000..c9b06b5fbe1 Binary files /dev/null and b/core/src/test/resources/ReloadingKeyManagerFactoryTest/certs/server.truststore differ diff --git a/core/src/test/resources/application.conf b/core/src/test/resources/application.conf new file mode 100644 index 00000000000..efea37cc078 --- /dev/null +++ b/core/src/test/resources/application.conf @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +datastax-java-driver { + basic.request.timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} +} diff --git a/core/src/test/resources/config/cloud/creds.zip b/core/src/test/resources/config/cloud/creds.zip new file mode 100644 index 00000000000..3b5d1cb1cbd Binary files /dev/null and b/core/src/test/resources/config/cloud/creds.zip differ diff --git a/core/src/test/resources/config/cloud/identity.jks b/core/src/test/resources/config/cloud/identity.jks new file mode 100644 index 00000000000..bac5bbaa965 Binary files /dev/null and b/core/src/test/resources/config/cloud/identity.jks differ diff --git a/core/src/test/resources/config/cloud/metadata.json b/core/src/test/resources/config/cloud/metadata.json new file mode 100644 index 00000000000..35aa26f67f1 --- /dev/null +++ b/core/src/test/resources/config/cloud/metadata.json @@ -0,0 +1 @@ +{"region":"local","contact_info":{"type":"sni_proxy","local_dc":"dc1","contact_points":["4ac06655-f861-49f9-881e-3fee22e69b94","2af7c253-3394-4a0d-bfac-f1ad81b5154d","b17b6e2a-3f48-4d6a-81c1-20a0a1f3192a"],"sni_proxy_address":"localhost:30002"}} diff --git a/core/src/test/resources/config/cloud/trustStore.jks b/core/src/test/resources/config/cloud/trustStore.jks new file mode 100644 index 00000000000..8ee03f97da0 Binary files /dev/null and b/core/src/test/resources/config/cloud/trustStore.jks differ diff --git a/core/src/test/resources/config/customApplication.conf b/core/src/test/resources/config/customApplication.conf index dc0c6d19b45..c3e3dc7b468 100644 --- a/core/src/test/resources/config/customApplication.conf +++ b/core/src/test/resources/config/customApplication.conf @@ -1,4 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
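+# A note on the substitution below (illustrative, not part of the driver's defaults):
+# HOCON resolves a reference like ${datastax-java-driver.advanced.connection.init-query-timeout}
+# against the merged configuration (this file layered over the driver's reference.conf),
+# so the test can assert that basic.request.timeout ends up equal to whatever
+# init-query-timeout resolves to, without hard-coding that value here.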
+ datastax-java-driver { // Check that references to other options in `reference.conf` are correctly resolved basic.request.timeout = ${datastax-java-driver.advanced.connection.init-query-timeout} + + advanced.continuous-paging.max-pages = 10 } diff --git a/core/src/test/resources/config/customApplication.json b/core/src/test/resources/config/customApplication.json index 2527d9908d8..4988a72cd9a 100644 --- a/core/src/test/resources/config/customApplication.json +++ b/core/src/test/resources/config/customApplication.json @@ -4,6 +4,11 @@ "request": { "page-size": "2000" } + }, + "advanced": { + "continuous-paging": { + "page-size": 2000 + } } } } diff --git a/core/src/test/resources/config/customApplication.properties b/core/src/test/resources/config/customApplication.properties index 6e971ef1d84..4c1d1ea0647 100644 --- a/core/src/test/resources/config/customApplication.properties +++ b/core/src/test/resources/config/customApplication.properties @@ -1,11 +1,13 @@ # -# Copyright DataStax, Inc. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -15,3 +17,4 @@ # datastax-java-driver.basic.request.consistency=ONE +datastax-java-driver.advanced.continuous-paging.max-enqueued-pages = 8 \ No newline at end of file diff --git a/core/src/test/resources/insights/duplicate-dependencies.txt b/core/src/test/resources/insights/duplicate-dependencies.txt new file mode 100644 index 00000000000..a808dff3f57 --- /dev/null +++ b/core/src/test/resources/insights/duplicate-dependencies.txt @@ -0,0 +1,2 @@ +io.netty:netty-handler:jar:4.0.56.Final:compile +io.netty:netty-handler:jar:4.1.2.Final:compile \ No newline at end of file diff --git a/core/src/test/resources/insights/malformed-pom.properties b/core/src/test/resources/insights/malformed-pom.properties new file mode 100644 index 00000000000..0a503062fbd --- /dev/null +++ b/core/src/test/resources/insights/malformed-pom.properties @@ -0,0 +1,22 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +#Created by Apache Maven 3.5.0 +#no version +groupId=io.netty +artifactId=netty-handler \ No newline at end of file diff --git a/core/src/test/resources/insights/netty-dependency-optional.txt b/core/src/test/resources/insights/netty-dependency-optional.txt new file mode 100644 index 00000000000..2bd0cd21a0c --- /dev/null +++ b/core/src/test/resources/insights/netty-dependency-optional.txt @@ -0,0 +1 @@ +io.netty:netty-handler:jar:4.0.0.Final:compile (optional) \ No newline at end of file diff --git a/core/src/test/resources/insights/netty-dependency.txt b/core/src/test/resources/insights/netty-dependency.txt new file mode 100644 index 00000000000..69c350c30e8 --- /dev/null +++ b/core/src/test/resources/insights/netty-dependency.txt @@ -0,0 +1 @@ +io.netty:netty-handler:jar:4.0.0.Final:runtime \ No newline at end of file diff --git a/core/src/test/resources/insights/ordered-dependencies.txt b/core/src/test/resources/insights/ordered-dependencies.txt new file mode 100644 index 00000000000..a5518f89736 --- /dev/null +++ b/core/src/test/resources/insights/ordered-dependencies.txt @@ -0,0 +1,3 @@ +b-org.com:art1:jar:1.0:compile +a-org.com:art1:jar:2.0:compile +c-org.com:art1:jar:3.0:compile \ No newline at end of file diff --git a/core/src/test/resources/insights/pom.properties b/core/src/test/resources/insights/pom.properties new file mode 100644 index 00000000000..e68a31c8fc7 --- /dev/null +++ b/core/src/test/resources/insights/pom.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +#Created by Apache Maven 3.5.0 +version=4.0.56.Final +groupId=io.netty +artifactId=netty-handler + diff --git a/core/src/test/resources/insights/test-dependencies.txt b/core/src/test/resources/insights/test-dependencies.txt new file mode 100644 index 00000000000..e9186a35e6b --- /dev/null +++ b/core/src/test/resources/insights/test-dependencies.txt @@ -0,0 +1,31 @@ + +The following files have been resolved: + com.github.jnr:jffi:jar:1.2.16:compile + org.ow2.asm:asm:jar:5.0.3:compile + com.github.jnr:jnr-constants:jar:0.9.9:compile + com.esri.geometry:esri-geometry-api:jar:1.2.1:compile + com.google.guava:guava:jar:19.0:compile + com.fasterxml.jackson.core:jackson-annotations:jar:2.8.11:compile + com.github.jnr:jnr-posix:jar:3.0.44:compile + org.codehaus.jackson:jackson-core-asl:jar:1.9.12:compile + io.netty:netty-handler:jar:4.0.56.Final:compile + org.ow2.asm:asm-commons:jar:5.0.3:compile + org.ow2.asm:asm-util:jar:5.0.3:compile + org.xerial.snappy:snappy-java:jar:1.1.2.6:compile (optional) + io.netty:netty-buffer:jar:4.0.56.Final:compile + com.github.jnr:jnr-ffi:jar:2.1.7:compile + com.fasterxml.jackson.core:jackson-core:jar:2.8.11:compile + org.hdrhistogram:HdrHistogram:jar:2.1.10:compile (optional) + org.ow2.asm:asm-tree:jar:5.0.3:compile + at.yawk.lz4:lz4-java:jar:1.10.1:compile (optional) + io.netty:netty-transport:jar:4.0.56.Final:compile + io.dropwizard.metrics:metrics-core:jar:3.2.2:compile + io.netty:netty-common:jar:4.0.56.Final:compile + com.fasterxml.jackson.core:jackson-databind:jar:2.7.9.3:compile + org.slf4j:slf4j-api:jar:1.7.25:compile + io.netty:netty-transport-native-epoll:jar:4.0.56.Final:compile (optional) + org.ow2.asm:asm-analysis:jar:5.0.3:compile + com.github.jnr:jnr-x86asm:jar:1.0.2:compile + io.netty:netty-codec:jar:4.0.56.Final:compile + org.json:json:jar:20090211:compile + com.github.jnr:jffi:jar:native:1.2.16:runtime \ No newline at end of file diff --git a/core/src/test/resources/logback-test.xml b/core/src/test/resources/logback-test.xml index 620eccb1c0c..1424331a31d 100644 --- a/core/src/test/resources/logback-test.xml +++ b/core/src/test/resources/logback-test.xml @@ -1,12 +1,15 @@ + - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - \ No newline at end of file + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + diff --git a/core/src/test/resources/project.properties b/core/src/test/resources/project.properties new file mode 100644 index 00000000000..66eab90b6e4 --- /dev/null +++ b/core/src/test/resources/project.properties @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +project.basedir=${basedir} \ No newline at end of file diff --git a/distribution-source/pom.xml b/distribution-source/pom.xml new file mode 100644 index 00000000000..4c1f11e53a8 --- /dev/null +++ b/distribution-source/pom.xml @@ -0,0 +1,125 @@ + + + + 4.0.0 + + org.apache.cassandra + java-driver-parent + 4.19.3-SNAPSHOT + + java-driver-distribution-source + pom + Apache Cassandra Java Driver - source distribution + + apache-cassandra-java-driver-${project.version}-source + + + maven-jar-plugin + + + + default-jar + none + + + + + maven-source-plugin + + true + + + + maven-install-plugin + + true + + + + maven-deploy-plugin + + true + + + + org.revapi + revapi-maven-plugin + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + + + + + + release + + + + maven-assembly-plugin + + + assemble-source-tarball + package + + single + + + + + false + + src/assembly/source-tarball.xml + + posix + + + + net.nicoulaj.maven.plugins + checksum-maven-plugin + 1.7 + + + + artifacts + + + + + true + + sha256 + sha512 + + + + + + + + diff --git a/distribution-source/src/assembly/source-tarball.xml b/distribution-source/src/assembly/source-tarball.xml new file mode 100644 index 00000000000..b3e2d0f463a --- /dev/null +++ b/distribution-source/src/assembly/source-tarball.xml @@ -0,0 +1,43 @@ + + + + source-tarball + + tar.gz + + + + .. + . + true + + + **/*.iml + **/.classpath + **/.project + **/.java-version + **/.flattened-pom.xml + **/dependency-reduced-pom.xml + **/${project.build.directory}/** + + + + diff --git a/distribution-tests/pom.xml b/distribution-tests/pom.xml new file mode 100644 index 00000000000..9cef313f8a5 --- /dev/null +++ b/distribution-tests/pom.xml @@ -0,0 +1,122 @@ + + + + 4.0.0 + + org.apache.cassandra + java-driver-parent + 4.19.3-SNAPSHOT + + java-driver-distribution-tests + Apache Cassandra Java Driver - distribution tests + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + + + + org.apache.cassandra + java-driver-test-infra + test + + + org.apache.cassandra + java-driver-query-builder + test + + + org.apache.cassandra + java-driver-mapper-processor + test + + + org.apache.cassandra + java-driver-mapper-runtime + test + + + org.apache.cassandra + java-driver-core + test + + + org.apache.cassandra + java-driver-metrics-micrometer + test + + + org.apache.cassandra + java-driver-metrics-microprofile + test + + + junit + junit + test + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + ${testing.jvm}/bin/java + ${mockitoopens.argline} + 1 + + + + org.revapi + revapi-maven-plugin + + true + + + + maven-install-plugin + + true + + + + maven-deploy-plugin + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + + + + diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java new file mode 100644 index 00000000000..16952e3d771 --- /dev/null +++ b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/DriverDependencyTest.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.querybuilder.QueryBuilder; +import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; +import com.datastax.oss.driver.internal.core.util.Reflection; +import com.datastax.oss.driver.internal.mapper.processor.MapperProcessor; +import com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory; +import com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory; +import org.junit.Test; + +public class DriverDependencyTest { + @Test + public void should_include_core_jar() { + assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.core.session.Session")) + .isEqualTo(Session.class); + } + + @Test + public void should_include_query_builder_jar() { + assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.querybuilder.QueryBuilder")) + .isEqualTo(QueryBuilder.class); + } + + @Test + public void should_include_mapper_processor_jar() { + assertThat( + Reflection.loadClass( + null, "com.datastax.oss.driver.internal.mapper.processor.MapperProcessor")) + .isEqualTo(MapperProcessor.class); + } + + @Test + public void should_include_mapper_runtime_jar() { + assertThat(Reflection.loadClass(null, "com.datastax.oss.driver.api.mapper.MapperBuilder")) + .isEqualTo(MapperBuilder.class); + } + + @Test + public void should_include_metrics_micrometer_jar() { + assertThat( + Reflection.loadClass( + null, + "com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory")) + .isEqualTo(MicrometerMetricsFactory.class); + } + + @Test + public void should_include_metrics_microprofile_jar() { + assertThat( + Reflection.loadClass( + null, + "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory")) + .isEqualTo(MicroProfileMetricsFactory.class); + } + + @Test + public void should_include_test_infra_jar() { + assertThat( + Reflection.loadClass( + null, "com.datastax.oss.driver.api.testinfra.CassandraResourceRule")) + .isEqualTo(CassandraResourceRule.class); + } +} diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java new file mode 100644 index 00000000000..28626413487 --- /dev/null +++ b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/OptionalDependencyTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.internal.core.util.Dependency; +import com.datastax.oss.driver.internal.core.util.Reflection; +import org.junit.Test; + +public class OptionalDependencyTest { + @Test + public void should_not_include_snappy_jar() { + Dependency.SNAPPY + .classes() + .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); + } + + @Test + public void should_not_include_l4z_jar() { + Dependency.LZ4 + .classes() + .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); + } + + @Test + public void should_not_include_esri_jar() { + Dependency.ESRI + .classes() + .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); + } + + @Test + public void should_not_include_tinkerpop_jar() { + Dependency.TINKERPOP + .classes() + .forEach(clazz -> assertThat(Reflection.loadClass(null, clazz)).isNull()); + } +} diff --git a/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java new file mode 100644 index 00000000000..1070bbc2fb1 --- /dev/null +++ b/distribution-tests/src/test/java/com/datastax/oss/driver/api/core/ProvidedDependencyTest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.internal.core.util.Reflection; +import org.junit.Test; + +public class ProvidedDependencyTest { + @Test + public void should_not_include_graal_sdk_jar() { + assertThat(Reflection.loadClass(null, "org.graalvm.nativeimage.VMRuntime")).isNull(); + } + + @Test + public void should_not_include_spotbugs_annotations_jar() { + assertThat(Reflection.loadClass(null, "edu.umd.cs.findbugs.annotations.NonNull")).isNull(); + } + + @Test + public void should_not_include_jicp_annotations_jar() { + assertThat(Reflection.loadClass(null, "net.jcip.annotations.ThreadSafe")).isNull(); + } + + @Test + public void should_not_include_blockhound_jar() { + assertThat(Reflection.loadClass(null, "reactor.blockhound.BlockHoundRuntime")).isNull(); + } +} diff --git a/distribution/pom.xml b/distribution/pom.xml index 877191395d6..20b9afc1bcd 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-distribution jar - - DataStax Java driver for Apache Cassandra(R) - binary distribution - + Apache Cassandra Java Driver - binary distribution + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + ${project.groupId} @@ -45,10 +55,19 @@ java-driver-query-builder ${project.version} + + ${project.groupId} + java-driver-mapper-runtime + ${project.version} + + + ${project.groupId} + java-driver-mapper-processor + ${project.version} + - - datastax-java-driver-${project.version} + apache-cassandra-java-driver-${project.version} maven-jar-plugin @@ -94,35 +113,11 @@ - release - - maven-javadoc-plugin - - - dependencies-javadoc - - process-classes - - jar - - - true - DataStax Java driver for Apache Cassandra® ${project.version} API - - DataStax Java driver for Apache Cassandra(R) ${project.version} API - - - - - maven-assembly-plugin @@ -142,8 +137,27 @@ posix + + net.nicoulaj.maven.plugins + checksum-maven-plugin + 1.7 + + + + artifacts + + + + + true + + sha256 + sha512 + + + - \ No newline at end of file + diff --git a/distribution/src/assembly/binary-tarball.xml b/distribution/src/assembly/binary-tarball.xml index 982b38a9850..b6294a25340 100644 --- a/distribution/src/assembly/binary-tarball.xml +++ b/distribution/src/assembly/binary-tarball.xml @@ -1,12 +1,15 @@ + - + binary-tarball tar.gz true - - true - com.datastax.oss:java-driver-core + org.apache.cassandra:java-driver-core lib/core @@ -43,19 +42,20 @@ For some reason, we need to exclude all other modules here, even though our moduleSet targets core only --> - com.datastax.oss:java-driver-query-builder + org.apache.cassandra:java-driver-query-builder + org.apache.cassandra:java-driver-mapper-runtime + org.apache.cassandra:java-driver-mapper-processor true - true - com.datastax.oss:java-driver-query-builder + org.apache.cassandra:java-driver-query-builder lib/query-builder @@ -63,71 +63,114 @@ - com.datastax.oss:java-driver-core + org.apache.cassandra:java-driver-core + org.apache.cassandra:java-driver-mapper-runtime + org.apache.cassandra:java-driver-mapper-processor + org.apache.cassandra:java-driver-guava-shaded - com.datastax.oss:java-driver-shaded-guava com.github.stephenc.jcip:jcip-annotations - + com.github.spotbugs:spotbugs-annotations + + true + + + + + + + true + + org.apache.cassandra:java-driver-mapper-runtime + + + lib/mapper-runtime + false + + + + 
org.apache.cassandra:java-driver-core + org.apache.cassandra:java-driver-query-builder + org.apache.cassandra:java-driver-mapper-processor + org.apache.cassandra:java-driver-guava-shaded + + com.github.stephenc.jcip:jcip-annotations + com.github.spotbugs:spotbugs-annotations + + true + + + + + + + true + + org.apache.cassandra:java-driver-mapper-processor + + + lib/mapper-processor + false + + + + org.apache.cassandra:java-driver-core + org.apache.cassandra:java-driver-query-builder + org.apache.cassandra:java-driver-mapper-runtime + org.apache.cassandra:java-driver-guava-shaded + + com.github.stephenc.jcip:jcip-annotations + com.github.spotbugs:spotbugs-annotations true - true - com.datastax.oss:java-driver-core - com.datastax.oss:java-driver-query-builder + org.apache.cassandra:java-driver-core + org.apache.cassandra:java-driver-query-builder + org.apache.cassandra:java-driver-mapper-runtime + org.apache.cassandra:java-driver-mapper-processor false sources - ${module.artifactId}-${module.version}-src.zip - + ${module.artifactId}-${module.version}-src.zip src * - - - target/apidocs apidocs - .. . README* - LICENSE* + LICENSE_binary + NOTICE_binary.txt - ../changelog - ../faq - ../manual - ../upgrade_guide - - diff --git a/docs.yaml b/docs.yaml index 17989d6312f..7c679a0f47e 100644 --- a/docs.yaml +++ b/docs.yaml @@ -1,5 +1,22 @@ -title: Java Driver for Apache Cassandra™ -summary: High performance Java client for Apache Cassandra™ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +title: Java Driver +summary: Java Driver for Apache Cassandra® homepage: http://docs.datastax.com/en/developer/java-driver theme: datastax sections: diff --git a/examples/README.md b/examples/README.md index 83c61f27971..9d2210d8a4a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,6 +1,25 @@ -# DataStax Java Driver for Apache Cassandra(R) - Examples + + +# Java Driver for Apache Cassandra(R) - Examples + +This module contains examples of how to use the Java Driver for Apache Cassandra(R). ## Usage diff --git a/examples/pom.xml b/examples/pom.xml index ba662d6dafa..12e42dfdf53 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - java-driver-parent - com.datastax.oss - 4.1.1-SNAPSHOT + org.apache.cassandra + 4.19.3-SNAPSHOT - java-driver-examples - DataStax Java driver for Apache Cassandra(R) - examples. - A collection of examples to demonstrate DataStax Java Driver for Apache - Cassandra(R). - - - + Apache Cassandra Java Driver - examples. + A collection of examples to demonstrate Java Driver for Apache Cassandra(R). 
+ + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + - - ${project.groupId} java-driver-core - ${project.version} ${project.groupId} java-driver-query-builder - ${project.version} - + + ${project.groupId} + java-driver-mapper-runtime + com.fasterxml.jackson.core jackson-databind - com.fasterxml.jackson.jaxrs jackson-jaxrs-base ${jackson.version} true - com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider ${jackson.version} true - - javax.json javax.json-api true - org.glassfish javax.json true runtime - - javax.ws.rs javax.ws.rs-api true - - org.glassfish.jersey.core jersey-server true - org.glassfish.jersey.media jersey-media-json-jackson true - org.glassfish.jersey.containers jersey-container-jdk-http true - - org.glassfish.hk2 hk2-api true - org.glassfish.jersey.inject jersey-hk2 true - - javax.inject javax.inject true - javax.annotation javax.annotation-api true - ch.qos.logback logback-classic runtime + + at.favre.lib + bcrypt + 0.8.0 + + + + io.projectreactor + reactor-core + + + com.github.spotbugs + spotbugs-annotations + provided + - - + + maven-compiler-plugin + + 1.8 + 1.8 + + + org.apache.cassandra + java-driver-mapper-processor + ${project.version} + + + + + + maven-jar-plugin + + + + com.datastax.oss.driver.examples + + + + org.revapi revapi-maven-plugin @@ -188,5 +214,4 @@ - - \ No newline at end of file + diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/astra/AstraReadCassandraVersion.java b/examples/src/main/java/com/datastax/oss/driver/examples/astra/AstraReadCassandraVersion.java new file mode 100644 index 00000000000..ff8dc6d96f4 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/astra/AstraReadCassandraVersion.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.astra; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import java.nio.file.Paths; + +/** + * Connects to a DataStax Astra cluster and extracts basic information from it. + * + *

<p>Preconditions: + * + * <ul> + *   <li>A DataStax Astra cluster is running and accessible. + *   <li>A DataStax Astra secure connect bundle for the running cluster. + * </ul> + * + * <p>
      Side effects: none. + * + * @see + * Creating an Astra Database (GCP) + * @see + * Providing access to Astra databases (GCP) + * @see + * Obtaining Astra secure connect bundle (GCP) + * @see Java Driver online + * manual + */ +public class AstraReadCassandraVersion { + + public static void main(String[] args) { + + // The Session is what you use to execute queries. It is thread-safe and should be + // reused. + try (CqlSession session = + CqlSession.builder() + // Change the path here to the secure connect bundle location (see javadocs above) + .withCloudSecureConnectBundle(Paths.get("/path/to/secure-connect-database_name.zip")) + // Change the user_name and password here for the Astra instance + .withAuthCredentials("user_name", "fakePasswordForTests") + // Uncomment the next line to use a specific keyspace + // .withKeyspace("keyspace_name") + .build()) { + + // We use execute to send a query to Cassandra. This returns a ResultSet, which + // is essentially a collection of Row objects. + ResultSet rs = session.execute("select release_version from system.local"); + // Extract the first row (which is the only one in this case). + Row row = rs.one(); + + // Extract the value of the first (and only) column from the row. + assert row != null; + String releaseVersion = row.getString("release_version"); + System.out.printf("Cassandra version is: %s%n", releaseVersion); + } + // The try-with-resources block automatically close the session after we’re done with it. + // This step is important because it frees underlying resources (TCP connections, thread + // pools...). In a real application, you would typically do this at shutdown + // (for example, when undeploying your webapp). + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java b/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java index 7b6d75c53dd..3dcfa702041 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/basic/CreateAndPopulateKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,9 +41,10 @@ *

    • inserts a row in each table. * * - * @see Java driver online + * @see Java Driver online * manual */ +@SuppressWarnings("CatchAndPrintStackTrace") public class CreateAndPopulateKeyspace { public static void main(String[] args) { diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java b/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java index 58e58fac497..63804ebfece 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadCassandraVersion.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +33,7 @@ * *

      Side effects: none. * - * @see Java driver online + * @see Java Driver online * manual */ public class ReadCassandraVersion { diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java b/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java index 012c92702d9..d61911d19b9 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/basic/ReadTopologyAndSchemaMetadata.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,7 +36,7 @@ * *

      Side effects: none. * - * @see Java driver online + * @see Java Driver online * manual */ public class ReadTopologyAndSchemaMetadata { diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustom.java b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustom.java new file mode 100644 index 00000000000..444d4f406b7 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustom.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.concurrent; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Creates a keyspace and table, and loads data using a multi-threaded approach. + * + *

+ * <p>This example makes use of the {@link CqlSession#execute(String)} method, which executes
+ * requests in a blocking fashion. It uses an {@link ExecutorService} to limit the number of
+ * concurrent requests to {@code CONCURRENCY_LEVEL}, and a {@link Semaphore} to cap the number of
+ * in-flight requests at {@code IN_FLIGHT_REQUESTS} (see the condensed sketch after the lists
+ * below).

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster is running and accessible through the contact points
+ *       identified by basic.contact-points (see application.conf).
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.tbl_sample_kv". If it already exists, it will be reused;
+ *   <li>inserts TOTAL_NUMBER_OF_INSERTS rows into the table.
+ * </ul>
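Stripped of logging and error handling, the throttling pattern implemented by the class below fits in a few lines. A condensed sketch, assuming the `examples.tbl_sample_kv` table created by the example (constants mirror the ones in the class):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.uuid.Uuids;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class ThrottledInserts {
  public static void main(String[] args) throws InterruptedException {
    int concurrencyLevel = 32, inFlightRequests = 500, totalInserts = 10_000;
    Semaphore permits = new Semaphore(inFlightRequests);
    CountDownLatch done = new CountDownLatch(totalInserts);
    ExecutorService pool = Executors.newFixedThreadPool(concurrencyLevel);
    try (CqlSession session = CqlSession.builder().build()) {
      PreparedStatement pst =
          session.prepare("INSERT INTO examples.tbl_sample_kv (id, value) VALUES (?, ?)");
      for (int i = 0; i < totalInserts; i++) {
        int value = i;
        permits.acquire(); // blocks the submitting thread once inFlightRequests are pending
        pool.submit(
            () -> {
              try {
                session.execute(pst.bind(Uuids.random(), value));
              } finally {
                permits.release(); // lets the submitting thread enqueue the next request
                done.countDown();
              }
            });
      }
      done.await(); // wait until every insert has completed before closing the session
    } finally {
      pool.shutdown();
    }
  }
}
```

The semaphore caps queued work independently of the pool size, so the executor's queue cannot grow without bound.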
      + * + * @see Java Driver online + * manual + */ +@SuppressWarnings("CatchAndPrintStackTrace") +public class LimitConcurrencyCustom { + private static final int CONCURRENCY_LEVEL = 32; + private static final int TOTAL_NUMBER_OF_INSERTS = 10_000; + private static final int IN_FLIGHT_REQUESTS = 500; + // Semaphore for limiting number of in-flight requests. + private static final Semaphore SEMAPHORE = new Semaphore(IN_FLIGHT_REQUESTS); + + // Create CountDownLatch that wait for completion of all pending requests + private static final CountDownLatch REQUEST_LATCH = new CountDownLatch(TOTAL_NUMBER_OF_INSERTS); + + public static void main(String[] args) throws InterruptedException { + + try (CqlSession session = new CqlSessionBuilder().build()) { + createSchema(session); + insertConcurrent(session); + } + } + + private static void insertConcurrent(CqlSession session) throws InterruptedException { + PreparedStatement pst = + session.prepare( + insertInto("examples", "tbl_sample_kv") + .value("id", bindMarker("id")) + .value("value", bindMarker("value")) + .build()); + + // Used to track number of total inserts + AtomicInteger insertsCounter = new AtomicInteger(); + + // Executor service with CONCURRENCY_LEVEL number of threads that states an upper limit + // on number of request in progress. + ExecutorService executor = Executors.newFixedThreadPool(CONCURRENCY_LEVEL); + + // For every i we will insert a record to db + for (int i = 0; i < TOTAL_NUMBER_OF_INSERTS; i++) { + // Before submitting a request, we need to acquire 1 permit. + // If there is no permits available it blocks caller thread. + SEMAPHORE.acquire(); + // Copy to final variable for usage in a separate thread + final int counter = i; + + // We are running CqlSession.execute in a separate thread pool (executor) + executor.submit( + () -> { + try { + session.execute(pst.bind().setUuid("id", Uuids.random()).setInt("value", counter)); + insertsCounter.incrementAndGet(); + } catch (Throwable t) { + // On production you should leverage logger and use logger.error() method. + t.printStackTrace(); + } finally { + // Signal that processing of this request finishes + REQUEST_LATCH.countDown(); + // Once the request is executed, we release 1 permit. + // By doing so we allow caller thread to submit another async request. + SEMAPHORE.release(); + } + }); + } + // Await for execution of TOTAL_NUMBER_OF_INSERTS + REQUEST_LATCH.await(); + + System.out.println( + String.format( + "Finished executing %s queries with a concurrency level of %s.", + insertsCounter.get(), CONCURRENCY_LEVEL)); + // Shutdown executor to free resources + executor.shutdown(); + executor.awaitTermination(10, TimeUnit.SECONDS); + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + + session.execute( + "CREATE TABLE IF NOT EXISTS examples.tbl_sample_kv (id uuid, value int, PRIMARY KEY (id))"); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustomAsync.java b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustomAsync.java new file mode 100644 index 00000000000..bec26eb7e81 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyCustomAsync.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.concurrent; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; + +/** + * Creates a keyspace and table, and loads data using an async API. + * + *

+ * <p>This example makes use of the {@link CqlSession#executeAsync(String)} method, which executes
+ * requests in a non-blocking way. It uses {@link CompletableFuture} chaining to limit the number
+ * of concurrent requests to {@code CONCURRENCY_LEVEL} (see the condensed sketch after the lists
+ * below).

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster is running and accessible through the contact points
+ *       identified by basic.contact-points (see application.conf).
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.tbl_sample_kv". If it already exists, it will be reused;
+ *   <li>inserts TOTAL_NUMBER_OF_INSERTS rows into the table.
+ * </ul>
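The chaining trick used by the class below is easiest to see in isolation: each "range" becomes a chain of futures in which `thenCompose` starts insert i+1 only once insert i has completed. A minimal sketch of one such chain, assuming `session` and `pst` as prepared in the class (starting from a completed future avoids the null bootstrap used below):

```java
// Executes inserts [from, to) strictly one after another, without blocking any thread.
private static CompletableFuture<?> chainInserts(
    CqlSession session, PreparedStatement pst, int from, int to) {
  CompletableFuture<?> chain = CompletableFuture.completedFuture(null);
  for (int i = from; i < to; i++) {
    int value = i;
    // thenCompose appends the next async insert to the chain; a failure anywhere
    // short-circuits the remainder unless exceptionally() is added to recover.
    chain =
        chain.thenCompose(
            ignored ->
                session
                    .executeAsync(pst.bind().setUuid("id", Uuids.random()).setInt("value", value))
                    .toCompletableFuture());
  }
  return chain;
}
```

Running CONCURRENCY_LEVEL such chains in parallel, as the class below does, keeps at most that many requests in flight at any time.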
      + * + * @see Java Driver online manual + */ +public class LimitConcurrencyCustomAsync { + private static final int CONCURRENCY_LEVEL = 32; + private static final int TOTAL_NUMBER_OF_INSERTS = 10_000; + // Used to track number of total inserts + private static final AtomicInteger INSERTS_COUNTER = new AtomicInteger(); + + public static void main(String[] args) throws InterruptedException, ExecutionException { + + try (CqlSession session = new CqlSessionBuilder().build()) { + createSchema(session); + insertConcurrent(session); + } + } + + private static void insertConcurrent(CqlSession session) + throws InterruptedException, ExecutionException { + PreparedStatement pst = + session.prepare( + insertInto("examples", "tbl_sample_kv") + .value("id", bindMarker("id")) + .value("value", bindMarker("value")) + .build()); + + // Construct CONCURRENCY_LEVEL number of ranges. + // Each range will be executed independently. + List ranges = createRanges(CONCURRENCY_LEVEL, TOTAL_NUMBER_OF_INSERTS); + + // List of pending CONCURRENCY_LEVEL features that we will wait for at the end of the program. + List> pending = new ArrayList<>(); + + // Every range will have dedicated CompletableFuture handling the execution. + for (Range range : ranges) { + pending.add(executeOneAtATime(session, pst, range)); + } + + // Wait for completion of all CONCURRENCY_LEVEL pending CompletableFeatures + CompletableFuture.allOf(pending.toArray(new CompletableFuture[0])).get(); + + System.out.println( + String.format( + "Finished executing %s queries with a concurrency level of %s.", + INSERTS_COUNTER.get(), CONCURRENCY_LEVEL)); + } + + private static CompletableFuture executeOneAtATime( + CqlSession session, PreparedStatement pst, Range range) { + + CompletableFuture lastFeature = null; + for (int i = range.getFrom(); i < range.getTo(); i++) { + int counter = i; + // If this is a first request init the lastFeature. + if (lastFeature == null) { + lastFeature = executeInsert(session, pst, counter); + } else { + // If lastFeature is already created, chain next async action. + // The next action will execute only after the lastFeature will finish. + // If the lastFeature finishes with failure, the subsequent chained executions + // will not be invoked. If you wish to alter that behaviour and recover from failure + // add the exceptionally() call after whenComplete() of lastFeature. + lastFeature = lastFeature.thenCompose((ignored) -> executeInsert(session, pst, counter)); + } + } + return lastFeature; + } + + private static CompletableFuture executeInsert( + CqlSession session, PreparedStatement pst, int counter) { + + return session + .executeAsync(pst.bind().setUuid("id", Uuids.random()).setInt("value", counter)) + .toCompletableFuture() + .whenComplete( + (BiConsumer) + (asyncResultSet, throwable) -> { + if (throwable == null) { + // When the Feature completes and there is no exception - increment counter. + INSERTS_COUNTER.incrementAndGet(); + } else { + // On production you should leverage logger and use logger.error() method. + throwable.printStackTrace(); + } + }); + } + + private static List createRanges(int concurrencyLevel, int totalNumberOfInserts) { + ArrayList ranges = new ArrayList<>(); + int numberOfElementsInRange = totalNumberOfInserts / concurrencyLevel; + // Create concurrencyLevel number of Ranges. + for (int i = 0; i < concurrencyLevel; i++) { + // If this is a last range give it all remaining elements. 
+ // It may be longer than numberOfElementsInRange in case of + // totalNumberOfInserts / concurrencyLevel will return floating point number. + if (i == concurrencyLevel - 1) { + ranges.add(new Range(i * numberOfElementsInRange, totalNumberOfInserts)); + } else { + // Construct Ranges with numberOfElementsInRange elements. + ranges.add(new Range(i * numberOfElementsInRange, (i + 1) * numberOfElementsInRange)); + } + } + return ranges; + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + + session.execute( + "CREATE TABLE IF NOT EXISTS examples.tbl_sample_kv (id uuid, value int, PRIMARY KEY (id))"); + } + + private static class Range { + private final int from; + private final int to; + + private Range(int from, int to) { + this.from = from; + this.to = to; + } + + public int getFrom() { + return from; + } + + public int getTo() { + return to; + } + + @Override + public String toString() { + return "Range{" + "from=" + from + ", to=" + to + '}'; + } + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java index 995054f2c52..87293e50907 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/concurrent/LimitConcurrencyRequestThrottler.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,10 +23,10 @@ import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.uuid.Uuids; import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; import java.util.ArrayList; import java.util.List; -import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -86,7 +88,7 @@ private static void insertConcurrent(CqlSession session) for (int i = 0; i < TOTAL_NUMBER_OF_INSERTS; i++) { pending.add( session - .executeAsync(pst.bind().setUuid("id", UUID.randomUUID()).setInt("value", i)) + .executeAsync(pst.bind().setUuid("id", Uuids.random()).setInt("value", i)) // Transform CompletionState toCompletableFuture to be able to wait for execution of // all using CompletableFuture.allOf .toCompletableFuture()); diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java index 343a03306e9..750ee49f685 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +21,7 @@ import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.PreparedStatement; import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.protocol.internal.util.Bytes; +import com.datastax.oss.driver.api.core.data.ByteUtils; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; @@ -33,7 +35,7 @@ /** * Inserts and retrieves values in BLOB columns. * - *

      By default, the Java driver maps this type to {@link java.nio.ByteBuffer}. The ByteBuffer API + *

      By default, the Java Driver maps this type to {@link java.nio.ByteBuffer}. The ByteBuffer API * is a bit tricky to use at times, so we will show common pitfalls as well. We strongly recommend * that you read the {@link java.nio.Buffer} and {@link ByteBuffer} API docs and become familiar * with the capacity, limit and position properties. Preconditions: + * + *

      + * + *

+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.videos". If it already exists, it will be reused;
+ *   <li>inserts data in the table.
+ * </ul>
+ *
+ * More examples of custom codecs can be found in the following examples:
+ *
+ * <ol>
+ *   <li>Codecs for tuples and UDTs:
+ *       <ul>
+ *         <li>{@link TuplesSimple}
+ *         <li>{@link TuplesMapped}
+ *         <li>{@link UserDefinedTypesSimple}
+ *         <li>{@link UserDefinedTypesMapped}
+ *       </ul>
+ *   <li>Json codecs:
+ *       <ul>
+ *         <li>{@link com.datastax.oss.driver.examples.json.jackson.JacksonJsonColumn}
+ *         <li>{@link com.datastax.oss.driver.examples.json.jackson.JacksonJsonFunction}
+ *         <li>{@link com.datastax.oss.driver.examples.json.jackson.JacksonJsonRow}
+ *         <li>{@link com.datastax.oss.driver.examples.json.jsr.Jsr353JsonColumn}
+ *         <li>{@link com.datastax.oss.driver.examples.json.jsr.Jsr353JsonFunction}
+ *         <li>{@link com.datastax.oss.driver.examples.json.jsr.Jsr353JsonRow}
+ *       </ul>
+ * </ol>
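As the class added below illustrates, most of these extra codecs need no subclassing at all: they are registered in a single builder call. A condensed sketch of that registration step (the `WeekDay` stand-in mirrors the enum defined in `CustomCodecs`; the query assumes the `examples.videos` table created by the example):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;

public class RegisterCodecs {
  enum WeekDay { MONDAY, TUESDAY } // stand-in for the enum defined in CustomCodecs below

  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder()
            .addTypeCodecs(
                ExtraTypeCodecs.BLOB_TO_ARRAY, // blob <-> byte[]
                ExtraTypeCodecs.enumNamesOf(WeekDay.class), // text <-> WeekDay
                ExtraTypeCodecs.optionalOf(TypeCodecs.INET)) // inet <-> Optional<InetAddress>
            .build()) {
      // Once registered, the generic getters select the codec from the requested Java type:
      Row row = session.execute("SELECT contents FROM examples.videos WHERE pk = 1").one();
      byte[] contents = row == null ? null : row.get("contents", byte[].class);
      System.out.println(contents == null ? "no row" : contents.length + " bytes");
    }
  }
}
```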
        + * + * @see
        driver + * documentation on custom codecs + */ +public class CustomCodecs { + + public static final GenericType> OPTIONAL_OF_INET = + GenericType.optionalOf(InetAddress.class); + + /** A dummy codec converting CQL ints into Java strings. */ + public static class CqlIntToStringCodec extends MappingCodec { + + public CqlIntToStringCodec() { + super(TypeCodecs.INT, GenericType.STRING); + } + + @Nullable + @Override + protected String innerToOuter(@Nullable Integer value) { + return value == null ? null : value.toString(); + } + + @Nullable + @Override + protected Integer outerToInner(@Nullable String value) { + return value == null ? null : Integer.parseInt(value); + } + } + + public enum WeekDay { + MONDAY, + TUESDAY, + WEDNESDAY, + THURSDAY, + FRIDAY, + SATURDAY, + SUNDAY + } + + public static void main(String[] args) { + CqlSessionBuilder builder = CqlSession.builder(); + builder = registerCodecs(builder); + try (CqlSession session = builder.build()) { + createSchema(session); + insertData(session); + retrieveData(session); + } + } + + private static CqlSessionBuilder registerCodecs(CqlSessionBuilder builder) { + return builder.addTypeCodecs( + ExtraTypeCodecs.BLOB_TO_ARRAY, // blob <-> byte[] + ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED, // tuple <-> ZonedDateTime + ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT), // list <-> String[] + ExtraTypeCodecs.enumNamesOf(WeekDay.class), // text <-> MyEnum + ExtraTypeCodecs.optionalOf(TypeCodecs.INET), // uuid <-> Optional + new CqlIntToStringCodec() // custom codec, int <-> String + ); + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.videos(" + + "pk int PRIMARY KEY, " + + "contents blob, " + + "uploaded tuple, " + + "tags list, " + + "week_day text, " + + "ip inet" + + ")"); + } + + private static void insertData(CqlSession session) { + // prepare the INSERT statement + PreparedStatement prepared = + session.prepare( + "INSERT INTO examples.videos (pk, contents, uploaded, tags, week_day, ip) " + + "VALUES (:pk, :contents, :uploaded, :tags, :week_day, :ip)"); + + byte[] contents = new byte[] {1, 2, 3, 4}; + ZonedDateTime uploaded = ZonedDateTime.parse("2020-03-21T15:03:45.123+01:00[Europe/Paris]"); + String[] tags = new String[] {"comedy", "US"}; + WeekDay weekDay = WeekDay.SATURDAY; + Optional maybeIp = Optional.empty(); + + // Create a BoundStatement and set values + BoundStatement boundStatement = + prepared + .bind() + .setString("pk", "1") // will use CqlIntToStringCodec + .set("contents", contents, byte[].class) // will use TypeCodecs.BLOB_SIMPLE + .set( + "uploaded", + uploaded, + ZonedDateTime.class) // will use TypeCodecs.ZONED_TIMESTAMP_PERSISTED + .set("tags", tags, String[].class) // will use TypeCodecs.arrayOf(TypeCodecs.TEXT) + .set( + "week_day", + weekDay, + WeekDay.class) // will use TypeCodecs.enumNamesOf(WeekDay.class) + .set( + "ip", maybeIp, OPTIONAL_OF_INET); // will use TypeCodecs.optionalOf(TypeCodecs.INET) + + // execute the insertion + session.execute(boundStatement); + } + + private static void retrieveData(CqlSession session) { + // Execute the SELECT query and retrieve the single row in the result set + SimpleStatement statement = + SimpleStatement.newInstance( + "SELECT pk, contents, uploaded, tags, week_day, ip FROM examples.videos WHERE pk = ?", + // Here, the primary key must be provided as an 
int, not as a String, because it is not + // possible to use custom codecs in simple statements, only driver built-in codecs. + // If this is an issue, use prepared statements. + 1); + Row row = session.execute(statement).one(); + assert row != null; + + { + // Retrieve values from row using custom codecs + String pk = row.getString("pk"); // will use CqlIntToStringCodec + byte[] contents = row.get("contents", byte[].class); // will use TypeCodecs.BLOB_SIMPLE + ZonedDateTime uploaded = + row.get("uploaded", ZonedDateTime.class); // will use TypeCodecs.ZONED_TIMESTAMP_PERSISTED + String[] tags = + row.get("tags", String[].class); // will use TypeCodecs.arrayOf(TypeCodecs.TEXT) + WeekDay weekDay = + row.get("week_day", WeekDay.class); // will use TypeCodecs.enumNamesOf(WeekDay.class) + Optional maybeIp = + row.get("ip", OPTIONAL_OF_INET); // will use TypeCodecs.optionalOf(TypeCodecs.INET) + + System.out.println("pk: " + pk); + System.out.println("contents: " + Arrays.toString(contents)); + System.out.println("uploaded: " + uploaded); + System.out.println("tags: " + Arrays.toString(tags)); + System.out.println("week day: " + weekDay); + System.out.println("ip: " + maybeIp); + } + + System.out.println("------------------"); + + { + // It is still possible to retrieve the same values from row using driver built-in codecs + int pk = row.getInt("pk"); + ByteBuffer contents = row.getByteBuffer("contents"); + TupleValue uploaded = row.getTupleValue("uploaded"); + List tags = row.getList("tags", String.class); + String weekDay = row.getString("week_day"); + InetAddress ip = row.getInetAddress("ip"); + + System.out.println("pk: " + pk); + System.out.println("contents: " + ByteUtils.toHexString(contents)); + System.out.println( + "uploaded: " + (uploaded == null ? null : uploaded.getFormattedContents())); + System.out.println("tags: " + tags); + System.out.println("week day: " + weekDay); + System.out.println("ip: " + ip); + } + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesMapped.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesMapped.java new file mode 100644 index 00000000000..1d06fc447ce --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesMapped.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.examples.datatypes; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Objects; + +/** + * Inserts and retrieves values in columns of tuples. + * + *

        By default, the Java Driver maps tuples to {@link TupleValue}. This example goes beyond that + * and shows how to map tuples to arbitrary Java types, leveraging the special {@link MappingCodec}. + * + *

+ * <p>A simpler example of tuple usage can be found in {@link TuplesSimple}.

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster is running and accessible through the contact points
+ *       identified by basic.contact-points (see application.conf).
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.tuples". If it already exists, it will be reused;
+ *   <li>inserts data in the table.
+ * </ul>
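The registration step is the only part of the class below that differs from the simple-tuple case; everything else is ordinary binding. A condensed sketch, assuming `session`, plus the `Coordinates` class and `CoordinatesCodec` defined in the class below:

```java
// Runtime registration of the mapping codec, condensed from TuplesMapped below.
MutableCodecRegistry registry = (MutableCodecRegistry) session.getContext().getCodecRegistry();
TupleType coordinatesType = DataTypes.tupleOf(DataTypes.INT, DataTypes.INT);
// Wrap the driver's built-in tuple<int, int> codec in the custom mapping codec:
registry.register(new CoordinatesCodec(registry.codecFor(coordinatesType)));

// After registration, columns of that tuple type bind and read as Coordinates directly:
PreparedStatement pst = session.prepare("INSERT INTO examples.tuples (k, c) VALUES (?, ?)");
session.execute(pst.bind().setInt("k", 3).set("c", new Coordinates(9, 9), Coordinates.class));
```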
        + * + * @see TuplesSimple + * @see MappingCodec + * @see driver + * documentation on custom codecs + */ +public class TuplesMapped { + + /** The Java Pojo that will be mapped to the tuple "coordinates". */ + public static class Coordinates { + + private final int x; + private final int y; + + public Coordinates(int x, int y) { + this.x = x; + this.y = y; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } else if (!(o instanceof Coordinates)) { + return false; + } else { + Coordinates that = (Coordinates) o; + return x == that.x && y == that.y; + } + } + + @Override + public int hashCode() { + return Objects.hash(x, y); + } + + @Override + public String toString() { + return "(" + x + ',' + y + ')'; + } + } + + /** The custom codec that will convert to and from {@link Coordinates}. */ + public static class CoordinatesCodec extends MappingCodec { + + public CoordinatesCodec(@NonNull TypeCodec innerCodec) { + super(innerCodec, GenericType.of(Coordinates.class)); + } + + @NonNull + @Override + public TupleType getCqlType() { + return (TupleType) super.getCqlType(); + } + + @Nullable + @Override + protected Coordinates innerToOuter(@Nullable TupleValue value) { + return value == null ? null : new Coordinates(value.getInt(0), value.getInt(1)); + } + + @Nullable + @Override + protected TupleValue outerToInner(@Nullable Coordinates value) { + return value == null + ? null + : this.getCqlType().newValue().setInt(0, value.x).setInt(1, value.y); + } + } + + public static void main(String[] args) { + try (CqlSession session = CqlSession.builder().build()) { + createSchema(session); + registerCoordinatesCodec(session); + insertData(session); + retrieveData(session); + } + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.tuples(k int PRIMARY KEY, c tuple)"); + } + + private static void registerCoordinatesCodec(CqlSession session) { + // retrieve the codec registry + MutableCodecRegistry codecRegistry = + (MutableCodecRegistry) session.getContext().getCodecRegistry(); + // create the tuple metadata + TupleType coordinatesType = DataTypes.tupleOf(DataTypes.INT, DataTypes.INT); + // retrieve the driver built-in codec for the tuple "coordinates" + TypeCodec innerCodec = codecRegistry.codecFor(coordinatesType); + // create a custom codec to map the "coordinates" tuple to the Coordinates class + CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec); + // register the new codec + codecRegistry.register(coordinatesCodec); + } + + private static void insertData(CqlSession session) { + // prepare the INSERT statement + PreparedStatement prepared = + session.prepare("INSERT INTO examples.tuples (k, c) VALUES (?, ?)"); + + // bind the parameters in one pass + Coordinates coordinates1 = new Coordinates(12, 34); + BoundStatement boundStatement1 = prepared.bind(1, coordinates1); + // execute the insertion + session.execute(boundStatement1); + + // alternate method: bind the parameters one by one + Coordinates coordinates2 = new Coordinates(56, 78); + BoundStatement boundStatement2 = + prepared.bind().setInt("k", 2).set("c", coordinates2, Coordinates.class); + // execute the insertion + session.execute(boundStatement2); + } + + private static void retrieveData(CqlSession session) { + for (int k = 1; k <= 2; k++) { + // Execute the SELECT query and 
retrieve the single row in the result set + SimpleStatement statement = + SimpleStatement.newInstance("SELECT c FROM examples.tuples WHERE k = ?", k); + Row row = session.execute(statement).one(); + assert row != null; + + // Retrieve the value for column c + Coordinates coordinatesValue = row.get("c", Coordinates.class); + assert coordinatesValue != null; + + // Display the contents of the Coordinates instance + System.out.println("found coordinate: " + coordinatesValue); + } + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesSimple.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesSimple.java new file mode 100644 index 00000000000..6aee96bac7f --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/TuplesSimple.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.datatypes; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.TupleType; + +/** + * Inserts and retrieves values in columns of tuple types. + * + *

        By default, the Java Driver maps tuples to {@link TupleValue}. This example shows how to + * create instances of {@link TupleValue}, how to insert them in the database, and how to retrieve + * such instances from the database. + * + *

        For a more complex example showing how to map tuples to arbitrary Java types, see {@link + * TuplesMapped}. + * + *

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster is running and accessible through the contact points
+ *       identified by basic.contact-points (see application.conf).
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.tuples". If it already exists, it will be reused;
+ *   <li>inserts data in the table.
+ * </ul>
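The round trip performed by the class below boils down to three steps: build the tuple type client-side, wrap values in it, and read fields back by position. A condensed sketch, assuming `session` and the `examples.tuples` table created by the example:

```java
// The client-side tuple type must match the column's CQL type, tuple<int, int>.
TupleType coordinatesType = DataTypes.tupleOf(DataTypes.INT, DataTypes.INT);
TupleValue coordinates = coordinatesType.newValue(12, 34);

session.execute(
    session.prepare("INSERT INTO examples.tuples (k, c) VALUES (?, ?)").bind(1, coordinates));

Row row = session.execute("SELECT c FROM examples.tuples WHERE k = 1").one();
TupleValue c = row == null ? null : row.getTupleValue("c");
if (c != null) {
  System.out.printf("(%d,%d)%n", c.getInt(0), c.getInt(1)); // fields are addressed by position
}
```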
        + * + * @see driver + * documentation on custom codecs + * @see TuplesMapped + */ +public class TuplesSimple { + + public static void main(String[] args) { + try (CqlSession session = CqlSession.builder().build()) { + createSchema(session); + insertData(session); + retrieveData(session); + } + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute( + "CREATE TABLE IF NOT EXISTS examples.tuples(k int PRIMARY KEY, c tuple)"); + } + + private static void insertData(CqlSession session) { + // prepare the INSERT statement + PreparedStatement prepared = + session.prepare("INSERT INTO examples.tuples (k, c) VALUES (?, ?)"); + + // create the tuple metadata + TupleType coordinatesType = DataTypes.tupleOf(DataTypes.INT, DataTypes.INT); + + // bind the parameters in one pass + TupleValue coordinates1 = coordinatesType.newValue(12, 34); + BoundStatement boundStatement1 = prepared.bind(1, coordinates1); + // execute the insertion + session.execute(boundStatement1); + + // alternate method: bind the parameters one by one + TupleValue coordinates2 = coordinatesType.newValue(56, 78); + BoundStatement boundStatement2 = + prepared.bind().setInt("k", 2).setTupleValue("c", coordinates2); + // execute the insertion + session.execute(boundStatement2); + } + + private static void retrieveData(CqlSession session) { + for (int k = 1; k <= 2; k++) { + // Execute the SELECT query and retrieve the single row in the result set + SimpleStatement statement = + SimpleStatement.newInstance("SELECT c FROM examples.tuples WHERE k = ?", k); + Row row = session.execute(statement).one(); + assert row != null; + + // Retrieve the value for column c + TupleValue coordinatesValue = row.getTupleValue("c"); + assert coordinatesValue != null; + + // Display the contents of the tuple + System.out.printf( + "found coordinate: (%d,%d)%n", coordinatesValue.getInt(0), coordinatesValue.getInt(1)); + } + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesMapped.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesMapped.java new file mode 100644 index 00000000000..ef97f507746 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesMapped.java @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.examples.datatypes; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Objects; + +/** + * Inserts and retrieves values in columns of user-defined types. + * + *

        By default, the Java Driver maps user-defined types to {@link UdtValue}. This example goes + * beyond that and shows how to map user-defined types to arbitrary Java types, leveraging the + * special {@link MappingCodec}. + * + *

+ * <p>A simpler example of user-defined type usage can be found in {@link
+ * UserDefinedTypesSimple}.

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster is running and accessible through the contact points
+ *       identified by basic.contact-points (see application.conf).
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.udts". If it already exists, it will be reused;
+ *   <li>inserts data in the table.
+ * </ul>
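Unlike tuples, a UDT definition lives server-side, so the class below must fetch it from the schema metadata before it can wrap the built-in codec. A condensed sketch, assuming `session` and the `CoordinatesCodec` defined in the class below:

```java
// The UDT definition is created server-side, so fetch it from the schema metadata first:
UserDefinedType coordinatesType =
    session
        .getMetadata()
        .getKeyspace("examples")
        .flatMap(ks -> ks.getUserDefinedType("coordinates"))
        .orElseThrow(IllegalStateException::new);

// Then wrap the driver's built-in codec for that UDT in the custom mapping codec:
MutableCodecRegistry registry = (MutableCodecRegistry) session.getContext().getCodecRegistry();
registry.register(new CoordinatesCodec(registry.codecFor(coordinatesType)));
```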
        + * + * @see UserDefinedTypesSimple + * @see MappingCodec + * @see driver + * documentation on custom codecs + */ +public class UserDefinedTypesMapped { + + /** The Java Pojo that will be mapped to the user-defined type "coordinates". */ + public static class Coordinates { + + private final int x; + private final int y; + + public Coordinates(int x, int y) { + this.x = x; + this.y = y; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } else if (!(o instanceof Coordinates)) { + return false; + } else { + Coordinates that = (Coordinates) o; + return x == that.x && y == that.y; + } + } + + @Override + public int hashCode() { + return Objects.hash(x, y); + } + + @Override + public String toString() { + return "(" + x + ',' + y + ')'; + } + } + + /** The custom codec that will convert to and from {@link Coordinates}. */ + public static class CoordinatesCodec extends MappingCodec { + + public CoordinatesCodec(@NonNull TypeCodec innerCodec) { + super(innerCodec, GenericType.of(Coordinates.class)); + } + + @NonNull + @Override + public UserDefinedType getCqlType() { + return (UserDefinedType) super.getCqlType(); + } + + @Nullable + @Override + protected Coordinates innerToOuter(@Nullable UdtValue value) { + return value == null ? null : new Coordinates(value.getInt("x"), value.getInt("y")); + } + + @Nullable + @Override + protected UdtValue outerToInner(@Nullable Coordinates value) { + return value == null + ? null + : getCqlType().newValue().setInt("x", value.x).setInt("y", value.y); + } + } + + public static void main(String[] args) { + try (CqlSession session = CqlSession.builder().build()) { + createSchema(session); + registerCoordinatesCodec(session); + insertData(session); + retrieveData(session); + } + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute("CREATE TYPE IF NOT EXISTS examples.coordinates(x int, y int)"); + session.execute("CREATE TABLE IF NOT EXISTS examples.udts(k int PRIMARY KEY, c coordinates)"); + } + + private static void registerCoordinatesCodec(CqlSession session) { + // retrieve the codec registry + MutableCodecRegistry codecRegistry = + (MutableCodecRegistry) session.getContext().getCodecRegistry(); + // retrieve the user-defined type metadata + UserDefinedType coordinatesType = retrieveCoordinatesType(session); + // retrieve the driver built-in codec for the user-defined type "coordinates" + TypeCodec innerCodec = codecRegistry.codecFor(coordinatesType); + // create a custom codec to map the "coordinates" user-defined type to the Coordinates class + CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec); + // register the new codec + codecRegistry.register(coordinatesCodec); + } + + private static void insertData(CqlSession session) { + // prepare the INSERT statement + PreparedStatement prepared = session.prepare("INSERT INTO examples.udts (k, c) VALUES (?, ?)"); + + // bind the parameters in one pass + Coordinates coordinates1 = new Coordinates(12, 34); + BoundStatement boundStatement1 = prepared.bind(1, coordinates1); + // execute the insertion + session.execute(boundStatement1); + + // alternate method: bind the parameters one by one + Coordinates coordinates2 = new Coordinates(56, 78); + BoundStatement boundStatement2 = + prepared.bind().setInt("k", 2).set("c", coordinates2, Coordinates.class); + // execute the insertion + 
session.execute(boundStatement2); + } + + private static void retrieveData(CqlSession session) { + for (int k = 1; k <= 2; k++) { + // Execute the SELECT query and retrieve the single row in the result set + SimpleStatement statement = + SimpleStatement.newInstance("SELECT c FROM examples.udts WHERE k = ?", k); + Row row = session.execute(statement).one(); + assert row != null; + + // Retrieve the value for column c + Coordinates coordinatesValue = row.get("c", Coordinates.class); + assert coordinatesValue != null; + + // Display the contents of the Coordinates instance + System.out.println("found coordinate: " + coordinatesValue); + } + } + + private static UserDefinedType retrieveCoordinatesType(CqlSession session) { + return session + .getMetadata() + .getKeyspace("examples") + .flatMap(ks -> ks.getUserDefinedType("coordinates")) + .orElseThrow(IllegalStateException::new); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesSimple.java b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesSimple.java new file mode 100644 index 00000000000..4387cde5b0f --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/UserDefinedTypesSimple.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.datatypes; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; + +/** + * Inserts and retrieves values in columns of user-defined types. + * + *

        By default, the Java Driver maps user-defined types to {@link UdtValue}. This example shows + * how to create instances of {@link UdtValue}, how to insert them in the database, and how to + * retrieve such instances from the database. + * + *

        For a more complex example showing how to map user-defined types to arbitrary Java types, see + * {@link UserDefinedTypesMapped}. + * + *

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster is running and accessible through the contact points
+ *       identified by basic.contact-points (see application.conf).
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ul>
+ *   <li>creates a new keyspace "examples" in the cluster. If a keyspace with this name already
+ *       exists, it will be reused;
+ *   <li>creates a table "examples.udts". If it already exists, it will be reused;
+ *   <li>inserts data in the table.
+ * </ul>
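The essential moves in the class below: look the type up in the metadata, instantiate values from it, and set fields positionally or by name. A condensed sketch, assuming `session` and the schema created by the example:

```java
UserDefinedType coordinatesType =
    session
        .getMetadata()
        .getKeyspace("examples")
        .flatMap(ks -> ks.getUserDefinedType("coordinates"))
        .orElseThrow(IllegalStateException::new);

// Fields can be set positionally at creation time, or by name afterwards:
UdtValue coordinates = coordinatesType.newValue(12, 34).setInt("y", 56);

session.execute(
    session.prepare("INSERT INTO examples.udts (k, c) VALUES (?, ?)").bind(1, coordinates));
```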
        + * + * @see driver + * documentation on custom codecs + * @see UserDefinedTypesMapped + */ +public class UserDefinedTypesSimple { + + public static void main(String[] args) { + try (CqlSession session = CqlSession.builder().build()) { + createSchema(session); + insertData(session); + retrieveData(session); + } + } + + private static void createSchema(CqlSession session) { + session.execute( + "CREATE KEYSPACE IF NOT EXISTS examples " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute("CREATE TYPE IF NOT EXISTS examples.coordinates(x int, y int)"); + session.execute("CREATE TABLE IF NOT EXISTS examples.udts(k int PRIMARY KEY, c coordinates)"); + } + + private static void insertData(CqlSession session) { + // prepare the INSERT statement + PreparedStatement prepared = session.prepare("INSERT INTO examples.udts (k, c) VALUES (?, ?)"); + + // retrieve the user-defined type metadata + UserDefinedType coordinatesType = retrieveCoordinatesType(session); + + // bind the parameters in one pass + UdtValue coordinates1 = coordinatesType.newValue(12, 34); + BoundStatement boundStatement1 = prepared.bind(1, coordinates1); + // execute the insertion + session.execute(boundStatement1); + + // alternate method: bind the parameters one by one + UdtValue coordinates2 = coordinatesType.newValue(56, 78); + BoundStatement boundStatement2 = prepared.bind().setInt("k", 2).setUdtValue("c", coordinates2); + // execute the insertion + session.execute(boundStatement2); + } + + private static void retrieveData(CqlSession session) { + for (int k = 1; k <= 2; k++) { + // Execute the SELECT query and retrieve the single row in the result set + SimpleStatement statement = + SimpleStatement.newInstance("SELECT c FROM examples.udts WHERE k = ?", k); + Row row = session.execute(statement).one(); + assert row != null; + + // Retrieve the value for column c + UdtValue coordinatesValue = row.getUdtValue("c"); + assert coordinatesValue != null; + + // Display the contents of the UdtValue instance + System.out.printf( + "found coordinate: (%d,%d)%n", + coordinatesValue.getInt("x"), coordinatesValue.getInt("y")); + } + } + + private static UserDefinedType retrieveCoordinatesType(CqlSession session) { + return session + .getMetadata() + .getKeyspace("examples") + .flatMap(ks -> ks.getUserDefinedType("coordinates")) + .orElseThrow(IllegalStateException::new); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java b/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java new file mode 100644 index 00000000000..07907af6886 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java @@ -0,0 +1,458 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.failover; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DriverException; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.NoNodeAvailableException; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.api.core.config.TypedDriverOption; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.servererrors.QueryConsistencyException; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import java.util.Collections; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutionException; +import reactor.core.publisher.Flux; + +/** + * This example illustrates how to implement a cross-datacenter failover strategy from application + * code. + * + *

        Starting with driver 4.10, cross-datacenter failover is also provided as a configuration + * option for built-in load balancing policies. See Load + * balancing in the manual. + * + *

+ * <p>This example demonstrates how to achieve the same effect in application code, which confers
+ * finer-grained control over which statements should be retried and where.

        The logic that decides whether or not a cross-DC failover should be attempted is presented in + * the {@link #shouldFailover(DriverException)} method below; study it carefully and adapt it to + * your needs if necessary. + * + *

+ * <p>The actual request execution and failover code is presented in 3 different programming
+ * styles:
+ *
+ * <ol>
+ *   <li>Synchronous: see the {@link #writeSync()} method below;
+ *   <li>Asynchronous: see the {@link #writeAsync()} method below;
+ *   <li>Reactive (using Reactor): see the {@link #writeReactive()} method below.
+ * </ol>
+ *
+ * <p>The 3 styles are identical in terms of failover effect; they are all included merely to help
+ * programmers pick the variant that is closest to the style they use.
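The decision logic lives in `shouldFailover(DriverException)`, whose body falls outside this excerpt. Purely to illustrate the kind of classification such a method performs, here is a hypothetical minimal version built from the exception types this file imports; this is an assumption, not the example's actual code:

```java
// Hypothetical sketch: fail over only on errors suggesting the local DC is unreachable
// or unhealthy, never on request-specific errors that a remote DC would hit too.
private boolean shouldFailover(DriverException e) {
  return e instanceof AllNodesFailedException // every coordinator tried in dc1 failed
      || e instanceof DriverTimeoutException // no reply from dc1 within the timeout
      || e instanceof UnavailableException // not enough live replicas in dc1
      || e instanceof QueryConsistencyException; // too few dc1 replicas responded
}
```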

+ * <p>Preconditions:
+ *
+ * <ul>
+ *   <li>An Apache Cassandra(R) cluster with two datacenters, dc1 and dc2, containing at least 3
+ *       nodes in each datacenter, is running and accessible through the contact point:
+ *       127.0.0.1:9042.
+ * </ul>
+ *
+ * <p>Side effects:
+ *
+ * <ol>
+ *   <li>Creates a new keyspace {@code failover} in the cluster, with replication factor 3 in both
+ *       datacenters. If a keyspace with this name already exists, it will be reused;
+ *   <li>Creates a new table {@code failover.orders}. If a table with that name exists already, it
+ *       will be reused;
+ *   <li>Tries to write a row in the table using the local datacenter dc1;
+ *   <li>If the local datacenter dc1 is down, retries the write in the remote datacenter dc2.
+ * </ol>
        + * + * @see Java Driver online + * manual + */ +public class CrossDatacenterFailover { + + public static void main(String[] args) throws Exception { + + CrossDatacenterFailover client = new CrossDatacenterFailover(); + + try { + + // Note: when this example is executed, at least the local DC must be available + // since the driver will try to reach contact points in that DC. + + client.connect(); + client.createSchema(); + + // To fully exercise this example, try to stop the entire dc1 here; then observe how + // the writes executed below will first fail in dc1, then be diverted to dc2, where they will + // succeed. + + client.writeSync(); + client.writeAsync(); + client.writeReactive(); + + } finally { + client.close(); + } + } + + private CqlSession session; + + private CrossDatacenterFailover() {} + + /** Initiates a connection to the cluster. */ + private void connect() { + + // For simplicity, this example uses a 100% in-memory configuration loader, but the same + // configuration can be achieved with the more traditional file-based approach. + // Simply put the below snippet in your application.conf file to get the same config: + + /* + datastax-java-driver { + basic.contact-points = [ "127.0.0.1:9042" ] + basic.load-balancing-policy.local-datacenter = "dc1" + basic.request.consistency = LOCAL_QUORUM + profiles { + remote { + basic.load-balancing-policy.local-datacenter = "dc2" + basic.request.consistency = LOCAL_ONE + } + } + } + */ + + OptionsMap options = OptionsMap.driverDefaults(); + // set the datacenter to dc1 in the default profile; this makes dc1 the local datacenter + options.put(TypedDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc1"); + // set the datacenter to dc2 in the "remote" profile + options.put("remote", TypedDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc2"); + // make sure to provide a contact point belonging to dc1, not dc2! + options.put(TypedDriverOption.CONTACT_POINTS, Collections.singletonList("127.0.0.1:9042")); + // in this example, the default consistency level is LOCAL_QUORUM + options.put(TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM"); + // but when failing over, the consistency level will be automatically downgraded to LOCAL_ONE + options.put("remote", TypedDriverOption.REQUEST_CONSISTENCY, "LOCAL_ONE"); + + session = CqlSession.builder().withConfigLoader(DriverConfigLoader.fromMap(options)).build(); + + System.out.println("Connected to cluster with session: " + session.getName()); + } + + /** Creates the schema (keyspace) and table for this example. */ + private void createSchema() { + + session.execute( + "CREATE KEYSPACE IF NOT EXISTS failover WITH replication " + + "= {'class':'NetworkTopologyStrategy', 'dc1':3, 'dc2':3}"); + + session.execute( + "CREATE TABLE IF NOT EXISTS failover.orders (" + + "product_id uuid," + + "timestamp timestamp," + + "price double," + + "PRIMARY KEY (product_id,timestamp)" + + ")"); + } + + /** Inserts data synchronously using the local DC, retrying if necessary in a remote DC. */ + private void writeSync() { + + System.out.println("------- DC failover (sync) ------- "); + + Statement statement = + SimpleStatement.newInstance( + "INSERT INTO failover.orders " + + "(product_id, timestamp, price) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'2018-02-26T13:53:46.345+01:00'," + + "2.34)"); + + try { + + // try the statement using the default profile, which targets the local datacenter dc1. 
+ session.execute(statement); + + System.out.println("Write succeeded"); + + } catch (DriverException e) { + + if (shouldFailover(e)) { + + System.out.println("Write failed in local DC, retrying in remote DC"); + + try { + + // try the statement using the remote profile, which targets the remote datacenter dc2. + session.execute(statement.setExecutionProfileName("remote")); + + System.out.println("Write succeeded"); + + } catch (DriverException e2) { + + System.out.println("Write failed in remote DC"); + + e2.printStackTrace(); + } + } + } + // let other errors propagate + } + + /** Inserts data asynchronously using the local DC, retrying if necessary in a remote DC. */ + private void writeAsync() throws ExecutionException, InterruptedException { + + System.out.println("------- DC failover (async) ------- "); + + Statement statement = + SimpleStatement.newInstance( + "INSERT INTO failover.orders " + + "(product_id, timestamp, price) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'2018-02-26T13:53:46.345+01:00'," + + "2.34)"); + + CompletionStage result = + // try the statement using the default profile, which targets the local datacenter dc1. + session + .executeAsync(statement) + .handle( + (rs, error) -> { + if (error == null) { + return CompletableFuture.completedFuture(rs); + } else { + if (error instanceof DriverException + && shouldFailover((DriverException) error)) { + System.out.println("Write failed in local DC, retrying in remote DC"); + // try the statement using the remote profile, which targets the remote + // datacenter dc2. + return session.executeAsync(statement.setExecutionProfileName("remote")); + } + // let other errors propagate + return CompletableFutures.failedFuture(error); + } + }) + // unwrap (flatmap) the nested future + .thenCompose(future -> future) + .whenComplete( + (rs, error) -> { + if (error == null) { + System.out.println("Write succeeded"); + } else { + System.out.println("Write failed in remote DC"); + error.printStackTrace(); + } + }); + + // for the sake of this example, wait for the operation to finish + result.toCompletableFuture().get(); + } + + /** Inserts data reactively using the local DC, retrying if necessary in a remote DC. */ + private void writeReactive() { + + System.out.println("------- DC failover (reactive) ------- "); + + Statement statement = + SimpleStatement.newInstance( + "INSERT INTO failover.orders " + + "(product_id, timestamp, price) " + + "VALUES (" + + "756716f7-2e54-4715-9f00-91dcbea6cf50," + + "'2018-02-26T13:53:46.345+01:00'," + + "2.34)"); + + Flux result = + // try the statement using the default profile, which targets the local datacenter dc1. + Flux.from(session.executeReactive(statement)) + .onErrorResume( + DriverException.class, + error -> { + if (shouldFailover(error)) { + System.out.println("Write failed in local DC, retrying in remote DC"); + // try the statement using the remote profile, which targets the remote + // datacenter dc2. + return session.executeReactive(statement.setExecutionProfileName("remote")); + } else { + return Flux.error(error); + } + }) + .doOnComplete(() -> System.out.println("Write succeeded")) + .doOnError( + error -> { + System.out.println("Write failed"); + error.printStackTrace(); + }); + + // for the sake of this example, wait for the operation to finish + result.blockLast(); + } + + /** + * Analyzes the error and decides whether to failover to a remote DC. + * + *

        The logic below categorizes driver exceptions in four main groups: + * + *

          + *
        1. Total DC outage: all nodes in DC were known to be down when the request was executed; + *
        2. Partial DC outage: one or many nodes responded, but reported a replica availability + * problem; + *
        3. DC unreachable: one or many nodes were queried, but none responded (timeout); + *
        4. Other errors. + *
        + * + * A DC failover is authorized for the first three groups above: total DC outage, partial DC + * outage, and DC unreachable. + * + *

        This logic is provided as a good starting point for users to create their own DC failover + * strategy; please adjust it to your exact needs. + */ + private boolean shouldFailover(DriverException mainException) { + + if (mainException instanceof NoNodeAvailableException) { + + // No node could be tried, because all nodes in the query plan were down. This could be a + // total DC outage, so trying another DC makes sense. + System.out.println("All nodes were down in this datacenter, failing over"); + return true; + + } else if (mainException instanceof AllNodesFailedException) { + + // Many nodes were tried (as decided by the retry policy), but all failed. This could be a + // partial DC outage: some nodes were up, but the replicas were down. + + boolean failover = false; + + // Inspect the error to find out how many coordinators were tried, and which errors they + // returned. + for (Entry> entry : + ((AllNodesFailedException) mainException).getAllErrors().entrySet()) { + + Node coordinator = entry.getKey(); + List errors = entry.getValue(); + + System.out.printf( + "Node %s in DC %s was tried %d times but failed with:%n", + coordinator.getEndPoint(), coordinator.getDatacenter(), errors.size()); + + for (Throwable nodeException : errors) { + + System.out.printf("\t- %s%n", nodeException); + + // If the error was a replica availability error, then we know that some replicas were + // down in this DC. Retrying in another DC could solve the problem. Other errors don't + // necessarily mean that the DC is unavailable, so we ignore them. + if (isReplicaAvailabilityError(nodeException)) { + failover = true; + } + } + } + + // Authorize the failover if at least one of the coordinators reported a replica availability + // error that could be solved by trying another DC. + if (failover) { + System.out.println( + "Some nodes tried in this DC reported a replica availability error, failing over"); + } else { + System.out.println("All nodes tried in this DC failed unexpectedly, not failing over"); + } + return failover; + + } else if (mainException instanceof DriverTimeoutException) { + + // One or many nodes were tried, but none replied in a timely manner, and the timeout defined + // by the option `datastax-java-driver.basic.request.timeout` was triggered. + // This could be a DC outage as well, or a network partition issue, so trying another DC may + // make sense. + // Note about SLAs: if your application needs to comply with SLAs, and the maximum acceptable + // latency for a request is equal or very close to the request timeout, beware that failing + // over to a different datacenter here could potentially break your SLA. + + System.out.println( + "No node in this DC replied before the timeout was triggered, failing over"); + return true; + + } else if (mainException instanceof CoordinatorException) { + + // Only one node was tried, and it failed (and the retry policy did not tell the driver to + // retry this request, but rather to surface the error immediately). This is rather unusual + // as the driver's default retry policy retries most of these errors, but some custom retry + // policies could decide otherwise. So we apply the same logic as above: if the error is a + // replica availability error, we authorize the failover. 
+ + Node coordinator = ((CoordinatorException) mainException).getCoordinator(); + System.out.printf( + "Node %s in DC %s was tried once but failed with: %s%n", + coordinator.getEndPoint(), coordinator.getDatacenter(), mainException); + + boolean failover = isReplicaAvailabilityError(mainException); + if (failover) { + System.out.println( + "The only node tried in this DC reported a replica availability error, failing over"); + } else { + System.out.println("The only node tried in this DC failed unexpectedly, not failing over"); + } + return failover; + + } else { + + // The request failed with a rather unusual error. This generally indicates a more serious + // issue, since the retry policy decided to surface the error immediately. Trying another DC + // is probably a bad idea. + System.out.println("The request failed unexpectedly, not failing over: " + mainException); + return false; + } + } + + /** + * Whether the given error is a replica availability error. + * + *

        A replica availability error means that the initial consistency level could not be met + * because not enough replicas were alive. + * + *

When this error happens, it can be worth failing over to a remote DC, as long as at + * least one of the following conditions applies: + *

          + *
        1. if the initial consistency level was DC-local, trying another DC may succeed; + *
        2. if the initial consistency level can be downgraded, then retrying again may succeed (in + * the same DC, or in another one). + *
        + * + * In this example both conditions above apply, so we authorize the failover whenever we detect a + * replica availability error. + */ + private boolean isReplicaAvailabilityError(Throwable t) { + return t instanceof UnavailableException || t instanceof QueryConsistencyException; + } + + private void close() { + if (session != null) { + session.close(); + } + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java index 3d2982f0429..6bb2f8a9fdd 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/PlainTextJson.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/codecs/JacksonJsonCodec.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/codecs/JacksonJsonCodec.java deleted file mode 100644 index 763d325f7c9..00000000000 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/codecs/JacksonJsonCodec.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.examples.json.codecs; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.type.TypeFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.IOException; -import java.nio.ByteBuffer; - -/** - * A JSON codec that uses the Jackson library to - * perform serialization and deserialization of JSON objects. - * - *

        This codec maps a single Java object to a single JSON structure at a time; mapping of arrays - * or collections to root-level JSON arrays is not supported, but such a codec can be easily crafted - * after this one. - * - *

        Note that this codec requires the presence of Jackson library at runtime. If you use Maven, - * this can be done by declaring the following dependency in your project: - * - *

- * <pre>{@code
- * <dependency>
- *   <groupId>com.fasterxml.jackson.core</groupId>
- *   <artifactId>jackson-databind</artifactId>
- *   <version>2.9.8</version>
- * </dependency>
- * }</pre>
        - */ -public class JacksonJsonCodec implements TypeCodec { - - private final ObjectMapper objectMapper; - private final GenericType javaType; - - /** - * Creates a new instance for the provided {@code javaClass}, using a default, newly-allocated - * {@link ObjectMapper}. - * - * @param javaClass the Java class this codec maps to. - */ - public JacksonJsonCodec(Class javaClass) { - this(javaClass, new ObjectMapper()); - } - - /** - * Creates a new instance for the provided {@code javaClass}, and using the provided {@link - * ObjectMapper}. - * - * @param javaClass the Java class this codec maps to. - */ - public JacksonJsonCodec(Class javaClass, ObjectMapper objectMapper) { - this.javaType = GenericType.of(javaClass); - this.objectMapper = objectMapper; - } - - @NonNull - @Override - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable T value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try { - return ByteBuffer.wrap(objectMapper.writeValueAsBytes(value)); - } catch (JsonProcessingException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - @Nullable - @Override - public T decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - try { - return objectMapper.readValue(Bytes.getArray(bytes), toJacksonJavaType()); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - @NonNull - @Override - public String format(T value) { - if (value == null) { - return "NULL"; - } - String json; - try { - json = objectMapper.writeValueAsString(value); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - return Strings.quote(json); - } - - @Nullable - @Override - @SuppressWarnings("unchecked") - public T parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException("JSON strings must be enclosed by single quotes"); - } - String json = Strings.unquote(value); - try { - return (T) objectMapper.readValue(json, toJacksonJavaType()); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - /** - * This method acts as a bridge between the driver's {@link - * com.datastax.oss.driver.api.core.type.reflect.GenericType GenericType} API and Jackson's {@link - * JavaType} API. - * - * @return A {@link JavaType} instance corresponding to the codec's {@link #getJavaType() Java - * type}. - */ - private JavaType toJacksonJavaType() { - return TypeFactory.defaultInstance().constructType(getJavaType().getType()); - } -} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java index 1873ecbfc3e..0d1ed61d2f4 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonColumn.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,9 +27,9 @@ import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.examples.json.PlainTextJson; -import com.datastax.oss.driver.examples.json.codecs.JacksonJsonCodec; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; @@ -35,9 +37,9 @@ * Illustrates how to map a single table column of type {@code VARCHAR}, containing JSON payloads, * into a Java object using the Jackson library. * - *

        This example makes usage of a custom {@link TypeCodec codec}, {@link JacksonJsonCodec}, which - * is implemented in the java-driver-examples module. If you plan to follow this example, make sure - * to include the following Maven dependencies in your project: + *

This example makes use of a {@linkplain ExtraTypeCodecs#json(Class) custom codec for JSON}. + * If you plan to follow this example, make sure to include the following Maven dependencies in your + * project: * *

        {@code
          * 
        @@ -70,7 +72,7 @@
         public class JacksonJsonColumn {
         
           // A codec to convert JSON payloads into User instances;
-  private static final TypeCodec<User> USER_CODEC = new JacksonJsonCodec<>(User.class);
+  private static final TypeCodec<User> USER_CODEC = ExtraTypeCodecs.json(User.class);
         
           public static void main(String[] args) {
             try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) {
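The hunk above swaps the example's hand-rolled `JacksonJsonCodec` for the driver's built-in `ExtraTypeCodecs.json(Class)`. A minimal sketch of what the registered codec does end to end; the `examples.users` table and the `User` POJO are illustrative assumptions, not part of this patch:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;

public class JsonColumnSketch {

  // A Jackson-friendly POJO: public fields and a no-arg constructor are enough.
  public static class User {
    public String name;
    public int age;

    public User() {}

    public User(String name, int age) {
      this.name = name;
      this.age = age;
    }
  }

  public static void main(String[] args) {
    // Maps a CQL text column to User by serializing to/from JSON with Jackson.
    TypeCodec<User> userCodec = ExtraTypeCodecs.json(User.class);
    try (CqlSession session = CqlSession.builder().addTypeCodecs(userCodec).build()) {
      // On the way in, the codec renders the User as a JSON string.
      PreparedStatement insert =
          session.prepare("INSERT INTO examples.users (id, user) VALUES (?, ?)");
      session.execute(insert.bind(1, new User("alice", 30)));
      // On the way out, it parses the text column back into a User.
      Row row = session.execute("SELECT user FROM examples.users WHERE id = 1").one();
      assert row != null;
      User user = row.get("user", User.class);
      System.out.printf("%s (%d)%n", user.name, user.age);
    }
  }
}
```

Functionally this matches the deleted `JacksonJsonCodec`; the built-in variant simply removes some 150 lines of example code to maintain.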
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java
        index 9e214572067..b3c2c6aaa95 100644
        --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonFunction.java
        @@ -1,11 +1,13 @@
         /*
        - * Copyright DataStax, Inc.
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
          *
        - * Licensed under the Apache License, Version 2.0 (the "License");
        - * you may not use this file except in compliance with the License.
        - * You may obtain a copy of the License at
        - *
        - * http://www.apache.org/licenses/LICENSE-2.0
        + *     http://www.apache.org/licenses/LICENSE-2.0
          *
          * Unless required by applicable law or agreed to in writing, software
          * distributed under the License is distributed on an "AS IS" BASIS,
        @@ -27,10 +29,10 @@
         import com.datastax.oss.driver.api.core.cql.Row;
         import com.datastax.oss.driver.api.core.cql.SimpleStatement;
         import com.datastax.oss.driver.api.core.cql.Statement;
        +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
         import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
         import com.datastax.oss.driver.api.querybuilder.select.Selector;
         import com.datastax.oss.driver.examples.json.PlainTextJson;
        -import com.datastax.oss.driver.examples.json.codecs.JacksonJsonCodec;
         import com.fasterxml.jackson.annotation.JsonCreator;
         import com.fasterxml.jackson.annotation.JsonProperty;
         import com.fasterxml.jackson.databind.JsonNode;
        @@ -42,8 +44,8 @@
          * href="http://wiki.fasterxml.com/JacksonHome">Jackson library, and leveraging the {@code
          * toJson()} and {@code fromJson()} functions introduced in Cassandra 2.2.
          *
        - * 

        This example makes usage of a custom {@link TypeCodec codec}, {@link JacksonJsonCodec}. If you - * plan to follow this example, make sure to include the following Maven dependencies in your + *

This example makes use of a {@linkplain ExtraTypeCodecs#json(Class) custom codec for JSON}. + * If you plan to follow this example, make sure to include the following Maven dependencies in your * project: * *

        {@code
        @@ -82,10 +84,10 @@
         public class JacksonJsonFunction {
         
           // A codec to convert JSON payloads into User instances;
-  private static final TypeCodec<User> USER_CODEC = new JacksonJsonCodec<>(User.class);
+  private static final TypeCodec<User> USER_CODEC = ExtraTypeCodecs.json(User.class);
         
           // A codec to convert generic JSON payloads into JsonNode instances
-  private static final TypeCodec<JsonNode> JSON_NODE_CODEC = new JacksonJsonCodec<>(JsonNode.class);
+  private static final TypeCodec<JsonNode> JSON_NODE_CODEC = ExtraTypeCodecs.json(JsonNode.class);
         
           public static void main(String[] args) {
             try (CqlSession session =
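Besides plain column mapping, this example combines the codec with Cassandra's server-side `fromJson()` and `toJson()` functions. A hedged sketch of that pattern; the `examples.users2` table is an assumption made for illustration:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
import com.fasterxml.jackson.databind.JsonNode;

public class JsonFunctionSketch {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder().addTypeCodecs(ExtraTypeCodecs.json(JsonNode.class)).build()) {
      // fromJson() converts the bound JSON text into the column's actual CQL type,
      // server side, whatever that type is.
      session.execute(
          SimpleStatement.newInstance(
              "INSERT INTO examples.users2 (id, user) VALUES (?, fromJson(?))",
              1,
              "{\"name\": \"alice\", \"age\": 30}"));
      // toJson() does the reverse: it renders the column as JSON text, which the
      // registered codec surfaces as a JsonNode instead of a raw String.
      Row row =
          session
              .execute("SELECT toJson(user) AS user_json FROM examples.users2 WHERE id = 1")
              .one();
      assert row != null;
      JsonNode user = row.get("user_json", JsonNode.class);
      System.out.println(user.get("name").asText());
    }
  }
}
```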
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java
        index 2ce4ef4abc8..1a5fed0bbf3 100644
        --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jackson/JacksonJsonRow.java
        @@ -1,11 +1,13 @@
         /*
        - * Copyright DataStax, Inc.
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
          *
        - * Licensed under the Apache License, Version 2.0 (the "License");
        - * you may not use this file except in compliance with the License.
        - * You may obtain a copy of the License at
        - *
        - * http://www.apache.org/licenses/LICENSE-2.0
        + *     http://www.apache.org/licenses/LICENSE-2.0
          *
          * Unless required by applicable law or agreed to in writing, software
          * distributed under the License is distributed on an "AS IS" BASIS,
        @@ -25,9 +27,9 @@
         import com.datastax.oss.driver.api.core.cql.ResultSet;
         import com.datastax.oss.driver.api.core.cql.Row;
         import com.datastax.oss.driver.api.core.cql.Statement;
        +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
         import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
         import com.datastax.oss.driver.examples.json.PlainTextJson;
        -import com.datastax.oss.driver.examples.json.codecs.JacksonJsonCodec;
         import com.fasterxml.jackson.annotation.JsonCreator;
         import com.fasterxml.jackson.annotation.JsonProperty;
         
        @@ -36,8 +38,8 @@
          * href="http://wiki.fasterxml.com/JacksonHome">Jackson library, and leveraging the {@code
          * SELECT JSON} and {@code INSERT JSON} syntaxes introduced in Cassandra 2.2.
          *
        - * 

        This example makes usage of a custom {@link TypeCodec codec}, {@link JacksonJsonCodec}. If you - * plan to follow this example, make sure to include the following Maven dependencies in your + *

This example makes use of a {@linkplain ExtraTypeCodecs#json(Class) custom codec for JSON}. + * If you plan to follow this example, make sure to include the following Maven dependencies in your * project: * *

        {@code
        @@ -73,7 +75,7 @@
          */
         public class JacksonJsonRow {
           // A codec to convert JSON payloads into User instances;
-  private static final TypeCodec<User> USER_CODEC = new JacksonJsonCodec<>(User.class);
+  private static final TypeCodec<User> USER_CODEC = ExtraTypeCodecs.json(User.class);
         
           public static void main(String[] args) {
             try (CqlSession session = CqlSession.builder().addTypeCodecs(USER_CODEC).build()) {
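This file exercises the row-at-a-time flavor instead: `INSERT ... JSON` takes the whole row as one JSON document, and `SELECT JSON` returns each row in a single text column named `[json]`. A bare-bones sketch of just that syntax, with no codec involved; the `examples.users3` table is hypothetical and its columns must match the JSON keys:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Row;

public class JsonRowSketch {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // INSERT JSON: the bound text is the whole row; keys must match column names.
      PreparedStatement insert = session.prepare("INSERT INTO examples.users3 JSON ?");
      session.execute(insert.bind("{\"id\": 1, \"name\": \"alice\", \"age\": 30}"));
      // SELECT JSON: every row comes back as one text column named "[json]".
      Row row = session.execute("SELECT JSON * FROM examples.users3 WHERE id = 1").one();
      assert row != null;
      System.out.println(row.getString("[json]"));
    }
  }
}
```

With `USER_CODEC` registered, the example reads that same `[json]` column directly as a `User` via `row.get("[json]", User.class)`.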
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/codecs/Jsr353JsonCodec.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonCodec.java
        similarity index 76%
        rename from examples/src/main/java/com/datastax/oss/driver/examples/json/codecs/Jsr353JsonCodec.java
        rename to examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonCodec.java
        index 1b7eeae4a08..9b30d5d6c6b 100644
        --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/codecs/Jsr353JsonCodec.java
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonCodec.java
        @@ -1,11 +1,13 @@
         /*
        - * Copyright DataStax, Inc.
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
          *
        - * Licensed under the Apache License, Version 2.0 (the "License");
        - * you may not use this file except in compliance with the License.
        - * You may obtain a copy of the License at
        - *
        - * http://www.apache.org/licenses/LICENSE-2.0
        + *     http://www.apache.org/licenses/LICENSE-2.0
          *
          * Unless required by applicable law or agreed to in writing, software
          * distributed under the License is distributed on an "AS IS" BASIS,
        @@ -13,7 +15,7 @@
          * See the License for the specific language governing permissions and
          * limitations under the License.
          */
        -package com.datastax.oss.driver.examples.json.codecs;
        +package com.datastax.oss.driver.examples.json.jsr;
         
         import com.datastax.oss.driver.api.core.ProtocolVersion;
         import com.datastax.oss.driver.api.core.type.DataType;
        @@ -105,7 +107,7 @@ public Jsr353JsonCodec() {
            * @param config A map of provider-specific configuration properties. May be empty or {@code
            *     null}.
            */
-  public Jsr353JsonCodec(Map<String, ?> config) {
+  public Jsr353JsonCodec(@Nullable Map<String, ?> config) {
             readerFactory = Json.createReaderFactory(config);
             writerFactory = Json.createWriterFactory(config);
           }
        @@ -130,15 +132,11 @@ public ByteBuffer encode(
               return null;
             }
             try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
        -      try {
        -        JsonWriter writer = writerFactory.createWriter(baos);
        -        writer.write(value);
        -        return ByteBuffer.wrap(baos.toByteArray());
        -      } catch (JsonException e) {
        -        throw new IllegalArgumentException(e.getMessage(), e);
        -      }
        -    } catch (IOException e) {
        -      throw new IllegalArgumentException(e.getMessage(), e);
        +      JsonWriter writer = writerFactory.createWriter(baos);
        +      writer.write(value);
        +      return ByteBuffer.wrap(baos.toByteArray());
        +    } catch (JsonException | IOException e) {
        +      throw new IllegalArgumentException("Failed to encode value as JSON", e);
             }
           }
         
        @@ -150,40 +148,33 @@ public JsonStructure decode(
               return null;
             }
             try (ByteArrayInputStream bais = new ByteArrayInputStream(Bytes.getArray(bytes))) {
        -      try {
        -        JsonReader reader = readerFactory.createReader(bais);
        -        return reader.read();
        -      } catch (JsonException e) {
        -        throw new IllegalArgumentException(e.getMessage(), e);
        -      }
        -    } catch (IOException e) {
        -      throw new IllegalArgumentException(e.getMessage(), e);
        +      JsonReader reader = readerFactory.createReader(bais);
        +      return reader.read();
        +    } catch (JsonException | IOException e) {
        +      throw new IllegalArgumentException("Failed to decode JSON value", e);
             }
           }
         
           @NonNull
           @Override
        -  public String format(JsonStructure value) throws IllegalArgumentException {
        +  public String format(@Nullable JsonStructure value) {
             if (value == null) {
               return "NULL";
             }
             String json;
             try (StringWriter sw = new StringWriter()) {
        -      try {
        -        JsonWriter writer = writerFactory.createWriter(sw);
        -        writer.write(value);
        -        json = sw.toString();
        -      } catch (JsonException e) {
        -        throw new IllegalArgumentException(e.getMessage(), e);
        -      }
        -    } catch (IOException e) {
        -      throw new IllegalArgumentException(e.getMessage(), e);
        +      JsonWriter writer = writerFactory.createWriter(sw);
        +      writer.write(value);
        +      json = sw.toString();
        +    } catch (JsonException | IOException e) {
        +      throw new IllegalArgumentException("Failed to format value as JSON", e);
             }
             return Strings.quote(json);
           }
         
        +  @Nullable
           @Override
        -  public JsonStructure parse(String value) throws IllegalArgumentException {
        +  public JsonStructure parse(String value) {
             if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) {
               return null;
             }
        @@ -195,7 +186,7 @@ public JsonStructure parse(String value) throws IllegalArgumentException {
               JsonReader reader = readerFactory.createReader(sr);
               return reader.read();
             } catch (JsonException e) {
        -      throw new IllegalArgumentException(e.getMessage(), e);
        +      throw new IllegalArgumentException("Failed to parse value as JSON", e);
             }
           }
         }
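The refactoring above flattens the nested try/catch blocks into single try-with-resources statements with multi-catch, and gives the wrapped exceptions descriptive messages; behavior is otherwise unchanged. A small illustrative round-trip through the four methods the hunk touches (usage here is a sketch, not part of the patch):

```java
import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.examples.json.jsr.Jsr353JsonCodec;
import java.nio.ByteBuffer;
import javax.json.Json;
import javax.json.JsonObject;

public class Jsr353RoundTripSketch {
  public static void main(String[] args) {
    Jsr353JsonCodec codec = new Jsr353JsonCodec();
    JsonObject alice = Json.createObjectBuilder().add("id", 1).add("name", "alice").build();

    // Binary round-trip: what the driver does on the wire.
    ByteBuffer bytes = codec.encode(alice, ProtocolVersion.DEFAULT);
    System.out.println(codec.decode(bytes, ProtocolVersion.DEFAULT));

    // Textual round-trip: what the driver does for CQL literals. format() returns
    // the JSON wrapped in single quotes, and parse() expects that quoted form back.
    String literal = codec.format(alice);
    System.out.println(codec.parse(literal));
  }
}
```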
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java
        index 6776399699b..9ded61f82e3 100644
        --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonColumn.java
        @@ -1,11 +1,13 @@
         /*
        - * Copyright DataStax, Inc.
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
          *
        - * Licensed under the Apache License, Version 2.0 (the "License");
        - * you may not use this file except in compliance with the License.
        - * You may obtain a copy of the License at
        - *
        - * http://www.apache.org/licenses/LICENSE-2.0
        + *     http://www.apache.org/licenses/LICENSE-2.0
          *
          * Unless required by applicable law or agreed to in writing, software
          * distributed under the License is distributed on an "AS IS" BASIS,
        @@ -27,7 +29,6 @@
         import com.datastax.oss.driver.api.core.cql.Statement;
         import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
         import com.datastax.oss.driver.examples.json.PlainTextJson;
        -import com.datastax.oss.driver.examples.json.codecs.Jsr353JsonCodec;
         import javax.json.Json;
         import javax.json.JsonObject;
         import javax.json.JsonStructure;
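`Jsr353JsonColumn`, whose imports are adjusted above, registers that codec to map a text column to `javax.json` structures, the JSR-353 counterpart of the Jackson example. A hedged sketch of the pattern; keyspace and table names are illustrative assumptions:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.examples.json.jsr.Jsr353JsonCodec;
import javax.json.Json;
import javax.json.JsonObject;
import javax.json.JsonStructure;

public class Jsr353ColumnSketch {
  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder().addTypeCodecs(new Jsr353JsonCodec()).build()) {
      JsonObject alice = Json.createObjectBuilder().add("id", 1).add("name", "alice").build();
      // The codec serializes the JsonObject into the text column...
      PreparedStatement insert =
          session.prepare("INSERT INTO examples.users4 (id, user) VALUES (?, ?)");
      session.execute(insert.bind(1, alice));
      // ...and parses it back into a JsonStructure on read.
      Row row = session.execute("SELECT user FROM examples.users4 WHERE id = 1").one();
      assert row != null;
      JsonStructure user = row.get("user", JsonStructure.class);
      System.out.println(user);
    }
  }
}
```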
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java
        index 60d58275955..25b243eeb5d 100644
        --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonFunction.java
        @@ -1,11 +1,13 @@
         /*
        - * Copyright DataStax, Inc.
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
          *
        - * Licensed under the Apache License, Version 2.0 (the "License");
        - * you may not use this file except in compliance with the License.
        - * You may obtain a copy of the License at
        - *
        - * http://www.apache.org/licenses/LICENSE-2.0
        + *     http://www.apache.org/licenses/LICENSE-2.0
          *
          * Unless required by applicable law or agreed to in writing, software
          * distributed under the License is distributed on an "AS IS" BASIS,
        @@ -29,7 +31,6 @@
         import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
         import com.datastax.oss.driver.api.querybuilder.select.Selector;
         import com.datastax.oss.driver.examples.json.PlainTextJson;
        -import com.datastax.oss.driver.examples.json.codecs.Jsr353JsonCodec;
         import javax.json.Json;
         import javax.json.JsonObject;
         import javax.json.JsonStructure;
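`Jsr353JsonFunction` keeps its `Selector` import because it builds the `toJson()` call with the query builder rather than a raw CQL string. A sketch of that construction, under assumed table and column names:

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;

import com.datastax.oss.driver.api.querybuilder.select.Select;
import com.datastax.oss.driver.api.querybuilder.select.Selector;

public class ToJsonQueryBuilderSketch {
  public static void main(String[] args) {
    // Wrap the "user" column in a server-side toJson() call and alias the result.
    Select select =
        selectFrom("examples", "users5")
            .function("toJson", Selector.column("user"))
            .as("user_json")
            .whereColumn("id")
            .isEqualTo(literal(1));
    // Prints: SELECT toJson(user) AS user_json FROM examples.users5 WHERE id=1
    System.out.println(select.asCql());
  }
}
```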
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java
        index c7f389f6017..595522fa964 100644
        --- a/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/json/jsr/Jsr353JsonRow.java
        @@ -1,11 +1,13 @@
         /*
        - * Copyright DataStax, Inc.
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
          *
        - * Licensed under the Apache License, Version 2.0 (the "License");
        - * you may not use this file except in compliance with the License.
        - * You may obtain a copy of the License at
        - *
        - * http://www.apache.org/licenses/LICENSE-2.0
        + *     http://www.apache.org/licenses/LICENSE-2.0
          *
          * Unless required by applicable law or agreed to in writing, software
          * distributed under the License is distributed on an "AS IS" BASIS,
        @@ -27,7 +29,6 @@
         import com.datastax.oss.driver.api.core.cql.Statement;
         import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
         import com.datastax.oss.driver.examples.json.PlainTextJson;
        -import com.datastax.oss.driver.examples.json.codecs.Jsr353JsonCodec;
         import javax.json.Json;
         import javax.json.JsonObject;
         import javax.json.JsonStructure;
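The new `KillrVideoMapperExample` below drives the annotation-processed object mapper through DAOs (`UserDao`, `VideoDao`) defined elsewhere in this patch. For orientation, a hedged sketch of the general shape of a mapper entity and DAO in driver 4; the names and generated queries here are illustrative, not the patch's actual definitions:

```java
// Account.java (hypothetical entity)
import com.datastax.oss.driver.api.mapper.annotations.Entity;
import com.datastax.oss.driver.api.mapper.annotations.PartitionKey;
import java.util.UUID;

@Entity
public class Account {
  @PartitionKey private UUID id;
  private String email;

  public Account() {}

  public UUID getId() {
    return id;
  }

  public void setId(UUID id) {
    this.id = id;
  }

  public String getEmail() {
    return email;
  }

  public void setEmail(String email) {
    this.email = email;
  }
}
```

```java
// AccountDao.java (hypothetical DAO; the annotation processor generates the implementation)
import com.datastax.oss.driver.api.mapper.annotations.Dao;
import com.datastax.oss.driver.api.mapper.annotations.Insert;
import com.datastax.oss.driver.api.mapper.annotations.Select;
import java.util.UUID;

@Dao
public interface AccountDao {
  @Select
  Account findById(UUID id); // SELECT ... FROM account WHERE id = ?

  @Insert
  void save(Account account); // INSERT INTO account (...) VALUES (...)
}
```

A `@Mapper` interface with `@DaoFactory` methods, like `KillrVideoMapper` in this patch, then ties the generated DAOs to a `CqlSession`.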
        diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/KillrVideoMapperExample.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/KillrVideoMapperExample.java
        new file mode 100644
        index 00000000000..6284b16eac1
        --- /dev/null
        +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/KillrVideoMapperExample.java
        @@ -0,0 +1,194 @@
        +/*
        + * Licensed to the Apache Software Foundation (ASF) under one
        + * or more contributor license agreements.  See the NOTICE file
        + * distributed with this work for additional information
        + * regarding copyright ownership.  The ASF licenses this file
        + * to you under the Apache License, Version 2.0 (the
        + * "License"); you may not use this file except in compliance
        + * with the License.  You may obtain a copy of the License at
        + *
        + *     http://www.apache.org/licenses/LICENSE-2.0
        + *
        + * Unless required by applicable law or agreed to in writing, software
        + * distributed under the License is distributed on an "AS IS" BASIS,
        + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        + * See the License for the specific language governing permissions and
        + * limitations under the License.
        + */
        +package com.datastax.oss.driver.examples.mapper;
        +
        +import com.datastax.oss.driver.api.core.CqlIdentifier;
        +import com.datastax.oss.driver.api.core.CqlSession;
        +import com.datastax.oss.driver.api.core.PagingIterable;
        +import com.datastax.oss.driver.api.core.cql.SimpleStatement;
        +import com.datastax.oss.driver.api.core.uuid.Uuids;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.KillrVideoMapper;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.user.User;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.user.UserDao;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.video.LatestVideo;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.video.UserVideo;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.video.Video;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.video.VideoByTag;
        +import com.datastax.oss.driver.examples.mapper.killrvideo.video.VideoDao;
        +import java.nio.charset.StandardCharsets;
        +import java.nio.file.Files;
        +import java.nio.file.Path;
        +import java.nio.file.Paths;
        +import java.time.Instant;
        +import java.time.ZoneOffset;
        +import java.time.format.DateTimeFormatter;
        +import java.util.Arrays;
        +import java.util.HashSet;
        +import java.util.List;
        +import java.util.Optional;
        +import java.util.Set;
        +import java.util.stream.Collectors;
        +
        +/**
        + * Uses the driver's object mapper to interact with a schema.
        + *
        + * 

        We use the data model of the KillrVideo sample + * application. The mapped entities and DAOs are in the {@link + * com.datastax.oss.driver.examples.mapper.killrvideo} package. We only cover a subset of the data + * model (ratings, stats, recommendations and comments are not covered). + * + *

        Preconditions: + * + *

          + *
• An Apache Cassandra(R) cluster is running and accessible through the contact points + * identified by basic.contact-points (see application.conf). + *
        + * + *

        Side effects: + * + *

          + *
        • creates a new keyspace "killrvideo" in the session. If a keyspace with this name already + * exists, it will be reused; + *
        • creates the tables of the KillrVideo data model, if they don't already exist; + *
• inserts a new user, or reuses the existing one if the email address is already taken; + *
        • inserts a video for that user. + *
        + * + * @see Java + * Driver Mapper manual + */ +@SuppressWarnings("CatchAndPrintStackTrace") +public class KillrVideoMapperExample { + + private static final CqlIdentifier KEYSPACE_ID = CqlIdentifier.fromCql("killrvideo"); + + public static void main(String[] args) { + + try (CqlSession session = CqlSession.builder().build()) { + + maybeCreateSchema(session); + + KillrVideoMapper mapper = + KillrVideoMapper.builder(session).withDefaultKeyspace(KEYSPACE_ID).build(); + + // Create a new user + UserDao userDao = mapper.userDao(); + + User user = new User(Uuids.random(), "test", "user", "testuser@example.com", Instant.now()); + + if (userDao.create(user, "fakePasswordForTests".toCharArray())) { + System.out.println("Created " + user); + } else { + user = userDao.getByEmail("testuser@example.com"); + System.out.println("Reusing existing " + user); + } + + // Creating another user with the same email should fail + assert !userDao.create( + new User(Uuids.random(), "test2", "user", "testuser@example.com", Instant.now()), + "fakePasswordForTests2".toCharArray()); + + // Simulate login attempts + tryLogin(userDao, "testuser@example.com", "fakePasswordForTests"); + tryLogin(userDao, "testuser@example.com", "fakePasswordForTests2"); + + // Insert a video + VideoDao videoDao = mapper.videoDao(); + + Video video = new Video(); + video.setUserid(user.getUserid()); + video.setName( + "Getting Started with DataStax Apache Cassandra as a Service on DataStax Astra"); + video.setLocation("https://www.youtube.com/watch?v=68xzKpcZURA"); + Set tags = new HashSet<>(); + tags.add("apachecassandra"); + tags.add("nosql"); + tags.add("hybridcloud"); + video.setTags(tags); + + videoDao.create(video); + System.out.printf("Created video [%s] %s%n", video.getVideoid(), video.getName()); + + // Check that associated denormalized tables have also been updated: + PagingIterable userVideos = videoDao.getByUser(user.getUserid()); + System.out.printf("Videos for %s %s:%n", user.getFirstname(), user.getLastname()); + for (UserVideo userVideo : userVideos) { + System.out.printf(" [%s] %s%n", userVideo.getVideoid(), userVideo.getName()); + } + + PagingIterable latestVideos = videoDao.getLatest(todaysTimestamp()); + System.out.println("Latest videos:"); + for (LatestVideo latestVideo : latestVideos) { + System.out.printf(" [%s] %s%n", latestVideo.getVideoid(), latestVideo.getName()); + } + + PagingIterable videosByTag = videoDao.getByTag("apachecassandra"); + System.out.println("Videos tagged with apachecassandra:"); + for (VideoByTag videoByTag : videosByTag) { + System.out.printf(" [%s] %s%n", videoByTag.getVideoid(), videoByTag.getName()); + } + + // Update the existing video: + Video template = new Video(); + template.setVideoid(video.getVideoid()); + template.setName( + "Getting Started with DataStax Apache Cassandra® as a Service on DataStax Astra"); + videoDao.update(template); + // Reload the whole entity and check the fields + video = videoDao.get(video.getVideoid()); + System.out.printf("Updated name for video %s: %s%n", video.getVideoid(), video.getName()); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private static void tryLogin(UserDao userDao, String email, String password) { + Optional maybeUser = userDao.login(email, password.toCharArray()); + System.out.printf( + "Logging in with %s/%s: %s%n", + email, password, maybeUser.isPresent() ? 
"Success" : "Failure"); + } + + private static void maybeCreateSchema(CqlSession session) throws Exception { + session.execute( + SimpleStatement.newInstance( + "CREATE KEYSPACE IF NOT EXISTS killrvideo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + .setExecutionProfileName("slow")); + session.execute("USE killrvideo"); + for (String statement : getStatements("killrvideo_schema.cql")) { + session.execute(SimpleStatement.newInstance(statement).setExecutionProfileName("slow")); + } + } + + private static List getStatements(String fileName) throws Exception { + Path path = Paths.get(ClassLoader.getSystemResource(fileName).toURI()); + String contents = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); + return Arrays.stream(contents.split(";")) + .map(String::trim) + .filter(s -> !s.isEmpty()) + .collect(Collectors.toList()); + } + + /** + * KillrVideo uses a textual timestamp to partition recent video. Build the timestamp for today to + * fetch our latest insertions. + */ + private static String todaysTimestamp() { + return DateTimeFormatter.ofPattern("yyyyMMdd").withZone(ZoneOffset.UTC).format(Instant.now()); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/KillrVideoMapper.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/KillrVideoMapper.java new file mode 100644 index 00000000000..c28130481b4 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/KillrVideoMapper.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.examples.mapper.killrvideo; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.examples.mapper.killrvideo.user.UserDao; +import com.datastax.oss.driver.examples.mapper.killrvideo.video.VideoDao; + +@Mapper +public interface KillrVideoMapper { + + @DaoFactory + UserDao userDao(); + + @DaoFactory + VideoDao videoDao(); + + static MapperBuilder builder(CqlSession session) { + return new KillrVideoMapperBuilder(session); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/CreateUserQueryProvider.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/CreateUserQueryProvider.java new file mode 100644 index 00000000000..baaeb13b67b --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/CreateUserQueryProvider.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.mapper.killrvideo.user; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import java.time.Instant; +import java.util.Objects; +import java.util.UUID; + +/** + * Provides the implementation of {@link UserDao#create}. + * + *

        Package-private visibility is sufficient, this will be called only from the generated DAO + * implementation. + */ +class CreateUserQueryProvider { + + private final CqlSession session; + private final EntityHelper userHelper; + private final EntityHelper credentialsHelper; + private final PreparedStatement preparedInsertCredentials; + private final PreparedStatement preparedInsertUser; + private final PreparedStatement preparedDeleteCredentials; + private final PreparedStatement preparedDeleteUser; + + CreateUserQueryProvider( + MapperContext context, + EntityHelper userHelper, + EntityHelper credentialsHelper) { + + this.session = context.getSession(); + + this.userHelper = userHelper; + this.credentialsHelper = credentialsHelper; + + this.preparedInsertCredentials = + session.prepare(credentialsHelper.insert().ifNotExists().asCql()); + this.preparedInsertUser = session.prepare(userHelper.insert().asCql()); + this.preparedDeleteCredentials = + session.prepare( + credentialsHelper + .deleteByPrimaryKey() + .ifColumn("userid") + .isEqualTo(bindMarker("userid")) + .builder() + .setConsistencyLevel(DefaultConsistencyLevel.ANY) + .build()); + this.preparedDeleteUser = + session.prepare( + userHelper + .deleteByPrimaryKey() + .ifExists() + .builder() + .setConsistencyLevel(DefaultConsistencyLevel.ANY) + .build()); + } + + boolean create(User user, char[] password) { + Objects.requireNonNull(user.getUserid()); + Objects.requireNonNull(user.getEmail()); + if (user.getCreatedDate() == null) { + user.setCreatedDate(Instant.now()); + } + + try { + // Insert the user first: otherwise there would be a short window where we have credentials + // without a corresponding user in the database, and this is considered an error state in + // LoginQueryProvider + insertUser(user); + if (!insertCredentialsIfNotExists(user.getEmail(), password, user.getUserid())) { + // email already exists + session.execute(preparedDeleteUser.bind(user.getUserid())); + return false; + } + return true; + } catch (Exception insertException) { + // Clean up and rethrow + try { + session.execute(preparedDeleteUser.bind(user.getUserid())); + } catch (Exception e) { + insertException.addSuppressed(e); + } + try { + session.execute(preparedDeleteCredentials.bind(user.getEmail(), user.getUserid())); + } catch (Exception e) { + insertException.addSuppressed(e); + } + throw insertException; + } + } + + private boolean insertCredentialsIfNotExists(String email, char[] password, UUID userId) { + String passwordHash = PasswordHashing.hash(Objects.requireNonNull(password)); + UserCredentials credentials = + new UserCredentials(Objects.requireNonNull(email), passwordHash, userId); + BoundStatementBuilder insertCredentials = preparedInsertCredentials.boundStatementBuilder(); + credentialsHelper.set(credentials, insertCredentials, NullSavingStrategy.DO_NOT_SET, false); + ResultSet resultSet = session.execute(insertCredentials.build()); + return resultSet.wasApplied(); + } + + private void insertUser(User user) { + BoundStatementBuilder insertUser = preparedInsertUser.boundStatementBuilder(); + userHelper.set(user, insertUser, NullSavingStrategy.DO_NOT_SET, false); + session.execute(insertUser.build()); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/LoginQueryProvider.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/LoginQueryProvider.java new file mode 100644 index 00000000000..6d88423f046 --- /dev/null +++ 
b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/LoginQueryProvider.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.mapper.killrvideo.user; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import java.util.Optional; +import java.util.UUID; + +/** + * Provides the implementation of {@link UserDao#login}. + * + *
<p>
        Package-private visibility is sufficient, this will be called only from the generated DAO + * implementation. + */ +class LoginQueryProvider { + + private final CqlSession session; + private final EntityHelper userHelper; + private final PreparedStatement preparedSelectCredentials; + private final PreparedStatement preparedSelectUser; + + LoginQueryProvider( + MapperContext context, + EntityHelper userHelper, + EntityHelper credentialsHelper) { + + this.session = context.getSession(); + + this.userHelper = userHelper; + + this.preparedSelectCredentials = + session.prepare(credentialsHelper.selectByPrimaryKey().asCql()); + this.preparedSelectUser = session.prepare(userHelper.selectByPrimaryKey().asCql()); + } + + Optional login(String email, char[] password) { + return Optional.ofNullable(session.execute(preparedSelectCredentials.bind(email)).one()) + .flatMap( + credentialsRow -> { + String hashedPassword = credentialsRow.getString("password"); + if (PasswordHashing.matches(password, hashedPassword)) { + UUID userid = credentialsRow.getUuid("userid"); + Row userRow = session.execute(preparedSelectUser.bind(userid)).one(); + if (userRow == null) { + throw new IllegalStateException( + "Should have found matching row for userid " + userid); + } else { + return Optional.of(userHelper.get(userRow, false)); + } + } else { + return Optional.empty(); + } + }); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/PasswordHashing.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/PasswordHashing.java new file mode 100644 index 00000000000..def919bc1f8 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/PasswordHashing.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.mapper.killrvideo.user; + +import at.favre.lib.crypto.bcrypt.BCrypt; + +/** + * Utility methods to safely store passwords in the database. + * + *
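+ * <p>Typical round trip (a sketch, not part of the original file; {@code password} is any
+ * {@code char[]} chosen by the caller):
+ *
+ * <pre>{@code
+ * String hash = PasswordHashing.hash(password); // store the hash, never the password
+ * boolean ok = PasswordHashing.matches(password, hash);
+ * }</pre>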
<p>
        We rely on a third-party implementation of the bcrypt password hash function. + * + * @see patrickfav/bcrypt + */ +public class PasswordHashing { + + public static String hash(char[] password) { + return BCrypt.withDefaults().hashToString(12, password); + } + + public static boolean matches(char[] password, String hash) { + BCrypt.Result result = BCrypt.verifyer().verify(password, hash); + return result.verified; + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/User.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/User.java new file mode 100644 index 00000000000..f00b142dc7a --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/User.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.mapper.killrvideo.user; + +import com.datastax.oss.driver.api.mapper.annotations.CqlName; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import java.time.Instant; +import java.util.StringJoiner; +import java.util.UUID; + +@Entity +@CqlName("users") +public class User { + + @PartitionKey private UUID userid; + private String firstname; + private String lastname; + private String email; + private Instant createdDate; + + public User(UUID userid, String firstname, String lastname, String email, Instant createdDate) { + this.userid = userid; + this.firstname = firstname; + this.lastname = lastname; + this.email = email; + this.createdDate = createdDate; + } + + public User() {} + + public UUID getUserid() { + return userid; + } + + public void setUserid(UUID userid) { + this.userid = userid; + } + + public String getFirstname() { + return firstname; + } + + public void setFirstname(String firstname) { + this.firstname = firstname; + } + + public String getLastname() { + return lastname; + } + + public void setLastname(String lastname) { + this.lastname = lastname; + } + + public String getEmail() { + return email; + } + + public void setEmail(String email) { + this.email = email; + } + + public Instant getCreatedDate() { + return createdDate; + } + + public void setCreatedDate(Instant createdDate) { + this.createdDate = createdDate; + } + + @Override + public String toString() { + return new StringJoiner(", ", User.class.getSimpleName() + "[", "]") + .add("userid=" + userid) + .add("firstname='" + firstname + "'") + .add("lastname='" + lastname + "'") + .add("email='" + email + "'") + .add("createdDate=" + createdDate) + .toString(); + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserCredentials.java 
b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserCredentials.java new file mode 100644 index 00000000000..0b6b32219ce --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserCredentials.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.mapper.killrvideo.user; + +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import java.util.UUID; + +@Entity +public class UserCredentials { + @PartitionKey private String email; + + private String password; + + private UUID userid; + + public UserCredentials(String email, String password, UUID userid) { + this.email = email; + this.password = password; + this.userid = userid; + } + + public UserCredentials() {} + + public String getEmail() { + return email; + } + + public void setEmail(String email) { + this.email = email; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public UUID getUserid() { + return userid; + } + + public void setUserid(UUID userid) { + this.userid = userid; + } +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserDao.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserDao.java new file mode 100644 index 00000000000..19f7f7cad0e --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/user/UserDao.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.examples.mapper.killrvideo.user; + +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import java.util.Optional; +import java.util.UUID; + +@Dao +public interface UserDao { + + /** Simple selection by full primary key. */ + @Select + User get(UUID userid); + + @Select + UserCredentials getCredentials(String email); + + /** + * An alternative to query providers is default methods that call other methods on the DAO. + * + *
<p>
        The only drawback is that those other methods have to be part of the DAO's public API. + */ + default User getByEmail(String email) { + UserCredentials credentials = getCredentials(email); + return (credentials == null) ? null : get(credentials.getUserid()); + } + + /** + * Creating a user is more than a single insert: we have to update two different tables, check + * that the email is not used already, and handle password encryption. + * + *
<p>
        We use a query provider to wrap everything into a single method. + * + *
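+ * <p>A sketch of a typical call, assuming a built mapper and an open session (the field values
+ * are made up; a null created date is defaulted to now by the provider):
+ *
+ * <pre>{@code
+ * UserDao dao = KillrVideoMapper.builder(session).build().userDao();
+ * User user = new User(Uuids.random(), "Jane", "Doe", "jane@example.com", null);
+ * boolean created = dao.create(user, "secret".toCharArray());
+ * }</pre>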
<p>
        Note that you could opt for a more layered approach: only expose basic operations on the DAO + * (insertCredentialsIfNotExists, insertUser...) and add a service layer on top for more complex + * logic. Both designs are valid, this is a matter of personal choice. + * + * @return {@code true} if the new user was created, or {@code false} if this email address was + * already taken. + */ + @QueryProvider( + providerClass = CreateUserQueryProvider.class, + entityHelpers = {User.class, UserCredentials.class}) + boolean create(User user, char[] password); + + /** + * Similar to {@link #create}, this encapsulates encryption so we use a query provider. + * + * @return the authenticated user, or {@link Optional#empty()} if the credentials are invalid. + */ + @QueryProvider( + providerClass = LoginQueryProvider.class, + entityHelpers = {User.class, UserCredentials.class}) + Optional login(String email, char[] password); +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/video/CreateVideoQueryProvider.java b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/video/CreateVideoQueryProvider.java new file mode 100644 index 00000000000..132baa474d4 --- /dev/null +++ b/examples/src/main/java/com/datastax/oss/driver/examples/mapper/killrvideo/video/CreateVideoQueryProvider.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.examples.mapper.killrvideo.video; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; + +/** + * Provides the implementation of {@link VideoDao#create}. + * + *
<p>
        Package-private visibility is sufficient, this will be called only from the generated DAO + * implementation. + */ +class CreateVideoQueryProvider { + + private final CqlSession session; + private final EntityHelper

        Also, note that this queries a different table: DAOs are not limited to a single entity, the + * return type of the method dictates what rows will be mapped to. + */ + @Select + PagingIterable getByUser(UUID userid); + + /** Other selection by partial primary key, for another table. */ + @Select + PagingIterable getLatest(String yyyymmdd); + + /** Other selection by partial primary key, for yet another table. */ + @Select + PagingIterable getByTag(String tag); + + /** + * Creating a video is a bit more complex: because of denormalization, it involves multiple + * tables. + * + *
<p>
        A query provider is a nice way to wrap all the queries in a single operation, and hide the + * details from the DAO interface. + */ + @QueryProvider( + providerClass = CreateVideoQueryProvider.class, + entityHelpers = {Video.class, UserVideo.class, LatestVideo.class, VideoByTag.class}) + void create(Video video); + + /** + * Update using a template: the template must have its full primary key set; beyond that, any + * non-null field will be considered as a value to SET on the target row. + * + *
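+ * <p>A sketch of a partial update (the setter names are hypothetical, by analogy with the other
+ * entities in this example):
+ *
+ * <pre>{@code
+ * Video template = new Video();
+ * template.setVideoid(videoid);       // full primary key, mandatory
+ * template.setDescription("updated"); // the only non-null field, so the only column SET
+ * dao.update(template);
+ * }</pre>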
<p>
        Note that we specify the null saving strategy for emphasis, but this is the default. + */ + @Update(nullSavingStrategy = NullSavingStrategy.DO_NOT_SET) + void update(Video template); +} diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java b/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java index c59d22815c1..a512457d618 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/paging/ForwardPagingRestUi.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java b/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java index 7e0ed814ec4..7eb3249b0ac 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/paging/RandomPagingRestUi.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,18 +18,17 @@ package com.datastax.oss.driver.examples.paging; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.paging.OffsetPager; +import com.datastax.oss.driver.api.core.paging.OffsetPager.Page; import com.datastax.oss.driver.internal.core.type.codec.DateCodec; import com.sun.net.httpserver.HttpServer; import java.io.IOException; import java.net.URI; -import java.nio.ByteBuffer; import java.time.Instant; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -206,12 +207,12 @@ public static class UserService { @Context private UriInfo uri; private PreparedStatement videosByUser; - private Pager pager; + private OffsetPager pager; @PostConstruct @SuppressWarnings("unused") public void init() { - this.pager = new Pager(session, ITEMS_PER_PAGE); + this.pager = new OffsetPager(ITEMS_PER_PAGE); this.videosByUser = session.prepare( "SELECT videoid, title, added FROM examples.random_paging_rest_ui WHERE userid = ?"); @@ -221,41 +222,37 @@ public void init() { * Returns a paginated list of all the videos created by the given user. * * @param userid the user ID. - * @param page the page to request, or {@code null} to get the first page. + * @param requestedPageNumber the page to request, or {@code null} to get the first page. */ @GET @Path("/{userid}/videos") public UserVideosResponse getUserVideos( - @PathParam("userid") int userid, @QueryParam("page") Integer page) { + @PathParam("userid") int userid, @QueryParam("page") Integer requestedPageNumber) { - Statement statement = videosByUser.bind(userid).setPageSize(FETCH_SIZE); + BoundStatement statement = videosByUser.bind(userid).setPageSize(FETCH_SIZE); - if (page == null) { - page = 1; + if (requestedPageNumber == null) { + requestedPageNumber = 1; } - ResultSet rs = pager.skipTo(statement, page); - - List videos; - boolean empty = !rs.iterator().hasNext(); - if (empty) { - videos = Collections.emptyList(); - } else { - int remaining = ITEMS_PER_PAGE; - videos = new ArrayList<>(remaining); - for (Row row : rs) { - UserVideo video = - new UserVideo(row.getInt("videoid"), row.getString("title"), row.getInstant("added")); - videos.add(video); - - if (--remaining == 0) { - break; - } - } + Page page = pager.getPage(session.execute(statement), requestedPageNumber); + + List videos = new ArrayList<>(page.getElements().size()); + for (Row row : page.getElements()) { + UserVideo video = + new UserVideo(row.getInt("videoid"), row.getString("title"), row.getInstant("added")); + videos.add(video); } + // The actual number could be different if the requested one was past the end + int actualPageNumber = page.getPageNumber(); URI previous = - (page == 1) ? null : uri.getAbsolutePathBuilder().queryParam("page", page - 1).build(); - URI next = (empty) ? 
null : uri.getAbsolutePathBuilder().queryParam("page", page + 1).build(); + (actualPageNumber == 1) + ? null + : uri.getAbsolutePathBuilder().queryParam("page", actualPageNumber - 1).build(); + URI next = + page.isLast() + ? null + : uri.getAbsolutePathBuilder().queryParam("page", actualPageNumber + 1).build(); return new UserVideosResponse(videos, previous, next); } } @@ -315,59 +312,4 @@ public Instant getAdded() { return added; } } - - /** - * Helper class to emulate random paging. - * - *
<p>
        Note that it MUST be stateless, because it is cached as a field in our HTTP handler. - */ - static class Pager { - private final CqlSession session; - private final int pageSize; - - Pager(CqlSession session, int pageSize) { - this.session = session; - this.pageSize = pageSize; - } - - ResultSet skipTo(Statement statement, int displayPage) { - // Absolute index of the first row we want to display on the web page. Our goal is that - // rs.next() returns that row. - int targetRow = (displayPage - 1) * pageSize; - - ResultSet rs = session.execute(statement); - // Absolute index of the next row returned by rs (if it is not exhausted) - int currentRow = 0; - int fetchedSize = rs.getAvailableWithoutFetching(); - ByteBuffer nextState = rs.getExecutionInfo().getPagingState(); - - // Skip protocol pages until we reach the one that contains our target row. - // For example, if the first query returned 60 rows and our target is row number 90, we know - // we can skip those 60 rows directly without even iterating through them. - // This part is optional, we could simply iterate through the rows with the for loop below, - // but that's slightly less efficient because iterating each row involves a bit of internal - // decoding. - while (fetchedSize > 0 && nextState != null && currentRow + fetchedSize < targetRow) { - statement = statement.setPagingState(nextState); - rs = session.execute(statement); - currentRow += fetchedSize; - fetchedSize = rs.getAvailableWithoutFetching(); - nextState = rs.getExecutionInfo().getPagingState(); - } - - if (currentRow < targetRow) { - for (@SuppressWarnings("unused") Row row : rs) { - if (++currentRow == targetRow) { - break; - } - } - } - // If targetRow is past the end, rs will be exhausted. - // This means you can request a page past the end in the web UI (e.g. request page 12 while - // there are only 10 pages), and it will show up as empty. - // One improvement would be to detect that and take a different action, for example redirect - // to page 10 or show an error message, this is left as an exercise for the reader. - return rs; - } - } } diff --git a/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java b/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java index c2cd119a1c8..0577432600b 100644 --- a/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java +++ b/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,13 +36,20 @@ import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; import com.datastax.oss.driver.api.core.servererrors.UnavailableException; import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; +import java.util.List; /** * This example illustrates how to implement a downgrading retry strategy from application code. * - *
<p>
        This was provided as a built-in policy in driver 3 ({@code - * DowngradingConsistencyRetryPolicy}), but has been removed from driver 4. See the FAQ. + *
<p>
        This strategy is equivalent to the logic implemented by the consistency downgrading retry + * policy, but we think that such logic should be implemented at the application level whenever + * possible. + * + *
<p>
        See the FAQ + * and the manual + * section on retries. * *
<p>
        Preconditions: * @@ -70,7 +79,7 @@ * idempotence for more information. * * - * @see Java driver online + * @see Java Driver online * manual */ public class DowngradingRetry { @@ -418,9 +427,11 @@ private static ConsistencyLevel downgrade( private static DriverException unwrapAllNodesFailedException(DriverException e) { if (e instanceof AllNodesFailedException) { AllNodesFailedException noHostAvailable = (AllNodesFailedException) e; - for (Throwable error : noHostAvailable.getErrors().values()) { - if (error instanceof QueryConsistencyException || error instanceof UnavailableException) { - return (DriverException) error; + for (List errors : noHostAvailable.getAllErrors().values()) { + for (Throwable error : errors) { + if (error instanceof QueryConsistencyException || error instanceof UnavailableException) { + return (DriverException) error; + } } } // Couldn't find an exploitable error to unwrap: abort. diff --git a/examples/src/main/resources/application.conf b/examples/src/main/resources/application.conf index 002018efc91..170c08d973a 100644 --- a/examples/src/main/resources/application.conf +++ b/examples/src/main/resources/application.conf @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + datastax-java-driver { basic.contact-points = ["127.0.0.1:9042"] basic { @@ -11,4 +28,12 @@ datastax-java-driver { max-concurrent-requests = 32 max-queue-size = 10000 } -} \ No newline at end of file + + advanced.request.warn-if-set-keyspace = false + + profiles { + slow { + basic.request.timeout = 10 seconds + } + } +} diff --git a/examples/src/main/resources/killrvideo_schema.cql b/examples/src/main/resources/killrvideo_schema.cql new file mode 100644 index 00000000000..0c604ba5922 --- /dev/null +++ b/examples/src/main/resources/killrvideo_schema.cql @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// User credentials, keyed by email address so we can authenticate +CREATE TABLE IF NOT EXISTS user_credentials ( + email text, + password text, + userid uuid, + PRIMARY KEY (email) +); + +// Users keyed by id +CREATE TABLE IF NOT EXISTS users ( + userid uuid, + firstname text, + lastname text, + email text, + created_date timestamp, + PRIMARY KEY (userid) +); + +// Videos by id +CREATE TABLE IF NOT EXISTS videos ( + videoid uuid, + userid uuid, + name text, + description text, + location text, + location_type int, + preview_image_location text, + tags set, + added_date timestamp, + PRIMARY KEY (videoid) +); + +// One-to-many from user point of view (lookup table) +CREATE TABLE IF NOT EXISTS user_videos ( + userid uuid, + added_date timestamp, + videoid uuid, + name text, + preview_image_location text, + PRIMARY KEY (userid, added_date, videoid) +) WITH CLUSTERING ORDER BY (added_date DESC, videoid ASC); + +// Track latest videos, grouped by day (if we ever develop a bad hotspot from the daily grouping here, we could mitigate by +// splitting the row using an arbitrary group number, making the partition key (yyyymmdd, group_number)) +CREATE TABLE IF NOT EXISTS latest_videos ( + yyyymmdd text, + added_date timestamp, + videoid uuid, + userid uuid, + name text, + preview_image_location text, + PRIMARY KEY (yyyymmdd, added_date, videoid) +) WITH CLUSTERING ORDER BY (added_date DESC, videoid ASC); + +// Video ratings (counter table) +CREATE TABLE IF NOT EXISTS video_ratings ( + videoid uuid, + rating_counter counter, + rating_total counter, + PRIMARY KEY (videoid) +); + +// Video ratings by user (to try and mitigate voting multiple times) +CREATE TABLE IF NOT EXISTS video_ratings_by_user ( + videoid uuid, + userid uuid, + rating int, + PRIMARY KEY (videoid, userid) +); + +// Records the number of views/playbacks of a video +CREATE TABLE IF NOT EXISTS video_playback_stats ( + videoid uuid, + views counter, + PRIMARY KEY (videoid) +); + +// Recommendations by user (powered by Spark), with the newest videos added to the site always first +CREATE TABLE IF NOT EXISTS video_recommendations ( + userid uuid, + added_date timestamp, + videoid uuid, + rating float, + authorid uuid, + name text, + preview_image_location text, + PRIMARY KEY(userid, added_date, videoid) +) WITH CLUSTERING ORDER BY (added_date DESC, videoid ASC); + +// Recommendations by video (powered by Spark) +CREATE TABLE IF NOT EXISTS video_recommendations_by_video ( + videoid uuid, + userid uuid, + rating float, + added_date timestamp STATIC, + authorid uuid STATIC, + name text STATIC, + preview_image_location text STATIC, + PRIMARY KEY(videoid, userid) +); + +// Index for tag keywords +CREATE TABLE IF NOT EXISTS videos_by_tag ( + tag text, + videoid uuid, + added_date timestamp, + userid uuid, + name text, + preview_image_location text, + tagged_date timestamp, + PRIMARY KEY (tag, videoid) +); + +// Index for tags by first letter in the tag +CREATE TABLE IF NOT EXISTS tags_by_letter ( + first_letter text, + tag text, + PRIMARY KEY (first_letter, tag) +); + +// Comments for a given video +CREATE TABLE IF NOT EXISTS comments_by_video ( + videoid uuid, + commentid timeuuid, + userid uuid, + comment text, + PRIMARY KEY (videoid, commentid) +) WITH CLUSTERING ORDER BY (commentid DESC); + +// Comments for a given user +CREATE TABLE IF NOT EXISTS comments_by_user ( + userid uuid, + commentid timeuuid, + videoid uuid, + comment text, + PRIMARY KEY (userid, commentid) +) WITH CLUSTERING ORDER BY (commentid DESC); diff --git 
a/examples/src/main/resources/logback.xml b/examples/src/main/resources/logback.xml index db2d9e5bcb4..061ccccad37 100644 --- a/examples/src/main/resources/logback.xml +++ b/examples/src/main/resources/logback.xml @@ -1,12 +1,15 @@ + - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - - - - - \ No newline at end of file + + diff --git a/faq/README.md b/faq/README.md index fdf42c85723..97cb4decd00 100644 --- a/faq/README.md +++ b/faq/README.md @@ -1,3 +1,22 @@ + + ## Frequently asked questions ### I'm modifying a statement and the changes get ignored, why? @@ -43,28 +62,63 @@ use a fully asynchronous programming model (chaining callbacks instead of blocki At any rate, `CompletionStage` has a `toCompletableFuture()` method. In current JDK versions, every `CompletionStage` is a `CompletableFuture`, so the conversion has no performance overhead. -### Where is `DowngradingConsistencyRetryPolicy`? +### Where is `DowngradingConsistencyRetryPolicy` from driver 3? + +**As of driver 4.10, this retry policy was made available again as a built-in alternative to the +default retry policy**: see the [manual](../manual/core/retries) to understand how to use it. +For versions between 4.0 and 4.9 inclusive, there is no built-in equivalent of driver 3 +`DowngradingConsistencyRetryPolicy`. + +That retry policy was indeed removed in driver 4.0.0. The main motivation is that this behavior +should be the application's concern, not the driver's. APIs provided by the driver should instead +encourage idiomatic use of a distributed system like Apache Cassandra, and a downgrading policy +works against this. It suggests that an anti-pattern such as "try to read at QUORUM, but fall back +to ONE if that fails" is a good idea in general use cases, when in reality it provides no better +consistency guarantees than working directly at ONE, but with higher latencies. + +However, we recognize that there are use cases where downgrading is good -- for instance, a +dashboard application would present the latest information by reading at QUORUM, but it's acceptable +for it to display stale information by reading at ONE sometimes. + +Thanks to [JAVA-2900], an equivalent retry policy with downgrading behavior was re-introduced in +driver 4.10. Nonetheless, we urge users to avoid using it unless strictly required, and instead, +carefully choose upfront the consistency level that works best for their use cases. Even if there +is a legitimate reason to downgrade and retry, that should be preferably handled by the application +code. An example of downgrading retries implemented at application level can be found in the driver +[examples repository]. + +[JAVA-2900]: https://datastax-oss.atlassian.net/browse/JAVA-2900 +[examples repository]: https://github.com/datastax/java-driver/blob/4.x/examples/src/main/java/com/datastax/oss/driver/examples/retry/DowngradingRetry.java -That retry policy was deprecated in driver 3.5.0, and does not exist anymore in 4.0.0. The main -motivation is that this behavior should be the application's concern, not the driver's. +### Where is the cross-datacenter failover feature that existed in driver 3? -We recognize that there are use cases where downgrading is good -- for instance, a dashboard -application would present the latest information by reading at QUORUM, but it's acceptable for it to -display stale information by reading at ONE sometimes. 
+In driver 3, it was possible to configure the load balancing policy to automatically fail over to +a remote datacenter when the local datacenter is down. -But APIs provided by the driver should instead encourage idiomatic use of a distributed system like -Apache Cassandra, and a downgrading policy works against this. It suggests that an anti-pattern such -as "try to read at QUORUM, but fall back to ONE if that fails" is a good idea in general use cases, -when in reality it provides no better consistency guarantees than working directly at ONE, but with -higher latencies. +This ability is considered a misfeature and has been removed from driver 4.0 onwards. -We therefore urge users to carefully choose upfront the consistency level that works best for their -use cases. If there is a legitimate reason to downgrade and retry, that should be handled by the -application code. +However, due to popular demand, cross-datacenter failover has been brought back to driver 4 in +version 4.10.0. + +If you are using a driver version >= 4.10.0, read the [manual](../manual/core/loadbalancing/) to +understand how to enable this feature; for driver versions < 4.10.0, this feature is simply not +available. ### I want to set a date on a bound statement, where did `setTimestamp()` go? The driver now uses Java 8's improved date and time API. CQL type `timestamp` is mapped to `java.time.Instant`, and the corresponding getter and setter are `getInstant` and `setInstant`. -See [Temporal types](../manual/core/temporal_types/) for more details. \ No newline at end of file +See [Temporal types](../manual/core/temporal_types/) for more details. + +### Why do DDL queries have a higher latency than in driver 3? + +If you benchmark DDL queries such as `session.execute("CREATE TABLE ...")`, you will observe a +noticeably higher latency than in driver 3 (about 1 second). + +This is because those queries are now *debounced*: the driver adds a short wait in an attempt to +group multiple schema changes into a single metadata refresh. If you want to mitigate this, you can +either adjust the debouncing settings, or group your schema updates while temporarily disabling the +metadata; see the [performance](../manual/core/performance/#debouncing) page. + +This only applies to DDL queries; DML statements (`SELECT`, `INSERT`...) are not debounced.
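+To illustrate the second mitigation (grouping schema updates while schema metadata is temporarily
+disabled), here is a minimal sketch; the keyspace and table names are hypothetical:
+
+```java
+import com.datastax.oss.driver.api.core.CqlSession;
+
+public class GroupedSchemaChanges {
+  public static void main(String[] args) {
+    try (CqlSession session = CqlSession.builder().build()) {
+      // Suspend schema metadata refreshes while several DDL statements run:
+      session.setSchemaMetadataEnabled(false);
+      session.execute("CREATE TABLE IF NOT EXISTS test.t1 (k int PRIMARY KEY)");
+      session.execute("CREATE TABLE IF NOT EXISTS test.t2 (k int PRIMARY KEY)");
+      // Setting it back to true triggers a single, immediate metadata refresh:
+      session.setSchemaMetadataEnabled(true);
+    }
+  }
+}
+```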
diff --git a/guava-shaded/pom.xml b/guava-shaded/pom.xml new file mode 100644 index 00000000000..da2e82e0ab0 --- /dev/null +++ b/guava-shaded/pom.xml @@ -0,0 +1,242 @@ + + + + 4.0.0 + + org.apache.cassandra + java-driver-parent + 4.19.3-SNAPSHOT + + java-driver-guava-shaded + Apache Cassandra Java Driver - guava shaded dep + Shaded Guava artifact for use in the Java driver for Apache Cassandra® + + + com.google.guava + guava + + + com.google.code.findbugs + jsr305 + + + org.checkerframework + checker-qual + + + com.google.errorprone + error_prone_annotations + + + true + + + org.graalvm.nativeimage + svm + 20.0.0 + provided + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 1.12 + + + regex-property + + regex-property + + + maven.main.skip + ${java.version} + ^(?!1.8).+ + true + false + + + + + + maven-shade-plugin + + + shade-guava-dependency + package + + shade + + + + + org.apache.cassandra:java-driver-guava-shaded + com.google.guava:guava + com.google.guava:failureaccess + com.google.j2objc:j2objc-annotations + + + + + com.google + com.datastax.oss.driver.shaded.guava + + + + + com.google.guava:* + + META-INF/** + + + + true + true + + + + + + + maven-clean-plugin + + + clean-classes + package + + clean + + + ${project.build.outputDirectory} + + + + + + maven-dependency-plugin + + + unpack-shaded-classes + package + + unpack + + + ${project.build.outputDirectory} + + + org.apache.cassandra + java-driver-guava-shaded + ${project.version} + jar + + + + + + + + org.apache.felix + maven-bundle-plugin + + 3.5.0 + true + + + generate-shaded-manifest + package + + manifest + + + + com.datastax.oss.driver.shaded.guava + !com.datastax.oss.driver.shaded.guava.errorprone.*, !org.checkerframework.*, * + javax.annotation.*;resolution:=optional;version="[3.0,4)", javax.crypto.*;resolution:=optional, sun.misc.*;resolution:=optional, !com.oracle.svm.*, !com.datastax.oss.driver.shaded.guava.errorprone.*, !org.checkerframework.*, * + + + + + + + maven-assembly-plugin + + + generate-final-shaded-jar + package + + single + + + + + ${project.build.outputDirectory}/META-INF/MANIFEST.MF + + + src/assembly/shaded-jar.xml + + + false + + + + + + maven-jar-plugin + + + empty-javadoc-jar + + jar + + + javadoc + ${basedir}/src/main/javadoc + + + + + + org.revapi + revapi-maven-plugin + + true + + + + + diff --git a/guava-shaded/src/assembly/shaded-jar.xml b/guava-shaded/src/assembly/shaded-jar.xml new file mode 100644 index 00000000000..d762a27b20f --- /dev/null +++ b/guava-shaded/src/assembly/shaded-jar.xml @@ -0,0 +1,48 @@ + + + + shaded-jar + + jar + + false + + + + ${project.build.outputDirectory} + + META-INF/maven/org.apache.cassandra/java-driver-guava-shaded/pom.xml + + + + + + + + ${project.basedir}/dependency-reduced-pom.xml + META-INF/maven/org.apache.cassandra/java-driver-guava-shaded + pom.xml + + + diff --git a/guava-shaded/src/main/java/com/google/common/primitives/LexicographicalComparatorHolderSubstitution.java b/guava-shaded/src/main/java/com/google/common/primitives/LexicographicalComparatorHolderSubstitution.java new file mode 100644 index 00000000000..95e9c70cdbc --- /dev/null +++ b/guava-shaded/src/main/java/com/google/common/primitives/LexicographicalComparatorHolderSubstitution.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.common.primitives; + +import com.oracle.svm.core.annotate.Alias; +import com.oracle.svm.core.annotate.RecomputeFieldValue; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; +import java.util.Comparator; + +@TargetClass(UnsignedBytes.LexicographicalComparatorHolder.class) +final class LexicographicalComparatorHolderSubstitution { + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.FromAlias) + static Comparator BEST_COMPARATOR = UnsignedBytes.lexicographicalComparatorJavaImpl(); + + /* All known cases should be covered by the field substitution above... keeping this only + * for sake of completeness */ + @Substitute + static Comparator getBestComparator() { + return UnsignedBytes.lexicographicalComparatorJavaImpl(); + } +} diff --git a/guava-shaded/src/main/java/com/google/common/primitives/UnsafeComparatorSubstitution.java b/guava-shaded/src/main/java/com/google/common/primitives/UnsafeComparatorSubstitution.java new file mode 100644 index 00000000000..549de0b5c02 --- /dev/null +++ b/guava-shaded/src/main/java/com/google/common/primitives/UnsafeComparatorSubstitution.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.common.primitives; + +import com.oracle.svm.core.annotate.Delete; +import com.oracle.svm.core.annotate.TargetClass; + +@TargetClass(UnsignedBytes.LexicographicalComparatorHolder.UnsafeComparator.class) +@Delete +final class UnsafeComparatorSubstitution {} diff --git a/guava-shaded/src/main/javadoc/README.txt b/guava-shaded/src/main/javadoc/README.txt new file mode 100644 index 00000000000..57f82b2a265 --- /dev/null +++ b/guava-shaded/src/main/javadoc/README.txt @@ -0,0 +1,2 @@ +This empty JAR is generated for compliance with Maven Central rules. Please refer to the original +Guava API docs. \ No newline at end of file diff --git a/install-snapshots.sh b/install-snapshots.sh new file mode 100755 index 00000000000..795b4098f52 --- /dev/null +++ b/install-snapshots.sh @@ -0,0 +1,41 @@ +#!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Install dependencies in the Travis build environment if they are snapshots. +# See .travis.yml + +set -u + +install_snapshot() +{ + URL=$1 + DIRECTORY_NAME=$2 + # Assume the snapshot we want is on the head of the default branch + git clone --depth 1 ${URL} /tmp/${DIRECTORY_NAME} + { + cd /tmp/${DIRECTORY_NAME} + mvn install -DskipTests + } +} + +mvn --projects core dependency:list -DincludeArtifactIds=native-protocol | \ + tee /dev/tty | \ + grep -q native-protocol.*SNAPSHOT +if [ $? -eq 0 ] ; then + install_snapshot https://github.com/datastax/native-protocol.git native-protocol +fi diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml index 1d8ee49696f..e302e12077f 100644 --- a/integration-tests/pom.xml +++ b/integration-tests/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-integration-tests jar - - DataStax Java driver for Apache Cassandra(R) - integration tests - + Apache Cassandra Java Driver - integration tests false ${skipITs} ${skipITs} ${skipITs} - + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + - com.datastax.oss + org.apache.cassandra java-driver-test-infra - ${project.parent.version} test - com.datastax.oss + org.apache.cassandra java-driver-query-builder - ${project.parent.version} test - com.datastax.oss + org.apache.cassandra java-driver-mapper-processor - ${project.parent.version} test true - com.datastax.oss + org.apache.cassandra java-driver-mapper-runtime - ${project.parent.version} + test + + + org.apache.cassandra + java-driver-core + test-jar + test + + + org.apache.cassandra + java-driver-metrics-micrometer + test + + + org.apache.cassandra + java-driver-metrics-microprofile + test + + + com.github.stephenc.jcip + jcip-annotations test @@ -67,6 +93,16 @@ spotbugs-annotations test + + com.fasterxml.jackson.core + jackson-core + test + + + com.fasterxml.jackson.core + jackson-databind + test + com.tngtech.java junit-dataprovider @@ -93,144 +129,180 @@ test - org.lz4 + at.yawk.lz4 lz4-java test - org.ops4j.pax.exam - pax-exam-junit4 + io.reactivex.rxjava2 + rxjava + test + + + org.apache.tinkerpop + gremlin-core + test + + + org.apache.tinkerpop + tinkergraph-gremlin + test + + + org.apache.directory.server + apacheds-core + test + + + org.apache.directory.server + apacheds-protocol-kerberos + test + + + org.apache.directory.server + apacheds-interceptor-kerberos + test + + + org.apache.directory.server + apacheds-protocol-ldap + test + + + org.apache.directory.server + apacheds-ldif-partition + test + + + org.apache.directory.server + apacheds-jdbm-partition + test + + + org.apache.directory.api + api-ldap-codec-standalone + test + + + com.github.tomakehurst + wiremock + test + + + com.datastax.oss.simulacron + simulacron-native-server + 
test + + + org.apache.commons + commons-exec + test + + + io.smallrye + smallrye-metrics test - org.ops4j.pax.exam - pax-exam-container-native + io.projectreactor + reactor-core test - org.ops4j.pax.exam - pax-exam-link-mvn + io.projectreactor + reactor-test test - org.apache.felix - org.apache.felix.framework + io.projectreactor.tools + blockhound-junit-platform + test + + + com.esri.geometry + esri-geometry-api test - - - maven-jar-plugin - - - test-jar - - test-jar - - - - logback-test.xml - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - - ${project.version} - ${assertj.version} - ${config.version} - ${commons-exec.version} - ${guava.version} - ${hdrhistogram.version} - ${jackson.version} - ${logback.version} - ${lz4.version} - ${metrics.version} - ${native-protocol.version} - ${netty.version} - ${simulacron.version} - ${slf4j.version} - ${snappy.version} - - - org.apache.maven.plugins maven-failsafe-plugin - - - - ${project.version} - ${assertj.version} - ${config.version} - ${commons-exec.version} - ${guava.version} - ${hdrhistogram.version} - ${jackson.version} - ${logback.version} - ${lz4.version} - ${metrics.version} - ${native-protocol.version} - ${netty.version} - ${simulacron.version} - ${slf4j.version} - ${snappy.version} - - parallelizable-tests integration-test - verify + ${testing.jvm}/bin/java com.datastax.oss.driver.categories.ParallelizableTests classes 8 - - true - parallelized + ${project.build.directory}/failsafe-reports/failsafe-summary-parallelized.xml ${skipParallelizableITs} + ${blockhound.argline} + ${testing.jvm}/bin/java serial-tests integration-test - verify - - com.datastax.oss.driver.categories.ParallelizableTests, - com.datastax.oss.driver.categories.IsolatedTests - - true - serial + com.datastax.oss.driver.categories.ParallelizableTests, com.datastax.oss.driver.categories.IsolatedTests + ${project.build.directory}/failsafe-reports/failsafe-summary-serial.xml ${skipSerialITs} + ${blockhound.argline} + ${testing.jvm}/bin/java isolated-tests integration-test - verify com.datastax.oss.driver.categories.IsolatedTests 1 false - true - isolated + ${project.build.directory}/failsafe-reports/failsafe-summary-isolated.xml + ${skipIsolatedITs} + ${blockhound.argline} + ${testing.jvm}/bin/java + + + + verify-parallelized + + verify + + + ${skipParallelizableITs} + ${project.build.directory}/failsafe-reports/failsafe-summary-parallelized.xml + + + + verify-serial + + verify + + + ${skipSerialITs} + ${project.build.directory}/failsafe-reports/failsafe-summary-serial.xml + + + + verify-isolated + + verify + + ${skipIsolatedITs} + ${project.build.directory}/failsafe-reports/failsafe-summary-isolated.xml @@ -242,10 +314,25 @@ true - + + maven-install-plugin + + true + + + + maven-deploy-plugin + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderAlternateIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderAlternateIT.java new file mode 100644 index 00000000000..55c420e276b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderAlternateIT.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.runner.RunWith; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "Required for DseAuthenticator") +@RunWith(DataProviderRunner.class) +public class DseGssApiAuthProviderAlternateIT { + @ClassRule public static EmbeddedAdsRule ads = new EmbeddedAdsRule(true); + + @DataProvider + public static Object[][] saslSystemProperties() { + return new Object[][] {{"dse.sasl.service"}, {"dse.sasl.protocol"}}; + } + + @Test + @UseDataProvider("saslSystemProperties") + public void + should_authenticate_using_kerberos_with_keytab_and_alternate_service_principal_using_system_property( + String saslSystemProperty) { + System.setProperty(saslSystemProperty, "alternate"); + try (CqlSession session = + SessionUtils.newSession( + ads.getCcm(), + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) + .withStringMap( + DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, + ImmutableMap.of("javax.security.sasl.qop", "auth-conf")) + .withStringMap( + DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, + ImmutableMap.of( + "principal", + ads.getUserPrincipal(), + "useKeyTab", + "true", + "refreshKrb5Config", + "true", + "keyTab", + ads.getUserKeytab().getAbsolutePath())) + .build())) { + Row row = session.execute("select * from system.local").one(); + assertThat(row).isNotNull(); + } finally { + System.clearProperty(saslSystemProperty); + } + } + + @Test + public void should_authenticate_using_kerberos_with_keytab_and_alternate_service_principal() { + try (CqlSession session = + SessionUtils.newSession( + ads.getCcm(), + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) + .withString(DseDriverOption.AUTH_PROVIDER_SERVICE, "alternate") + .withStringMap( + DseDriverOption.AUTH_PROVIDER_SASL_PROPERTIES, + ImmutableMap.of("javax.security.sasl.qop", "auth-conf")) + .withStringMap( + 
DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, + ImmutableMap.of( + "principal", + ads.getUserPrincipal(), + "useKeyTab", + "true", + "refreshKrb5Config", + "true", + "keyTab", + ads.getUserKeytab().getAbsolutePath())) + .build())) { + Row row = session.execute("select * from system.local").one(); + assertThat(row).isNotNull(); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderIT.java new file mode 100644 index 00000000000..4ee28d62367 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderIT.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import static com.datastax.dse.driver.api.core.auth.KerberosUtils.acquireTicket; +import static com.datastax.dse.driver.api.core.auth.KerberosUtils.destroyTicket; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.auth.AuthenticationException; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.util.List; +import java.util.Map; +import org.junit.Assume; +import org.junit.ClassRule; +import org.junit.Test; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "Required for DseAuthenticator") +public class DseGssApiAuthProviderIT { + + @ClassRule public static EmbeddedAdsRule ads = new EmbeddedAdsRule(); + + /** + * Ensures that a Session can be established to a DSE server secured with Kerberos and that simple + * queries can be made using a client configuration that provides a keytab file. + */ + @Test + public void should_authenticate_using_kerberos_with_keytab() { + try (CqlSession session = ads.newKeyTabSession()) { + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + } + + /** + * Ensures that a Session can be established to a DSE server secured with Kerberos and that simple + * queries can be made using a client configuration that uses the ticket cache. This test will + * only run on unix platforms since it uses kinit to acquire tickets and kdestroy to destroy them. 
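+ *
+ * <p>For reference, the KerberosUtils helpers shell out to the standard MIT Kerberos tools with
+ * the KRB5_CONFIG environment variable pointing at the embedded KDC's generated config; the flow
+ * is roughly equivalent to (paths shown are placeholders):
+ *
+ * <pre>{@code
+ * KRB5_CONFIG=/path/to/krb5.conf kinit -t /path/to/cassandra.keytab -k cassandra@DATASTAX.COM
+ * KRB5_CONFIG=/path/to/krb5.conf kdestroy
+ * }</pre>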
+ */ + @Test + public void should_authenticate_using_kerberos_with_ticket() throws Exception { + String osName = System.getProperty("os.name", "").toLowerCase(); + boolean isUnix = osName.contains("mac") || osName.contains("darwin") || osName.contains("nux"); + Assume.assumeTrue(isUnix); + acquireTicket(ads.getUserPrincipal(), ads.getUserKeytab(), ads.getAdsServer()); + try (CqlSession session = ads.newTicketSession()) { + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } finally { + destroyTicket(ads); + } + } + + /** + * Validates that an AllNodesFailedException is thrown when using a ticket-based configuration and + * no such ticket exists in the user's cache. This is expected because we shouldn't be able to + * establish a connection to a Cassandra node if we cannot authenticate. + * + * @test_category dse:authentication + */ + @SuppressWarnings("unused") + @Test + public void should_not_authenticate_if_no_ticket_in_cache() { + try (CqlSession session = ads.newTicketSession()) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException e) { + verifyException(e); + } + } + + /** + * Validates that an AllNodesFailedException is thrown when using a keytab-based configuration and + * no such user exists for the given principal. This is expected because we shouldn't be able to + * establish a connection to a Cassandra node if we cannot authenticate. + * + * @test_category dse:authentication + */ + @SuppressWarnings("unused") + @Test + public void should_not_authenticate_if_keytab_does_not_map_to_valid_principal() { + try (CqlSession session = + ads.newKeyTabSession(ads.getUnknownPrincipal(), ads.getUnknownKeytab().getAbsolutePath())) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException e) { + verifyException(e); + } + } + /** + * Ensures that a Session can be established to a DSE server secured with Kerberos and that simple + * queries can be made using a client configuration that is provided via the programmatic interface. + */ + @Test + public void should_authenticate_using_kerberos_with_keytab_programmatically() { + DseGssApiAuthProviderBase.GssApiOptions.Builder builder = + DseGssApiAuthProviderBase.GssApiOptions.builder(); + Map<String, String> loginConfig = + ImmutableMap.of( + "principal", + ads.getUserPrincipal(), + "useKeyTab", + "true", + "refreshKrb5Config", + "true", + "keyTab", + ads.getUserKeytab().getAbsolutePath()); + + builder.withLoginConfiguration(loginConfig); + try (CqlSession session = + CqlSession.builder() + .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(builder.build())) + .build()) { + + ResultSet set = session.execute("select * from system.local"); + assertThat(set).isNotNull(); + } + } + + private void verifyException(AllNodesFailedException anfe) { + assertThat(anfe.getAllErrors()).hasSize(1); + List<Throwable> errors = anfe.getAllErrors().values().iterator().next(); + assertThat(errors).hasSize(1); + Throwable firstError = errors.get(0); + assertThat(firstError) + .isInstanceOf(AuthenticationException.class) + .hasMessageContaining("Authentication error on node /127.0.0.1:9042"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderIT.java new file mode 100644 index 00000000000..256c18f841d --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DsePlainTextAuthProviderIT.java @@ -0,0 +1,134
@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.auth.AuthenticationException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "Required for DseAuthenticator") +public class DsePlainTextAuthProviderIT { + + @ClassRule + public static CustomCcmRule ccm = + CustomCcmRule.builder() + .withCassandraConfiguration( + "authenticator", "com.datastax.bdp.cassandra.auth.DseAuthenticator") + .withDseConfiguration("authentication_options.enabled", true) + .withDseConfiguration("authentication_options.default_scheme", "internal") + .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") + .build(); + + @BeforeClass + public static void sleepForAuth() { + if (ccm.getCassandraVersion().compareTo(Version.V2_2_0) < 0) { + // Sleep for 1 second to allow C* auth to do its work. 
This is only needed for 2.1 + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } + } + + @Test + public void should_connect_dse_plaintext_auth() { + try (CqlSession session = + SessionUtils.newSession( + ccm, + SessionUtils.configLoaderBuilder() + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + session.execute("select * from system.local"); + } + } + + @Test + public void should_connect_dse_plaintext_auth_programmatically() { + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(ccm.getContactPoints()) + .withAuthCredentials("cassandra", "cassandra") + .build()) { + session.execute("select * from system.local"); + } + } + + @SuppressWarnings("unused") + @Test + public void should_not_connect_with_invalid_credentials() { + try (CqlSession session = + SessionUtils.newSession( + ccm, + SessionUtils.configLoaderBuilder() + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "NotARealPassword") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException e) { + verifyException(e); + } + } + + @SuppressWarnings("unused") + @Test + public void should_not_connect_without_credentials() { + try (CqlSession session = + SessionUtils.newSession( + ccm, + SessionUtils.configLoaderBuilder() + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + fail("Expected AllNodesFailedException"); + } catch (AllNodesFailedException e) { + verifyException(e); + } + } + + private void verifyException(AllNodesFailedException anfe) { + assertThat(anfe.getAllErrors()).hasSize(1); + List errors = anfe.getAllErrors().values().iterator().next(); + assertThat(errors).hasSize(1); + Throwable firstError = errors.get(0); + assertThat(firstError) + .isInstanceOf(AuthenticationException.class) + .hasMessageContaining("Authentication error on node /127.0.0.1:9042"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseProxyAuthenticationIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseProxyAuthenticationIT.java new file mode 100644 index 00000000000..a3f1c04afc0 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/DseProxyAuthenticationIT.java @@ -0,0 +1,282 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.auth.AuthenticationException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.servererrors.UnauthorizedException; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; +import java.util.List; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1", + description = "Required for DseAuthenticator with proxy") +public class DseProxyAuthenticationIT { + private static String bobPrincipal; + private static String charliePrincipal; + @ClassRule public static EmbeddedAdsRule ads = new EmbeddedAdsRule(); + + @BeforeClass + public static void addUsers() { + bobPrincipal = ads.addUserAndCreateKeyTab("bob", "fakePasswordForBob"); + charliePrincipal = ads.addUserAndCreateKeyTab("charlie", "fakePasswordForCharlie"); + } + + @Before + public void setupRoles() { + + SchemaChangeSynchronizer.withLock( + () -> { + try (CqlSession session = ads.newKeyTabSession()) { + session.execute( + "CREATE ROLE IF NOT EXISTS alice WITH PASSWORD = 'fakePasswordForAlice' AND LOGIN = FALSE"); + session.execute( + "CREATE ROLE IF NOT EXISTS ben WITH PASSWORD = 'fakePasswordForBen' AND LOGIN = TRUE"); + session.execute("CREATE ROLE IF NOT EXISTS 'bob@DATASTAX.COM' WITH LOGIN = TRUE"); + session.execute( + "CREATE ROLE IF NOT EXISTS 'charlie@DATASTAX.COM' WITH PASSWORD = 'fakePasswordForCharlie' AND LOGIN = TRUE"); + session.execute( + "CREATE ROLE IF NOT EXISTS steve WITH PASSWORD = 'fakePasswordForSteve' AND LOGIN = TRUE"); + session.execute( + "CREATE KEYSPACE IF NOT EXISTS aliceks WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'}"); + session.execute( + "CREATE TABLE IF NOT EXISTS aliceks.alicetable (key text PRIMARY KEY, value text)"); + session.execute( + "INSERT INTO aliceks.alicetable (key, value) VALUES ('hello', 'world')"); + session.execute("GRANT ALL ON KEYSPACE aliceks TO alice"); + session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'ben'"); + session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'bob@DATASTAX.COM'"); + session.execute("GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'steve'"); + session.execute( + "GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES TO 'charlie@DATASTAX.COM'"); + session.execute("GRANT PROXY.LOGIN ON ROLE 'alice' TO 'ben'"); + session.execute("GRANT PROXY.LOGIN ON ROLE 'alice' TO 'bob@DATASTAX.COM'"); + session.execute("GRANT PROXY.EXECUTE ON ROLE 'alice' TO 'steve'"); + 
session.execute("GRANT PROXY.EXECUTE ON ROLE 'alice' TO 'charlie@DATASTAX.COM'"); + // ben and bob are allowed to login as alice, but not execute as alice. + // charlie and steve are allowed to execute as alice, but not login as alice. + } + }); + } + /** + * Validates that a connection may be successfully made as user 'alice' using the credentials of a + * user 'ben' using {@link PlainTextAuthProvider} assuming ben has PROXY.LOGIN authorization on + * alice. + */ + @Test + public void should_allow_plain_text_authorized_user_to_login_as() { + try (CqlSession session = + SessionUtils.newSession( + ads.ccm, + SessionUtils.configLoaderBuilder() + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "alice") + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "ben") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForBen") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + ResultSet set = session.execute(select); + assertThat(set).isNotNull(); + } + } + + @Test + public void should_allow_plain_text_authorized_user_to_login_as_programmatically() { + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(ads.ccm.getContactPoints()) + .withAuthCredentials("ben", "fakePasswordForBen", "alice") + .build()) { + session.execute("select * from system.local"); + } + } + + /** + * Validates that a connection may be successfully made as user 'alice' using the credentials of a + * principal 'bob@DATASTAX.COM' using {@link DseGssApiAuthProvider} assuming 'bob@DATASTAX.COM' + * has PROXY.LOGIN authorization on alice. + */ + @Test + public void should_allow_kerberos_authorized_user_to_login_as() { + try (CqlSession session = + ads.newKeyTabSession( + bobPrincipal, ads.getKeytabForPrincipal(bobPrincipal).getAbsolutePath(), "alice")) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + ResultSet set = session.execute(select); + assertThat(set).isNotNull(); + } + } + + /** + * Validates that a connection does not succeed as user 'alice' using the credentials of a user + * 'steve' assuming 'steve' does not have PROXY.LOGIN authorization on alice. + */ + @Test + public void should_not_allow_plain_text_unauthorized_user_to_login_as() { + try (CqlSession session = + SessionUtils.newSession( + ads.ccm, + SessionUtils.configLoaderBuilder() + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "alice") + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "steve") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForSteve") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + session.execute(select); + fail("Should have thrown AllNodesFailedException on login."); + } catch (AllNodesFailedException anfe) { + verifyException(anfe); + } + } + /** + * Validates that a connection does not succeed as user 'alice' using the credentials of a + * principal 'charlie@DATASTAX.COM' assuming 'charlie@DATASTAX.COM' does not have PROXY.LOGIN + * authorization on alice.
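+ * (In {@code setupRoles()}, charlie is granted only {@code PROXY.EXECUTE ON ROLE 'alice'}, which
+ * allows per-statement impersonation but not logging in directly as alice.)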
+ */ + @Test + public void should_not_allow_kerberos_unauthorized_user_to_login_as() throws Exception { + try (CqlSession session = + ads.newKeyTabSession( + charliePrincipal, + ads.getKeytabForPrincipal(charliePrincipal).getAbsolutePath(), + "alice")) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + session.execute(select); + fail("Should have thrown AllNodesFailedException on login."); + } catch (AllNodesFailedException anfe) { + verifyException(anfe); + } + } + /** + * Validates that a query may be successfully made as user 'alice' using a {@link CqlSession} that + * is authenticated to user 'steve' using {@link PlainTextAuthProvider} assuming steve has + * PROXY.EXECUTE authorization on alice. + */ + @Test + public void should_allow_plain_text_authorized_user_to_execute_as() { + try (CqlSession session = + SessionUtils.newSession( + ads.ccm, + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "steve") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForSteve") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); + ResultSet set = session.execute(statementAsAlice); + assertThat(set).isNotNull(); + } + } + /** + * Validates that a query may be successfully made as user 'alice' using a {@link CqlSession} that + * is authenticated to principal 'charlie@DATASTAX.COM' using {@link DseGssApiAuthProvider} + * assuming charlie@DATASTAX.COM has PROXY.EXECUTE authorization on alice. + */ + @Test + public void should_allow_kerberos_authorized_user_to_execute_as() { + try (CqlSession session = + ads.newKeyTabSession( + charliePrincipal, ads.getKeytabForPrincipal(charliePrincipal).getAbsolutePath())) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); + ResultSet set = session.execute(statementAsAlice); + assertThat(set).isNotNull(); + } + } + /** + * Validates that a query may not be made as user 'alice' using a {@link CqlSession} that is + * authenticated to user 'ben' if ben does not have PROXY.EXECUTE authorization on alice. + */ + @Test + public void should_not_allow_plain_text_unauthorized_user_to_execute_as() { + try (CqlSession session = + SessionUtils.newSession( + ads.ccm, + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "ben") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "fakePasswordForBen") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build())) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); + session.execute(statementAsAlice); + fail("Should have thrown UnauthorizedException on executeAs."); + } catch (UnauthorizedException ue) { + verifyException(ue, "ben"); + } + } + /** + * Validates that a query may not be made as user 'alice' using a {@link CqlSession} that is + * authenticated to principal 'bob@DATASTAX.COM' using {@link DseGssApiAuthProvider} if + * bob@DATASTAX.COM does not have PROXY.EXECUTE authorization on alice. 
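+ * (The two proxy mechanisms differ in scope: proxy login is fixed at session creation via the
+ * authorization id, whereas proxy execution is requested per statement with
+ * {@code ProxyAuthentication.executeAs("alice", statement)}.)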
+ */ + @Test + public void should_not_allow_kerberos_unauthorized_user_to_execute_as() { + try (CqlSession session = + ads.newKeyTabSession( + bobPrincipal, ads.getKeytabForPrincipal(bobPrincipal).getAbsolutePath())) { + SimpleStatement select = SimpleStatement.builder("select * from aliceks.alicetable").build(); + SimpleStatement statementAsAlice = ProxyAuthentication.executeAs("alice", select); + session.execute(statementAsAlice); + fail("Should have thrown UnauthorizedException on executeAs."); + } catch (UnauthorizedException ue) { + verifyException(ue, "bob@DATASTAX.COM"); + } + } + + private void verifyException(AllNodesFailedException anfe) { + assertThat(anfe.getAllErrors()).hasSize(1); + List errors = anfe.getAllErrors().values().iterator().next(); + assertThat(errors).hasSize(1); + Throwable firstError = errors.get(0); + assertThat(firstError) + .isInstanceOf(AuthenticationException.class) + .hasMessageContaining( + "Authentication error on node /127.0.0.1:9042: " + + "server replied with 'Failed to login. Please re-try.' to AuthResponse request"); + } + + private void verifyException(UnauthorizedException ue, String user) { + assertThat(ue.getMessage()) + .contains( + String.format( + "Either '%s' does not have permission to execute queries as 'alice' " + + "or that role does not exist.", + user)); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAds.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAds.java new file mode 100644 index 00000000000..5ca751e9151 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAds.java @@ -0,0 +1,607 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import com.datastax.oss.driver.shaded.guava.common.collect.Maps; +import com.datastax.oss.driver.shaded.guava.common.collect.Sets; +import com.datastax.oss.driver.shaded.guava.common.io.Files; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.UnknownHostException; +import java.nio.charset.Charset; +import java.util.Collections; +import java.util.Map; +import java.util.UUID; +import org.apache.directory.api.ldap.model.constants.SchemaConstants; +import org.apache.directory.api.ldap.model.constants.SupportedSaslMechanisms; +import org.apache.directory.api.ldap.model.csn.CsnFactory; +import org.apache.directory.api.ldap.model.entry.Entry; +import org.apache.directory.api.ldap.model.exception.LdapException; +import org.apache.directory.api.ldap.model.exception.LdapInvalidDnException; +import org.apache.directory.api.ldap.model.name.Dn; +import org.apache.directory.api.ldap.model.schema.SchemaManager; +import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; +import org.apache.directory.server.constants.ServerDNConstants; +import org.apache.directory.server.core.DefaultDirectoryService; +import org.apache.directory.server.core.api.CacheService; +import org.apache.directory.server.core.api.DirectoryService; +import org.apache.directory.server.core.api.DnFactory; +import org.apache.directory.server.core.api.InstanceLayout; +import org.apache.directory.server.core.api.schema.SchemaPartition; +import org.apache.directory.server.core.kerberos.KeyDerivationInterceptor; +import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition; +import org.apache.directory.server.core.partition.ldif.LdifPartition; +import org.apache.directory.server.core.shared.DefaultDnFactory; +import org.apache.directory.server.kerberos.KerberosConfig; +import org.apache.directory.server.kerberos.kdc.KdcServer; +import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory; +import org.apache.directory.server.kerberos.shared.keytab.Keytab; +import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry; +import org.apache.directory.server.ldap.LdapServer; +import org.apache.directory.server.ldap.handlers.sasl.MechanismHandler; +import org.apache.directory.server.ldap.handlers.sasl.cramMD5.CramMd5MechanismHandler; +import org.apache.directory.server.ldap.handlers.sasl.digestMD5.DigestMd5MechanismHandler; +import org.apache.directory.server.ldap.handlers.sasl.gssapi.GssapiMechanismHandler; +import org.apache.directory.server.ldap.handlers.sasl.plain.PlainMechanismHandler; +import org.apache.directory.server.protocol.shared.transport.TcpTransport; +import org.apache.directory.server.protocol.shared.transport.UdpTransport; +import org.apache.directory.shared.kerberos.KerberosTime; +import org.apache.directory.shared.kerberos.codec.types.EncryptionType; +import org.apache.directory.shared.kerberos.components.EncryptionKey; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A convenience utility for running an Embedded Apache Directory Service with LDAP and optionally a + * Kerberos Key Distribution Server. By default listens for LDAP on 10389 and Kerberos on 60088. 
You + * can use something like Apache Directory Studio + * to verify the server is configured and running correctly by connecting to localhost:10389 with + * username 'uid=admin,ou=system' and password 'secret'. + * + *
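+ *
+ * <p>Typical programmatic usage (a sketch; user names, passwords and principals below are
+ * examples only):
+ *
+ * <pre>{@code
+ * EmbeddedAds ads = EmbeddedAds.builder().withKerberos().build();
+ * ads.start();
+ * File keytab = ads.addUserAndCreateKeytab("cassandra", "secret", "cassandra@DATASTAX.COM");
+ * // ... point clients at ads.getKrb5Conf() and the generated keytab, run tests ...
+ * ads.stop();
+ * }</pre>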

        Note: This should only be used for development and testing purposes. + */ +public class EmbeddedAds { + + private static final Logger LOG = LoggerFactory.getLogger(EmbeddedAds.class); + + private final String dn; + + private final String realm; + + private int kdcPort; + + private int ldapPort; + + private final boolean kerberos; + + private InetAddress address; + + private String hostname; + + private File confDir; + + private volatile boolean isInit = false; + + private DirectoryService service; + + private LdapServer ldapServer; + + private KdcServer kdcServer; + + private Dn usersDN; + + private File krb5Conf; + + private EmbeddedAds( + String dn, + String realm, + String address, + int ldapPort, + boolean kerberos, + int kdcPort, + File confDir) { + this.dn = dn; + this.realm = realm; + try { + this.address = InetAddress.getByName(address); + } catch (UnknownHostException e) { + LOG.error("Failure resolving address '{}', falling back to loopback.", address, e); + this.address = InetAddress.getLoopbackAddress(); + } + this.hostname = this.address.getHostName().toLowerCase(); + this.ldapPort = ldapPort; + this.kerberos = kerberos; + this.kdcPort = kdcPort; + this.confDir = confDir; + } + + public void start() throws Exception { + if (isInit) { + return; + } + isInit = true; + File workDir = Files.createTempDir(); + // Set confDir = workDir if not defined. + if (confDir == null) { + confDir = workDir; + } + + if (kerberos) { + kdcPort = kdcPort != -1 ? kdcPort : findAvailablePort(60088); + + // Set system properties required for kerberos auth to work. Unfortunately admin_server + // cannot be expressed via System properties (like realm and kdc can), thus we must create a + // config file. + krb5Conf = createKrb5Conf(); + + System.setProperty("java.security.krb5.conf", krb5Conf.getAbsolutePath()); + // Useful options for debugging. + // System.setProperty("sun.security.krb5.debug", "true"); + // System.setProperty("java.security.debug", "configfile,configparser,gssloginconfig"); + } + + // Initialize service and set its filesystem layout. + service = new DefaultDirectoryService(); + InstanceLayout layout = new InstanceLayout(workDir); + service.setInstanceLayout(layout); + + // Disable ChangeLog as we don't need change tracking. + service.getChangeLog().setEnabled(false); + // Denormalizes attribute DNs to be human readable, i.e uid=admin,ou=system instead of + // 0.9.2.3=admin,2.5=system) + service.setDenormalizeOpAttrsEnabled(true); + + // Create and init cache service which will be used for caching DNs, among other things. + CacheService cacheService = new CacheService(); + cacheService.initialize(layout); + + // Create and load SchemaManager which will create the default schema partition. + SchemaManager schemaManager = new DefaultSchemaManager(); + service.setSchemaManager(schemaManager); + schemaManager.loadAllEnabled(); + + // Create SchemaPartition from schema manager and load ldif from schema directory. + SchemaPartition schemaPartition = new SchemaPartition(schemaManager); + LdifPartition ldifPartition = new LdifPartition(schemaManager, service.getDnFactory()); + ldifPartition.setPartitionPath(new File(layout.getPartitionsDirectory(), "schema").toURI()); + schemaPartition.setWrappedPartition(ldifPartition); + service.setSchemaPartition(schemaPartition); + + // Create a DN factory which can be used to create and cache DNs. 
+ DnFactory dnFactory = new DefaultDnFactory(schemaManager, cacheService.getCache("dnCache")); + service.setDnFactory(dnFactory); + + // Create mandatory system partition. This is used for storing server configuration. + JdbmPartition systemPartition = + createPartition("system", dnFactory.create(ServerDNConstants.SYSTEM_DN)); + service.setSystemPartition(systemPartition); + + // Now that we have a schema and system partition, start up the directory service. + service.startup(); + + // Create partition where user, tgt and ldap principals will live. + Dn partitionDn = dnFactory.create(dn); + String dnName = partitionDn.getRdn().getValue().getString(); + JdbmPartition partition = createPartition(dnName, partitionDn); + + // Add a context entry so the partition can be referenced by entries. + Entry context = service.newEntry(partitionDn); + context.add("objectClass", "top", "domain", "extensibleObject"); + context.add(partitionDn.getRdn().getType(), dnName); + partition.setContextEntry(context); + service.addPartition(partition); + + // Create users domain. + usersDN = partitionDn.add(dnFactory.create("ou=users")); + Entry usersEntry = service.newEntry(usersDN); + usersEntry.add("objectClass", "organizationalUnit", "top"); + usersEntry.add("ou", "users"); + if (kerberos) { + usersEntry = kerberize(usersEntry); + } + service.getAdminSession().add(usersEntry); + + // Uncomment to allow to connect to ldap server without credentials for convenience. + // service.setAllowAnonymousAccess(true); + + startLdap(); + + // Create sasl and krbtgt principals and start KDC if kerberos is enabled. + if (kerberos) { + // Ticket Granting Ticket entry. + Dn tgtDN = usersDN.add(dnFactory.create("uid=krbtgt")); + String servicePrincipal = "krbtgt/" + realm + "@" + realm; + Entry tgtEntry = service.newEntry(tgtDN); + tgtEntry.add( + "objectClass", + "person", + "inetOrgPerson", + "top", + "krb5KDCEntry", + "uidObject", + "krb5Principal"); + tgtEntry.add("krb5KeyVersionNumber", "0"); + tgtEntry.add("krb5PrincipalName", servicePrincipal); + tgtEntry.add("uid", "krbtgt"); + tgtEntry.add("userPassword", "secret"); + tgtEntry.add("sn", "Service"); + tgtEntry.add("cn", "KDC Service"); + service.getAdminSession().add(kerberize(tgtEntry)); + + // LDAP SASL principal. 
+ String saslPrincipal = "ldap/" + hostname + "@" + realm; + ldapServer.setSaslPrincipal(saslPrincipal); + Dn ldapDN = usersDN.add(dnFactory.create("uid=ldap")); + Entry ldapEntry = service.newEntry(ldapDN); + ldapEntry.add( + "objectClass", + "top", + "person", + "inetOrgPerson", + "krb5KDCEntry", + "uidObject", + "krb5Principal"); + ldapEntry.add("krb5KeyVersionNumber", "0"); + ldapEntry.add("krb5PrincipalName", saslPrincipal); + ldapEntry.add("uid", "ldap"); + ldapEntry.add("userPassword", "secret"); + ldapEntry.add("sn", "Service"); + ldapEntry.add("cn", "LDAP Service"); + service.getAdminSession().add(kerberize(ldapEntry)); + + startKDC(servicePrincipal); + } + } + + public boolean isStarted() { + return this.isInit; + } + + private File createKrb5Conf() throws IOException { + File krb5Conf = new File(confDir, "krb5.conf"); + String config = + String.format( + "[libdefaults]%n" + + "default_realm = %s%n" + + "default_tgs_enctypes = aes128-cts-hmac-sha1-96 aes256-cts-hmac-sha1-96%n%n" + + "[realms]%n" + + "%s = {%n" + + " kdc = %s:%d%n" + + " admin_server = %s:%d%n" + + "}%n", + realm, realm, hostname, kdcPort, hostname, kdcPort); + + try (FileOutputStream fios = new FileOutputStream(krb5Conf)) { + PrintWriter pw = + new PrintWriter( + new BufferedWriter(new OutputStreamWriter(fios, Charset.defaultCharset()))); + pw.write(config); + pw.close(); + } + return krb5Conf; + } + + /** + * @return A specialized krb5.conf file that defines and defaults to the domain expressed by this + * server. + */ + public File getKrb5Conf() { + return krb5Conf; + } + + /** + * Adds a user with the given password and principal name and creates a keytab file for + * authenticating with that user's principal. + * + * @param user Username to login with (i.e. cassandra). + * @param password Password to authenticate with. + * @param principal Principal representing the server (i.e. cassandra@DATASTAX.COM). + * @return Generated keytab file for this user. + */ + public File addUserAndCreateKeytab(String user, String password, String principal) + throws IOException, LdapException { + addUser(user, password, principal); + return createKeytab(user, password, principal); + } + + /** + * Creates a keytab file for authenticating with a given principal. + * + * @param user Username to login with (i.e. cassandra). + * @param password Password to authenticate with. + * @param principal Principal representing the server (i.e. cassandra@DATASTAX.COM). + * @return Generated keytab file for this user. + */ + public File createKeytab(String user, String password, String principal) throws IOException { + File keytabFile = new File(confDir, user + ".keytab"); + Keytab keytab = Keytab.getInstance(); + + KerberosTime timeStamp = new KerberosTime(System.currentTimeMillis()); + + Map keys = + KerberosKeyFactory.getKerberosKeys(principal, password); + + KeytabEntry keytabEntry = + new KeytabEntry( + principal, 0, timeStamp, (byte) 0, keys.get(EncryptionType.AES128_CTS_HMAC_SHA1_96)); + + keytab.setEntries(Collections.singletonList(keytabEntry)); + keytab.write(keytabFile); + return keytabFile; + } + + /** + * Adds a user with the given password, does not create necessary kerberos attributes. + * + * @param user Username to login with (i.e. cassandra). + * @param password Password to authenticate with. + */ + public void addUser(String user, String password) throws LdapException { + addUser(user, password, null); + } + + /** + * Adds a user with the given password and principal. 
If principal is specified and kerberos is + * enabled, user is created with the necessary attributes to authenticate with kerberos (entryCsn, + * entryUuid, etc.). + * + * @param user Username to login with (i.e. cassandra). + * @param password Password to authenticate with. + * @param principal Principal representing the server (i.e. cassandra@DATASTAX.COM). + */ + public void addUser(String user, String password, String principal) throws LdapException { + Preconditions.checkState(isInit); + Dn userDN = usersDN.add("uid=" + user); + Entry userEntry = service.newEntry(userDN); + if (kerberos && principal != null) { + userEntry.add( + "objectClass", + "organizationalPerson", + "person", + "extensibleObject", + "inetOrgPerson", + "top", + "krb5KDCEntry", + "uidObject", + "krb5Principal"); + userEntry.add("krb5KeyVersionNumber", "0"); + userEntry.add("krb5PrincipalName", principal); + userEntry = kerberize(userEntry); + } else { + userEntry.add( + "objectClass", + "organizationalPerson", + "person", + "extensibleObject", + "inetOrgPerson", + "top", + "uidObject"); + } + userEntry.add("uid", user); + userEntry.add("sn", user); + userEntry.add("cn", user); + userEntry.add("userPassword", password); + service.getAdminSession().add(userEntry); + } + + /** Stops the server(s) if running. */ + public void stop() { + if (ldapServer != null) { + ldapServer.stop(); + } + if (kdcServer != null) { + kdcServer.stop(); + } + } + + /** @return The evaluated hostname that the server is listening with. */ + public String getHostname() { + return this.hostname; + } + + /** + * Adds attributes to the given Entry which will enable krb5key attributes to be added to them. + * + * @param entry Entry to add attributes to. + * @return The provided entry. + */ + private Entry kerberize(Entry entry) throws LdapException { + // Add csn and uuids for kerberos, this is needed to generate krb5keys. + entry.add(SchemaConstants.ENTRY_CSN_AT, new CsnFactory(0).newInstance().toString()); + entry.add(SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString()); + return entry; + } + + /** + * Creates a {@link JdbmPartition} with the given id and DN. + * + * @param id Id to create partition with. + * @param dn Distinguished Name to use to create partition. + * @return Created partition. + */ + private JdbmPartition createPartition(String id, Dn dn) throws LdapInvalidDnException { + JdbmPartition partition = new JdbmPartition(service.getSchemaManager(), service.getDnFactory()); + partition.setId(id); + partition.setPartitionPath( + new File(service.getInstanceLayout().getPartitionsDirectory(), id).toURI()); + partition.setSuffixDn(dn); + partition.setSchemaManager(service.getSchemaManager()); + return partition; + } + + /** Starts the LDAP Server with SASL enabled. */ + private void startLdap() throws Exception { + // Create and start LDAP server. + ldapServer = new LdapServer(); + + // Enable SASL layer, this is useful with or without kerberos. + Map mechanismHandlerMap = Maps.newHashMap(); + mechanismHandlerMap.put(SupportedSaslMechanisms.PLAIN, new PlainMechanismHandler()); + mechanismHandlerMap.put(SupportedSaslMechanisms.CRAM_MD5, new CramMd5MechanismHandler()); + mechanismHandlerMap.put(SupportedSaslMechanisms.DIGEST_MD5, new DigestMd5MechanismHandler()); + // GSSAPI is required for kerberos. + mechanismHandlerMap.put(SupportedSaslMechanisms.GSSAPI, new GssapiMechanismHandler()); + ldapServer.setSaslMechanismHandlers(mechanismHandlerMap); + ldapServer.setSaslHost(hostname); + // Realms only used by DIGEST_MD5 and GSSAPI. 
+ ldapServer.setSaslRealms(Collections.singletonList(realm)); + ldapServer.setSearchBaseDn(dn); + + ldapPort = ldapPort != -1 ? ldapPort : findAvailablePort(10389); + ldapServer.setTransports(new TcpTransport(address.getHostAddress(), ldapPort)); + ldapServer.setDirectoryService(service); + if (kerberos) { + // Add an interceptor to attach krb5keys to created principals. + KeyDerivationInterceptor interceptor = new KeyDerivationInterceptor(); + interceptor.init(service); + service.addLast(interceptor); + } + ldapServer.start(); + } + + /** + * Starts the Kerberos Key Distribution Center (KDC) server supporting AES128 using the given + * principal for the Ticket-granting ticket. + * + * @param servicePrincipal TGT service principal. + */ + private void startKDC(String servicePrincipal) throws Exception { + KerberosConfig config = new KerberosConfig(); + // We choose AES128_CTS_HMAC_SHA1_96 for our generated keytabs so we don't need JCE. + config.setEncryptionTypes(Sets.newHashSet(EncryptionType.AES128_CTS_HMAC_SHA1_96)); + config.setSearchBaseDn(dn); + config.setServicePrincipal(servicePrincipal); + + kdcServer = new KdcServer(config); + kdcServer.setDirectoryService(service); + + kdcServer.setTransports( + new TcpTransport(address.getHostAddress(), kdcPort), + new UdpTransport(address.getHostAddress(), kdcPort)); + kdcServer.start(); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + + private String dn = "dc=datastax,dc=com"; + + private String realm = "DATASTAX.COM"; + + private boolean kerberos = false; + + private int kdcPort = -1; + + private int ldapPort = -1; + + private String address = "127.0.0.1"; + + private File confDir = null; + + private Builder() {} + + public EmbeddedAds build() { + return new EmbeddedAds(dn, realm, address, ldapPort, kerberos, kdcPort, confDir); + } + + /** + * Configures the base DN to create users under. Defaults to dc=datastax,dc=com. + */ + public Builder withBaseDn(String dn) { + this.dn = dn; + return this; + } + + /** Configures the realm to use for SASL and Kerberos. Defaults to DATASTAX.COM. */ + public Builder withRealm(String realm) { + this.realm = realm; + return this; + } + + /** + * Sets the directory where krb5.conf and generated keytabs are created. Defaults to current + * directory. + */ + public Builder withConfDir(File confDir) { + this.confDir = confDir; + return this; + } + + /** + * Configures the port to use for LDAP. Defaults to the first available port from 10389+. Must + * be greater than 0. + */ + public Builder withLdapPort(int port) { + Preconditions.checkArgument(port > 0); + this.ldapPort = port; + return this; + } + + /** + * Configures the port to use for the Kerberos KDC. Defaults to the first available port from 60088+. + * Must be greater than 0. + */ + public Builder withKerberos(int port) { + Preconditions.checkArgument(port > 0); + this.kdcPort = port; + return withKerberos(); + } + + /** + * Configures the server to run with a Kerberos KDC using the first available port from 60088+. + */ + public Builder withKerberos() { + this.kerberos = true; + return this; + } + + /** + * Configures the server to listen on the given address. Defaults to + * 127.0.0.1. You shouldn't need to change this.
+ */ + public Builder withAddress(String address) { + this.address = address; + return this; + } + } + + private static int findAvailablePort(int startingWith) { + IOException last = null; + for (int port = startingWith; port < startingWith + 100; port++) { + try { + ServerSocket s = new ServerSocket(port); + s.close(); + return port; + } catch (IOException e) { + last = e; + } + } + // If for whatever reason a port could not be acquired, throw the last encountered exception. + throw new RuntimeException("Could not acquire an available port", last); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAdsRule.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAdsRule.java new file mode 100644 index 00000000000..a57e349a51b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/EmbeddedAdsRule.java @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.internal.core.auth.DseGssApiAuthProvider; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import org.junit.AssumptionViolatedException; +import org.junit.rules.ExternalResource; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A testing rule that wraps the EmbeddedAds server and CcmRule into one rule. This is needed + * because ccm needs to be aware of the Kerberos server settings prior to its initialization. + */ +public class EmbeddedAdsRule extends ExternalResource { + + private static final Logger LOG = LoggerFactory.getLogger(EmbeddedAdsRule.class); + + public CustomCcmRule ccm; + // Realm for the KDC.
+ private final String realm = "DATASTAX.COM"; + private final String address = "127.0.0.1"; + + private final EmbeddedAds adsServer = + EmbeddedAds.builder().withKerberos().withRealm(realm).withAddress(address).build(); + + // Principal for DSE service ( = kerberos_options.service_principal) + private final String servicePrincipal = "dse/" + adsServer.getHostname() + "@" + realm; + + // A non-standard principal for DSE service, to test SASL protocol names + private final String alternateServicePrincipal = + "alternate/" + adsServer.getHostname() + "@" + realm; + + // Principal for the default cassandra user. + private final String userPrincipal = "cassandra@" + realm; + + // Principal for a user that doesn't exist. + private final String unknownPrincipal = "unknown@" + realm; + + // Keytabs to use for auth. + private static File userKeytab; + private static File unknownKeytab; + private static File dseKeytab; + private static File alternateKeytab; + private static Map customKeytabs = new HashMap<>(); + + private boolean alternate = false; + + public EmbeddedAdsRule(boolean alternate) { + this.alternate = alternate; + } + + public EmbeddedAdsRule() { + this(false); + } + + @Override + protected void before() { + try { + if (adsServer.isStarted()) { + return; + } + // Start ldap/kdc server. + adsServer.start(); + + // Create users and keytabs for the DSE principal and cassandra user. + dseKeytab = adsServer.addUserAndCreateKeytab("dse", "fakePasswordForTests", servicePrincipal); + alternateKeytab = + adsServer.addUserAndCreateKeytab( + "alternate", "fakePasswordForTests", alternateServicePrincipal); + userKeytab = + adsServer.addUserAndCreateKeytab("cassandra", "fakePasswordForTests", userPrincipal); + unknownKeytab = adsServer.createKeytab("unknown", "fakePasswordForTests", unknownPrincipal); + + String authenticationOptions = + "" + + "authentication_options:\n" + + " enabled: true\n" + + " default_scheme: kerberos\n" + + " other_schemes:\n" + + " - internal"; + + if (alternate) { + ccm = + CustomCcmRule.builder() + .withCassandraConfiguration( + "authorizer", "com.datastax.bdp.cassandra.auth.DseAuthorizer") + .withCassandraConfiguration( + "authenticator", "com.datastax.bdp.cassandra.auth.DseAuthenticator") + .withDseConfiguration("authorization_options.enabled", true) + .withDseConfiguration(authenticationOptions) + .withDseConfiguration("kerberos_options.qop", "auth-conf") + .withDseConfiguration( + "kerberos_options.keytab", getAlternateKeytab().getAbsolutePath()) + .withDseConfiguration( + "kerberos_options.service_principal", "alternate/_HOST@" + getRealm()) + .withJvmArgs( + "-Dcassandra.superuser_setup_delay_ms=0", + "-Djava.security.krb5.conf=" + getAdsServer().getKrb5Conf().getAbsolutePath()) + .build(); + } else { + ccm = + CustomCcmRule.builder() + .withCassandraConfiguration( + "authorizer", "com.datastax.bdp.cassandra.auth.DseAuthorizer") + .withCassandraConfiguration( + "authenticator", "com.datastax.bdp.cassandra.auth.DseAuthenticator") + .withDseConfiguration("authorization_options.enabled", true) + .withDseConfiguration(authenticationOptions) + .withDseConfiguration("kerberos_options.qop", "auth") + .withDseConfiguration("kerberos_options.keytab", getDseKeytab().getAbsolutePath()) + .withDseConfiguration( + "kerberos_options.service_principal", "dse/_HOST@" + getRealm()) + .withJvmArgs( + "-Dcassandra.superuser_setup_delay_ms=0", + "-Djava.security.krb5.conf=" + getAdsServer().getKrb5Conf().getAbsolutePath()) + .build(); + } + ccm.getCcmBridge().create(); + 
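// create() lays down the CCM cluster definition assembled above; start() below then boots the
+ // nodes against the embedded KDC. +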
ccm.getCcmBridge().start(); + + } catch (Exception e) { + LOG.error("Unable to start ads server ", e); + } + } + + @Override + public Statement apply(Statement base, Description description) { + if (BackendRequirementRule.meetsDescriptionRequirements(description)) { + return super.apply(base, description); + } else { + // requirements not met, throw reasoning assumption to skip test + return new Statement() { + @Override + public void evaluate() { + throw new AssumptionViolatedException( + BackendRequirementRule.buildReasonString(description)); + } + }; + } + } + + @Override + protected void after() { + adsServer.stop(); + ccm.getCcmBridge().stop(); + } + + public CqlSession newKeyTabSession(String userPrincipal, String keytabPath) { + return SessionUtils.newSession( + getCcm(), + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) + .withStringMap( + DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, + ImmutableMap.of( + "principal", + userPrincipal, + "useKeyTab", + "true", + "refreshKrb5Config", + "true", + "keyTab", + keytabPath)) + .build()); + } + + public CqlSession newKeyTabSession(String userPrincipal, String keytabPath, String authId) { + return SessionUtils.newSession( + getCcm(), + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) + .withStringMap( + DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, + ImmutableMap.of( + "principal", + userPrincipal, + "useKeyTab", + "true", + "refreshKrb5Config", + "true", + "keyTab", + keytabPath)) + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, authId) + .build()); + } + + public CqlSession newKeyTabSession() { + return newKeyTabSession(getUserPrincipal(), getUserKeytab().getAbsolutePath()); + } + + public CqlSession newTicketSession() { + return SessionUtils.newSession( + getCcm(), + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, DseGssApiAuthProvider.class) + .withStringMap( + DseDriverOption.AUTH_PROVIDER_LOGIN_CONFIGURATION, + ImmutableMap.of( + "principal", + userPrincipal, + "useTicketCache", + "true", + "refreshKrb5Config", + "true", + "renewTGT", + "true")) + .build()); + } + + public CustomCcmRule getCcm() { + return ccm; + } + + public String getRealm() { + return realm; + } + + public String getAddress() { + return address; + } + + public EmbeddedAds getAdsServer() { + return adsServer; + } + + public String getServicePrincipal() { + return servicePrincipal; + } + + public String getAlternateServicePrincipal() { + return alternateServicePrincipal; + } + + public String getUserPrincipal() { + return userPrincipal; + } + + public String getUnknownPrincipal() { + return unknownPrincipal; + } + + public File getUserKeytab() { + return userKeytab; + } + + public File getUnknownKeytab() { + return unknownKeytab; + } + + public File getDseKeytab() { + return dseKeytab; + } + + public File getAlternateKeytab() { + return alternateKeytab; + } + + public String addUserAndCreateKeyTab(String user, String password) { + String principal = user + "@" + realm; + try { + File keytabFile = adsServer.addUserAndCreateKeytab(user, password, principal); + customKeytabs.put(principal, keytabFile); + } catch (Exception e) { + LOG.error("Unable to add user and create keytab for " + user + " ", e); + } + return principal; + } + + public File getKeytabForPrincipal(String principal) { + return customKeytabs.get(principal); + } +} diff --git
a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/KerberosUtils.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/KerberosUtils.java new file mode 100644 index 00000000000..5d385b51c92 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/auth/KerberosUtils.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.commons.exec.CommandLine; +import org.apache.commons.exec.DefaultExecutor; +import org.apache.commons.exec.Executor; + +public class KerberosUtils { + /** + * Executes the given command with KRB5_CONFIG environment variable pointing to the specialized + * config file for the embedded KDC server. + */ + public static void executeCommand(String command, EmbeddedAds adsServer) throws IOException { + Map environmentMap = + ImmutableMap.builder() + .put("KRB5_CONFIG", adsServer.getKrb5Conf().getAbsolutePath()) + .build(); + CommandLine cli = CommandLine.parse(command); + Executor executor = new DefaultExecutor(); + int retValue = executor.execute(cli, environmentMap); + assertThat(retValue).isZero(); + } + + /** + * Acquires a ticket into the cache with the tgt using kinit command with the given principal and + * keytab file. + */ + public static void acquireTicket(String principal, File keytab, EmbeddedAds adsServer) + throws IOException { + executeCommand( + String.format("kinit -t %s -k %s", keytab.getAbsolutePath(), principal), adsServer); + } + + /** Destroys all tickets in the cache with given principal. */ + public static void destroyTicket(EmbeddedAdsRule ads) throws IOException { + executeCommand("kdestroy", ads.getAdsServer()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingIT.java new file mode 100644 index 00000000000..45cc84f0719 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingIT.java @@ -0,0 +1,699 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.continuous; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1.0", + description = "Continuous paging is only available from 5.1.0 onwards") +@Category(ParallelizableTests.class) +@RunWith(DataProviderRunner.class) +public class ContinuousPagingIT extends ContinuousPagingITBase { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule<CqlSession> sessionRule = + SessionRule.builder(ccmRule) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METRICS_SESSION_ENABLED, + Collections.singletonList(DseSessionMetric.CONTINUOUS_CQL_REQUESTS.getPath())) + .withStringList( + DefaultDriverOption.METRICS_NODE_ENABLED, + Collections.singletonList(DefaultNodeMetric.CQL_MESSAGES.getPath())) + .build()) + .build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + 
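For context, the tests in this class all drive continuous paging the same way: derive a profile from the session's default configuration, override the relevant `DseDriverOption.CONTINUOUS_PAGING_*` options, and attach the profile to the statement. A minimal sketch of that pattern, not part of the patch, assuming a reachable DSE 5.1+ node; the keyspace and table names are illustrative only:

```java
import com.datastax.dse.driver.api.core.config.DseDriverOption;
import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

public class ContinuousPagingSketch {
  public static void main(String[] args) {
    // Hypothetical keyspace/table on a locally reachable DSE 5.1+ cluster.
    try (CqlSession session = CqlSession.builder().withKeyspace("ks").build()) {
      DriverExecutionProfile profile =
          session
              .getContext()
              .getConfig()
              .getDefaultProfile()
              // 50 rows per server-pushed page; 0 disables the page-count and rate limits.
              .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 50)
              .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 0)
              .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0);
      SimpleStatement statement =
          SimpleStatement.newInstance("SELECT v FROM test WHERE k = ?", "k")
              .setExecutionProfile(profile);
      // Unlike regular paging, the server keeps streaming pages; iteration only
      // blocks when the locally enqueued pages have all been consumed.
      ContinuousResultSet rows = session.executeContinuously(statement);
      for (Row row : rows) {
        System.out.println(row.getInt("v"));
      }
    }
  }
}
```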
@BeforeClass + public static void setUp() { + initialize(sessionRule.session(), sessionRule.slowProfile()); + } + + /** + * Validates {@link ContinuousSession#executeContinuously(Statement)} with a variety of paging + * options and ensures in all cases the expected number of rows come back. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + @UseDataProvider("pagingOptions") + public void should_execute_synchronously(Options options) { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + DriverExecutionProfile profile = options.asProfile(session); + ContinuousResultSet result = + session.executeContinuously(statement.setExecutionProfile(profile)); + int i = 0; + for (Row row : result) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + assertThat(i).isEqualTo(options.expectedRows); + validateMetrics(session); + } + + /** + * Validates {@link ContinuousSession#executeContinuously(Statement)} with a variety of paging + * options using a prepared statement and ensures in all cases the expected number of rows come + * back. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + @UseDataProvider("pagingOptions") + public void should_execute_prepared_statement_synchronously(Options options) { + CqlSession session = sessionRule.session(); + DriverExecutionProfile profile = options.asProfile(session); + ContinuousResultSet result = + session.executeContinuously(prepared.bind(KEY).setExecutionProfile(profile)); + int i = 0; + for (Row row : result) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + assertThat(i).isEqualTo(options.expectedRows); + validateMetrics(session); + } + + /** + * Validates {@link ContinuousSession#executeContinuouslyAsync(Statement)} with a variety of + * paging options and ensures in all cases the expected number of rows come back and the expected + * number of pages are received. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + @UseDataProvider("pagingOptions") + public void should_execute_asynchronously(Options options) { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + DriverExecutionProfile profile = options.asProfile(session); + PageStatistics stats = + CompletableFutures.getUninterruptibly( + session + .executeContinuouslyAsync(statement.setExecutionProfile(profile)) + .thenCompose(new AsyncContinuousPagingFunction())); + assertThat(stats.rows).isEqualTo(options.expectedRows); + assertThat(stats.pages).isEqualTo(options.expectedPages); + validateMetrics(session); + } + + /** + * Validates that continuous paging is resilient to a schema change being made in the middle of + * producing pages for the driver if the query was a simple statement. + * + *
<p>
Adds a column 'b' after paging the first row in. This column should not be present in the + * in-flight queries' rows, but should be present for subsequent queries. + * + * @test_category queries + * @jira_ticket JAVA-1653 + * @since 1.2.0 + */ + @Test + public void simple_statement_paging_should_be_resilient_to_schema_change() { + CqlSession session = sessionRule.session(); + SimpleStatement simple = SimpleStatement.newInstance("select * from test_prepare"); + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 1) + .withDuration( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(30)) + .withDuration( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(30)); + ContinuousResultSet result = session.executeContinuously(simple.setExecutionProfile(profile)); + Iterator<Row> it = result.iterator(); + // First row should have non-null values. + Row row0 = it.next(); + assertThat(row0.getString("k")).isNotNull(); + assertThat(row0.isNull("v")).isFalse(); + // Make schema change to add b, its metadata should NOT be present in subsequent rows. + CqlSession schemaChangeSession = + SessionUtils.newSession( + ccmRule, session.getKeyspace().orElseThrow(IllegalStateException::new)); + SimpleStatement statement = + SimpleStatement.newInstance("ALTER TABLE test_prepare add b int") + .setExecutionProfile(sessionRule.slowProfile()); + schemaChangeSession.execute(statement); + schemaChangeSession.checkSchemaAgreement(); + while (it.hasNext()) { + // Each row should have a value for k and v, but b should not be present as it was not part + // of the original metadata. + Row row = it.next(); + assertThat(row.getString("k")).isNotNull(); + assertThat(row.isNull("v")).isFalse(); + assertThat(row.getColumnDefinitions().contains("b")).isFalse(); + } + // Subsequent queries should contain b in metadata since it's a new query. + result = session.executeContinuously(simple); + it = result.iterator(); + while (it.hasNext()) { + Row row = it.next(); + assertThat(row.getString("k")).isNotNull(); + assertThat(row.isNull("v")).isFalse(); + // b should be null, but present in metadata. + assertThat(row.isNull("b")).isTrue(); + assertThat(row.getColumnDefinitions().contains("b")).isTrue(); + } + } + + /** + * Validates that continuous paging is resilient to a schema change being made in the middle of + * producing pages for the driver if the query was prepared. + * + *
<p>
Drops column 'v' after paging the first row in. This column should still be present in the + * in-flight queries' rows, but its value should be null. The column should not be present in + * subsequent queries. + * + * @test_category queries + * @jira_ticket JAVA-1653 + * @since 1.2.0 + */ + @Test + public void prepared_statement_paging_should_be_resilient_to_schema_change() { + CqlSession session = sessionRule.session(); + // Create table and prepare select * query against it. + session.execute( + SimpleStatement.newInstance("CREATE TABLE test_prep (k text PRIMARY KEY, v int)") + .setExecutionProfile(SessionUtils.slowProfile(session))); + for (int i = 0; i < 100; i++) { + session.execute(String.format("INSERT INTO test_prep (k, v) VALUES ('foo', %d)", i)); + } + PreparedStatement prepared = session.prepare("SELECT * FROM test_prep WHERE k = ?"); + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 1) + .withDuration( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_FIRST_PAGE, Duration.ofSeconds(30)) + .withDuration( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofSeconds(30)); + ContinuousResultSet result = + session.executeContinuously(prepared.bind("foo").setExecutionProfile(profile)); + Iterator<Row> it = result.iterator(); + // First row should have a non-null value for v. + Row row0 = it.next(); + assertThat(row0.getString("k")).isNotNull(); + assertThat(row0.isNull("v")).isFalse(); + // Make schema change to drop v, its metadata should be present, values will be null. + CqlSession schemaChangeSession = + SessionUtils.newSession( + ccmRule, session.getKeyspace().orElseThrow(IllegalStateException::new)); + schemaChangeSession.execute( + SimpleStatement.newInstance("ALTER TABLE test_prep DROP v;") + .setExecutionProfile(SessionUtils.slowProfile(schemaChangeSession))); + while (it.hasNext()) { + // Each row should have a value for k, v should still be present, but null since column was + // dropped. + Row row = it.next(); + assertThat(row.getString("k")).isNotNull(); + if (ccmRule.isDistributionOf( + BackendType.DSE, (dist, cass) -> dist.compareTo(Version.parse("6.0.0")) >= 0)) { + // DSE 6 only, v should be null here since dropped. + // Not reliable for 5.1 since a page may have been queued before the schema changed. + assertThat(row.isNull("v")).isTrue(); + } + assertThat(row.getColumnDefinitions().contains("v")).isTrue(); + } + // Subsequent queries should lack v from metadata as it was dropped. + prepared = session.prepare("SELECT * FROM test_prep WHERE k = ?"); + result = session.executeContinuously(prepared.bind("foo").setExecutionProfile(profile)); + it = result.iterator(); + while (it.hasNext()) { + Row row = it.next(); + assertThat(row.getString("k")).isNotNull(); + assertThat(row.getColumnDefinitions().contains("v")).isFalse(); + } + } + + /** + * Validates that {@link ContinuousResultSet#cancel()} will cancel a continuous paging session by + * setting maxPagesPerSecond to 1 and sending a cancel immediately and ensuring the total number + * of rows iterated over is equal to the size of pageSize. + * + *
<p>
Also validates that it is possible to resume the operation using the paging state, as + * described in the javadocs of {@link ContinuousResultSet#cancel()}. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + public void should_cancel_with_synchronous_paging() { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + // create options and throttle at a page per second so + // cancel can go out before the next page is sent. + // Note that this might not be perfect if there are pauses + // in the JVM and cancel isn't sent soon enough. + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); + ContinuousResultSet pagingResult = + session.executeContinuously(statement.setExecutionProfile(profile)); + pagingResult.cancel(); + int i = 0; + for (Row row : pagingResult) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + // Expect only 10 rows as paging was cancelled immediately. + assertThat(i).isEqualTo(10); + // attempt to resume the operation from where we left off + ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); + ContinuousResultSet pagingResultResumed = + session.executeContinuously( + statement + .setExecutionProfile( + profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) + .setPagingState(pagingState)); + for (Row row : pagingResultResumed) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + assertThat(i).isEqualTo(100); + } + + /** + * Validates that {@link ContinuousAsyncResultSet#cancel()} will cancel a continuous paging + * session by setting maxPagesPerSecond to 1 and sending a cancel after the first page is received + * and then ensuring that the future returned from {@link + * ContinuousAsyncResultSet#fetchNextPage()} fails. + * + *
<p>
Also validates that it is possible to resume the operation using the paging state, as + * described in the javadocs of {@link ContinuousAsyncResultSet#cancel()}. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + public void should_cancel_with_asynchronous_paging() { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + // create options and throttle at a page per second so + // cancel can go out before the next page is sent. + // Note that this might not be perfect if there are pauses + // in the JVM and cancel isn't sent soon enough. + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); + CompletionStage<ContinuousAsyncResultSet> future = + session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); + ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); + // Calling cancel on the previous result should cause the next future to fail. + pagingResult.cancel(); + CompletionStage<ContinuousAsyncResultSet> fetchNextPageFuture = pagingResult.fetchNextPage(); + try { + // Expect future to fail since it was cancelled. + CompletableFutures.getUninterruptibly(fetchNextPageFuture); + fail("Expected a cancellation exception since paging was cancelled."); + } catch (CancellationException e) { + assertThat(e) + .hasMessageContaining("Can't get more results") + .hasMessageContaining("query was cancelled"); + } + int i = 0; + for (Row row : pagingResult.currentPage()) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + // Expect only 10 rows as this is the defined page size. + assertThat(i).isEqualTo(10); + // attempt to resume the operation from where we left off + ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); + future = + session.executeContinuouslyAsync( + statement + .setExecutionProfile( + profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) + .setPagingState(pagingState)); + ContinuousAsyncResultSet pagingResultResumed; + do { + pagingResultResumed = CompletableFutures.getUninterruptibly(future); + for (Row row : pagingResultResumed.currentPage()) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + if (pagingResultResumed.hasMorePages()) { + future = pagingResultResumed.fetchNextPage(); + } + } while (pagingResultResumed.hasMorePages()); + // expect the remaining 90 rows, for a total of 100 + assertThat(i).isEqualTo(100); + } + + /** + * Validates that {@link ContinuousAsyncResultSet#cancel()} will cancel a continuous paging + * session and the currently tracked {@link CompletionStage} tied to the paging session. + * + *
<p>
Also validates that it is possible to resume the operation using the paging state, as + * described in the javadocs of {@link ContinuousAsyncResultSet#cancel()}. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + public void should_cancel_future_when_cancelling_previous_result() { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + // create options and throttle at a page per second so + // cancel can go out before the next page is sent. + // Note that this might not be perfect if there are pauses + // in the JVM and cancel isn't sent soon enough. + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); + CompletionStage<ContinuousAsyncResultSet> future = + session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); + ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); + CompletionStage<ContinuousAsyncResultSet> fetchNextPageFuture = pagingResult.fetchNextPage(); + // Calling cancel on the previous result should cause the current future to be cancelled. + pagingResult.cancel(); + assertThat(fetchNextPageFuture.toCompletableFuture().isCancelled()).isTrue(); + try { + // Expect future to be cancelled since the previous result was cancelled. + CompletableFutures.getUninterruptibly(fetchNextPageFuture); + fail("Expected a cancellation exception since previous result was cancelled."); + } catch (CancellationException ce) { + // expected + } + int i = 0; + for (Row row : pagingResult.currentPage()) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + // Expect only 10 rows as this is the defined page size. + assertThat(i).isEqualTo(10); + // attempt to resume the operation from where we left off + ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); + future = + session.executeContinuouslyAsync( + statement + .setExecutionProfile( + profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) + .setPagingState(pagingState)); + ContinuousAsyncResultSet pagingResultResumed; + do { + pagingResultResumed = CompletableFutures.getUninterruptibly(future); + for (Row row : pagingResultResumed.currentPage()) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + if (pagingResultResumed.hasMorePages()) { + future = pagingResultResumed.fetchNextPage(); + } + } while (pagingResultResumed.hasMorePages()); + // expect the remaining 90 rows, for a total of 100 + assertThat(i).isEqualTo(100); + } + + /** + * Validates that {@link CompletableFuture#cancel(boolean)} will cancel a continuous paging + * session by setting maxPagesPerSecond to 1 and sending a cancel after the first page is received + * and then ensuring that the future returned from {@link + * ContinuousAsyncResultSet#fetchNextPage()} is cancelled. + * + *
<p>
Also validates that it is possible to resume the operation using the paging state, as + * described in the javadocs of {@link ContinuousAsyncResultSet#cancel()}. + * + * @test_category queries + * @jira_ticket JAVA-1322 + * @since 1.2.0 + */ + @Test + public void should_cancel_when_future_is_cancelled() { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + // create options and throttle at a page per second so + // cancel can go out before the next page is sent. + // Note that this might not be perfect if there are pauses + // in the JVM and cancel isn't sent soon enough. + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1); + CompletionStage<ContinuousAsyncResultSet> future = + session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); + ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); + CompletableFuture<ContinuousAsyncResultSet> fetchNextPageFuture = + pagingResult.fetchNextPage().toCompletableFuture(); + fetchNextPageFuture.cancel(false); + assertThat(fetchNextPageFuture.isCancelled()).isTrue(); + try { + // Expect cancellation. + CompletableFutures.getUninterruptibly(fetchNextPageFuture); + fail("Expected a cancellation exception since future was cancelled."); + } catch (CancellationException ce) { + // expected + } + int i = 0; + for (Row row : pagingResult.currentPage()) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + // Expect only 10 rows as this is the defined page size. + assertThat(i).isEqualTo(10); + // attempt to resume the operation from where we left off + ByteBuffer pagingState = pagingResult.getExecutionInfo().getPagingState(); + future = + session.executeContinuouslyAsync( + statement + .setExecutionProfile( + profile.withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)) + .setPagingState(pagingState)); + ContinuousAsyncResultSet pagingResultResumed; + do { + pagingResultResumed = CompletableFutures.getUninterruptibly(future); + for (Row row : pagingResultResumed.currentPage()) { + assertThat(row.getInt("v")).isEqualTo(i); + i++; + } + if (pagingResultResumed.hasMorePages()) { + future = pagingResultResumed.fetchNextPage(); + } + } while (pagingResultResumed.hasMorePages()); + // expect the remaining 90 rows, for a total of 100 + assertThat(i).isEqualTo(100); + } + + /** + * Validates that a client-side timeout is correctly reported to the caller. + * + * @test_category queries + * @jira_ticket JAVA-1390 + * @since 1.2.0 + */ + @Test + public void should_time_out_when_server_does_not_produce_pages_fast_enough() throws Exception { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + // Throttle server at a page per second and set client timeout much lower so that the client + // will experience a timeout. + // Note that this might not be perfect if there are pauses in the JVM and the timeout + // doesn't fire soon enough. 
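+ // With this profile (10 rows per page, at most one page per second) the server cannot + // deliver page 2 within the 100 ms client-side timeout configured below, so fetching the + // second page is expected to fail with a DriverTimeoutException.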
+ DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1) + .withDuration( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofMillis(100)); + CompletionStage<ContinuousAsyncResultSet> future = + session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); + ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); + try { + pagingResult.fetchNextPage().toCompletableFuture().get(); + fail("Expected a timeout"); + } catch (ExecutionException e) { + assertThat(e.getCause()) + .isInstanceOf(DriverTimeoutException.class) + .hasMessageContaining("Timed out waiting for page 2"); + } + } + + /** + * Validates that the driver behaves appropriately when the client gets behind while paging rows + * in a continuous paging session. The driver should set autoread to false on the channel for that + * connection until the client consumes enough pages, at which point it will reenable autoread and + * continue reading. + * + *
<p>
There is not really a direct way to verify that autoread is disabled, but delaying + * immediately after executing a continuous paging query should produce this effect. + * + * @test_category queries + * @jira_ticket JAVA-1375 + * @since 1.2.0 + */ + @Test + public void should_resume_reading_when_client_catches_up() { + CqlSession session = sessionRule.session(); + SimpleStatement statement = + SimpleStatement.newInstance("SELECT * from test_autoread where k=?", KEY); + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 100); + CompletionStage<ContinuousAsyncResultSet> result = + session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); + // Defer consuming rows for a second; this should cause autoread to be disabled. + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + // Start consuming rows; this should cause autoread to be reenabled once we consume some pages. + PageStatistics stats = + CompletableFutures.getUninterruptibly( + result.thenCompose(new AsyncContinuousPagingFunction())); + // 20k rows in this table. + assertThat(stats.rows).isEqualTo(20000); + // 200 * 100 = 20k. + assertThat(stats.pages).isEqualTo(200); + } + + private static class PageStatistics { + int rows; + int pages; + + PageStatistics(int rows, int pages) { + this.rows = rows; + this.pages = pages; + } + } + + /** + * A function that, when invoked, returns a transformed future with another {@link + * AsyncContinuousPagingFunction} wrapping {@link ContinuousAsyncResultSet#fetchNextPage()} if + * there are more pages; otherwise returns an immediate future that shares {@link PageStatistics} + * about how many rows were returned and how many pages were encountered. + * + *
<p>
Note that if data is observed to be out of order, this future fails with an exception. + */ + private static class AsyncContinuousPagingFunction + implements Function<ContinuousAsyncResultSet, CompletionStage<PageStatistics>> { + + private final int rowsSoFar; + + AsyncContinuousPagingFunction() { + this(0); + } + + AsyncContinuousPagingFunction(int rowsSoFar) { + this.rowsSoFar = rowsSoFar; + } + + @Override + public CompletionStage<PageStatistics> apply(ContinuousAsyncResultSet input) { + int rows = rowsSoFar; + // Iterate over page and ensure data is in order. + for (Row row : input.currentPage()) { + int v = row.getInt("v"); + if (v != rows) { + fail(String.format("Expected v == %d, got %d.", rows, v)); + } + rows++; + } + // If on last page, complete future, otherwise keep iterating. + if (!input.hasMorePages()) { + // DSE may send an empty page as it can't always know if it's done paging or not yet. + // See: CASSANDRA-8871. In this case, don't count this page. + int pages = rows == rowsSoFar ? input.pageNumber() - 1 : input.pageNumber(); + CompletableFuture<PageStatistics> future = new CompletableFuture<>(); + future.complete(new PageStatistics(rows, pages)); + return future; + } else { + return input.fetchNextPage().thenCompose(new AsyncContinuousPagingFunction(rows)); + } + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingITBase.java new file mode 100644 index 00000000000..4a68454d559 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/ContinuousPagingITBase.java @@ -0,0 +1,197 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.cql.continuous; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.codahale.metrics.Timer; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.tngtech.java.junit.dataprovider.DataProvider; +import java.time.Duration; +import java.util.UUID; + +public abstract class ContinuousPagingITBase { + + protected static final String KEY = "k"; + + static PreparedStatement prepared; + + protected static void initialize(CqlSession session, DriverExecutionProfile slowProfile) { + session.execute( + SimpleStatement.newInstance("CREATE TABLE test (k text, v int, PRIMARY KEY (k, v))") + .setExecutionProfile(slowProfile)); + // Load enough rows to cause TCP Zero Window. Default window size is 65535 bytes, each row + // is at least 48 bytes, so it would take ~1365 enqueued rows to zero window. + // Conservatively load 20k rows. + session.execute( + SimpleStatement.newInstance( + "CREATE TABLE test_autoread (k text, v int, v0 uuid, v1 uuid, PRIMARY KEY (k, v, v0))") + .setExecutionProfile(slowProfile)); + session.execute( + SimpleStatement.newInstance("CREATE TABLE test_prepare (k text PRIMARY KEY, v int)") + .setExecutionProfile(slowProfile)); + session.checkSchemaAgreement(); + prepared = session.prepare("SELECT v from test where k = ?"); + for (int i = 0; i < 100; i++) { + session.execute(String.format("INSERT INTO test (k, v) VALUES ('%s', %d)", KEY, i)); + } + int count = 0; + for (int i = 0; i < 200; i++) { + BatchStatement batch = BatchStatement.newInstance(DefaultBatchType.UNLOGGED); + for (int j = 0; j < 100; j++) { + batch = + batch.add( + SimpleStatement.newInstance( + "INSERT INTO test_autoread (k, v, v0, v1) VALUES (?, ?, ?, ?)", + KEY, + count++, + UUID.randomUUID(), + UUID.randomUUID())); + } + session.execute(batch); + } + for (int i = 0; i < 100; i++) { + session.execute(String.format("INSERT INTO test_prepare (k, v) VALUES ('%d', %d)", i, i)); + } + } + + @DataProvider(format = "%m[%p[0]]") + public static Object[][] pagingOptions() { + return new Object[][] { + // exact # of rows. + {new Options(100, false, 0, 0, 100, 1)}, + // # of rows - 1. + {new Options(99, false, 0, 0, 100, 2)}, + // # of rows / 2. + {new Options(50, false, 0, 0, 100, 2)}, + // 1 row per page. + {new Options(1, false, 0, 0, 100, 100)}, + // 10 rows per page, 10 pages overall = 100 (exact). + {new Options(10, false, 10, 0, 100, 10)}, + // 10 rows per page, 9 pages overall = 90 (less than exact number of pages). + {new Options(10, false, 9, 0, 90, 9)}, + // 10 rows per page, 2 pages per second should take ~5 secs. + {new Options(10, false, 0, 2, 100, 10)}, + // 8 bytes per page == 1 row per page as len(4) + int(4) for each row. + {new Options(8, true, 0, 0, 100, 100)}, + // 16 bytes per page == 2 rows per page. 
+ {new Options(16, true, 0, 0, 100, 50)}, + // 32 bytes per page == 4 rows per page. + {new Options(32, true, 0, 0, 100, 25)} + }; + } + + protected void validateMetrics(CqlSession session) { + Node node = session.getMetadata().getNodes().values().iterator().next(); + assertThat(session.getMetrics()).as("assert session.getMetrics() present").isPresent(); + Metrics metrics = session.getMetrics().get(); + assertThat(metrics.getNodeMetric(node, DefaultNodeMetric.CQL_MESSAGES)) + .as("assert metrics.getNodeMetric(node, DefaultNodeMetric.CQL_MESSAGES) present") + .isPresent(); + Timer messages = (Timer) metrics.getNodeMetric(node, DefaultNodeMetric.CQL_MESSAGES).get(); + await() + .atMost(Duration.ofSeconds(5)) + .untilAsserted( + () -> { + assertThat(messages.getCount()) + .as("assert messages.getCount() > 0") + .isGreaterThan(0); + assertThat(messages.getMeanRate()) + .as("assert messages.getMeanRate() > 0") + .isGreaterThan(0); + }); + assertThat(metrics.getSessionMetric(DseSessionMetric.CONTINUOUS_CQL_REQUESTS)) + .as("assert metrics.getSessionMetric(DseSessionMetric.CONTINUOUS_CQL_REQUESTS) present") + .isPresent(); + Timer requests = + (Timer) metrics.getSessionMetric(DseSessionMetric.CONTINUOUS_CQL_REQUESTS).get(); + await() + .atMost(Duration.ofSeconds(5)) + .untilAsserted( + () -> { + assertThat(requests.getCount()) + .as("assert requests.getCount() > 0") + .isGreaterThan(0); + assertThat(requests.getMeanRate()) + .as("assert requests.getMeanRate() > 0") + .isGreaterThan(0); + }); + } + + public static class Options { + public int pageSize; + public boolean sizeInBytes; + public int maxPages; + public int maxPagesPerSecond; + public int expectedRows; + public int expectedPages; + + Options( + int pageSize, + boolean sizeInBytes, + int maxPages, + int maxPagesPerSecond, + int expectedRows, + int expectedPages) { + this.pageSize = pageSize; + this.sizeInBytes = sizeInBytes; + this.maxPages = maxPages; + this.maxPagesPerSecond = maxPagesPerSecond; + this.expectedRows = expectedRows; + this.expectedPages = expectedPages; + } + + public DriverExecutionProfile asProfile(CqlSession session) { + return session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, pageSize) + .withBoolean(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES, sizeInBytes) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, maxPages) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, maxPagesPerSecond); + } + + @Override + public String toString() { + return "pageSize=" + + pageSize + + ", sizeInBytes=" + + sizeInBytes + + ", maxPages=" + + maxPages + + ", maxPagesPerSecond=" + + maxPagesPerSecond + + ", expectedRows=" + + expectedRows + + ", expectedPages=" + + expectedPages; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousPagingReactiveIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousPagingReactiveIT.java new file mode 100644 index 00000000000..f2a28d72597 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousPagingReactiveIT.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.cql.continuous.reactive; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import io.reactivex.Flowable; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1.0", + description = "Continuous paging is only available from 5.1.0 onwards") +@Category(ParallelizableTests.class) +@RunWith(DataProviderRunner.class) +public class ContinuousPagingReactiveIT extends ContinuousPagingITBase { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule<CqlSession> sessionRule = + SessionRule.builder(ccmRule) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METRICS_SESSION_ENABLED, + Collections.singletonList(DseSessionMetric.CONTINUOUS_CQL_REQUESTS.getPath())) + .withStringList( + DefaultDriverOption.METRICS_NODE_ENABLED, + Collections.singletonList(DefaultNodeMetric.CQL_MESSAGES.getPath())) + .build()) + .build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @BeforeClass + public static void setUp() { + initialize(sessionRule.session(), sessionRule.slowProfile()); + } + + @Test + @UseDataProvider("pagingOptions") + public void should_execute_reactively(Options options) { + CqlSession session = sessionRule.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + 
DriverExecutionProfile profile = options.asProfile(session); + ContinuousReactiveResultSet rs = + session.executeContinuouslyReactive(statement.setExecutionProfile(profile)); + List<ReactiveRow> results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).hasSize(options.expectedRows); + Set<ExecutionInfo> expectedExecInfos = new LinkedHashSet<>(); + for (int i = 0; i < results.size(); i++) { + ReactiveRow row = results.get(i); + assertThat(row.getInt("v")).isEqualTo(i); + expectedExecInfos.add(row.getExecutionInfo()); + } + + List<ExecutionInfo> execInfos = + Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + // DSE may send an empty page as it can't always know if it's done paging or not yet. + // See: CASSANDRA-8871. In this case, this page's execution info appears in + // rs.getExecutionInfos(), but is not present in expectedExecInfos since the page did not + // contain any rows. + assertThat(execInfos).containsAll(expectedExecInfos); + + List<ColumnDefinitions> colDefs = + Flowable.fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + ReactiveRow first = results.get(0); + assertThat(colDefs).hasSize(1).containsExactly(first.getColumnDefinitions()); + + List<Boolean> wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(first.wasApplied()); + + validateMetrics(session); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/GeometryIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/GeometryIT.java new file mode 100644 index 00000000000..83b01796337 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/GeometryIT.java @@ -0,0 +1,405 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.data.geometry; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.Sets; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import org.assertj.core.util.Preconditions; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ParallelizableTests.class}) +public abstract class GeometryIT<T extends Geometry> { + + private final Class<T> genericType; + private final T baseSample; + private final List<T> sampleData; + private final SessionRule<CqlSession> sessionRule; + + @SuppressWarnings("unchecked") + GeometryIT(List<T> sampleData, Class<T> genericType, SessionRule<CqlSession> sessionRule) { + Preconditions.checkArgument( + sampleData.size() >= 3, "Must be at least 3 samples, was given " + sampleData.size()); + this.baseSample = sampleData.get(0); + this.genericType = genericType; + this.sampleData = sampleData; + this.sessionRule = sessionRule; + } + + static void onTestContextInitialized( + String cqlTypeName, SessionRule<CqlSession> sessionRule) { + sessionRule + .session() + .execute( + SimpleStatement.builder(String.format("CREATE TYPE udt1 (g '%s')", cqlTypeName)) + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + sessionRule + .session() + .execute( + SimpleStatement.builder( + String.format( + "CREATE TABLE tbl (k uuid PRIMARY KEY, g '%s', l list<'%s'>, s set<'%s'>, m0 map<'%s',int>, m1 map<int,'%s'>, t tuple<'%s','%s','%s'>, u frozen<udt1>)", + cqlTypeName, + cqlTypeName, + cqlTypeName, + cqlTypeName, + cqlTypeName, + cqlTypeName, + cqlTypeName, + cqlTypeName)) + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + sessionRule + .session() + .execute( + SimpleStatement.builder( + String.format("CREATE TABLE tblpk (k '%s' primary key, v int)", cqlTypeName)) + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + sessionRule + .session() + .execute( + SimpleStatement.builder( + String.format( + "CREATE TABLE tblclustering (k0 int, k1 '%s', v int, primary key (k0, k1))", + cqlTypeName)) + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + + private <V> void validate(UUID key, String columnName, V expected, GenericType<V> type) { + ResultSet result = + sessionRule + .session() + .execute( + SimpleStatement.builder( + 
String.format("SELECT k,%s FROM tbl where k =? ", columnName)) + .addPositionalValue(key) + .build()); + Row row = result.iterator().next(); + assertThat(row.getUuid("k")).isEqualTo(key); + assertThat(row.get(columnName, type)).isEqualTo(expected); + assertThat(row.get(1, type)).isEqualTo(expected); + } + + private void validate(UUID key, T expected) { + validate(key, "g", expected, GenericType.of(genericType)); + } + + /** + * Validates that a given geometry value can be inserted into a column using codec.format() and + * verifies that it is stored correctly by retrieving it and ensuring it matches. + */ + @Test + public void should_insert_using_format() { + for (T expected : sampleData) { + + String val = null; + if (expected != null) { + TypeCodec codec = + sessionRule.session().getContext().getCodecRegistry().codecFor(expected); + val = codec.format(expected); + } + UUID key = Uuids.random(); + sessionRule + .session() + .execute(String.format("INSERT INTO tbl (k, g) VALUES (%s, %s)", key, val)); + validate(key, expected); + } + } + + /** + * Validates that a given geometry value can be inserted into a column by providing it as a simple + * statement parameter and verifies that it is stored correctly by retrieving it and ensuring it + * matches. + */ + @Test + public void should_insert_using_simple_statement_with_parameters() { + for (T expected : sampleData) { + UUID key = Uuids.random(); + sessionRule + .session() + .execute( + SimpleStatement.builder("INSERT INTO tbl (k, g) VALUES (?, ?)") + .addPositionalValues(key, expected) + .build()); + validate(key, expected); + } + } + /** + * Validates that a given geometry value can be inserted into a column by providing it as a bound + * parameter in a BoundStatement and verifies that it is stored correctly by retrieving it and + * ensuring it matches. + */ + @Test + public void should_insert_using_prepared_statement_with_parameters() { + for (T expected : sampleData) { + UUID key = Uuids.random(); + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, g) values (?, ?)"); + BoundStatement bs = + prepared.boundStatementBuilder().setUuid(0, key).set(1, expected, genericType).build(); + sessionRule.session().execute(bs); + validate(key, expected); + } + } + /** + * Validates that geometry values can be inserted as a list and verifies that the list is stored + * correctly by retrieving it and ensuring it matches. + */ + @Test + public void should_insert_as_list() { + UUID key = Uuids.random(); + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, l) values (?, ?)"); + + BoundStatement bs = + prepared + .boundStatementBuilder() + .setUuid(0, key) + .setList(1, sampleData, genericType) + .build(); + sessionRule.session().execute(bs); + validate(key, "l", sampleData, GenericType.listOf(genericType)); + } + /** + * Validates that geometry values can be inserted as a set and verifies that the set is stored + * correctly by retrieving it and ensuring it matches. 
+ */ + @Test + public void should_insert_as_set() { + UUID key = Uuids.random(); + Set<T> asSet = Sets.newHashSet(sampleData); + + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, s) values (?, ?)"); + BoundStatement bs = + prepared.boundStatementBuilder().setUuid(0, key).setSet(1, asSet, genericType).build(); + + sessionRule.session().execute(bs); + validate(key, "s", asSet, GenericType.setOf(genericType)); + } + + /** + * Validates that geometry values can be inserted into a map as keys and verifies that the map is + * stored correctly by retrieving it and ensuring it matches. + */ + @Test + public void should_insert_as_map_keys() { + UUID key = Uuids.random(); + ImmutableMap.Builder<T, Integer> builder = ImmutableMap.builder(); + int count = 0; + for (T val : sampleData) { + builder = builder.put(val, count++); + } + Map<T, Integer> asMapKeys = builder.build(); + + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, m0) values (?, ?)"); + BoundStatement bs = + prepared + .boundStatementBuilder() + .setUuid(0, key) + .setMap(1, asMapKeys, genericType, Integer.class) + .build(); + sessionRule.session().execute(bs); + validate(key, "m0", asMapKeys, GenericType.mapOf(genericType, Integer.class)); + } + + /** + * Validates that geometry values can be inserted into a map as values and verifies that the map + * is stored correctly by retrieving it and ensuring it matches. + */ + @Test + public void should_insert_as_map_values() { + UUID key = Uuids.random(); + ImmutableMap.Builder<Integer, T> builder = ImmutableMap.builder(); + int count = 0; + for (T val : sampleData) { + builder = builder.put(count++, val); + } + Map<Integer, T> asMapValues = builder.build(); + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, m1) values (?, ?)"); + BoundStatement bs = + prepared + .boundStatementBuilder() + .setUuid(0, key) + .setMap(1, asMapValues, Integer.class, genericType) + .build(); + sessionRule.session().execute(bs); + validate(key, "m1", asMapValues, GenericType.mapOf(Integer.class, genericType)); + } + + /** + * Validates that geometry values can be inserted as a tuple and verifies that the tuple is stored + * correctly by retrieving it and ensuring it matches. + */ + @Test + @Ignore + public void should_insert_as_tuple() { + UUID key = Uuids.random(); + + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, t) values (?, ?)"); + TupleType tupleType = (TupleType) prepared.getVariableDefinitions().get(1).getType(); + TupleValue tuple = tupleType.newValue(); + tuple = tuple.set(0, sampleData.get(0), genericType); + tuple = tuple.set(1, sampleData.get(1), genericType); + tuple = tuple.set(2, sampleData.get(2), genericType); + BoundStatement bs = + prepared.boundStatementBuilder().setUuid(0, key).setTupleValue(1, tuple).build(); + sessionRule.session().execute(bs); + ResultSet rs = + sessionRule + .session() + .execute( + SimpleStatement.builder("SELECT k,t FROM tbl where k=?") + .addPositionalValues(key) + .build()); + Row row = rs.iterator().next(); + assertThat(row.getUuid("k")).isEqualTo(key); + assertThat(row.getTupleValue("t")).isEqualTo(tuple); + assertThat(row.getTupleValue(1)).isEqualTo(tuple); + } + /** + * Validates that a geometry value can be inserted as a field in a UDT and verifies that the UDT + * is stored correctly by retrieving it and ensuring it matches. 
+ */ + @Test + @Ignore + public void should_insert_as_field_in_udt() { + UUID key = Uuids.random(); + UserDefinedType udtType = + sessionRule + .session() + .getMetadata() + .getKeyspace(sessionRule.session().getKeyspace().orElseThrow(AssertionError::new)) + .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("udt1"))) + .orElseThrow(AssertionError::new); + assertThat(udtType).isNotNull(); + UdtValue value = udtType.newValue(); + value = value.set("g", sampleData.get(0), genericType); + + PreparedStatement prepared = + sessionRule.session().prepare("INSERT INTO tbl (k, u) values (?, ?)"); + BoundStatement bs = + prepared.boundStatementBuilder().setUuid(0, key).setUdtValue(1, value).build(); + sessionRule.session().execute(bs); + + ResultSet rs = + sessionRule + .session() + .execute( + SimpleStatement.builder("SELECT k,u FROM tbl where k=?") + .addPositionalValues(key) + .build()); + Row row = rs.iterator().next(); + assertThat(row.getUuid("k")).isEqualTo(key); + assertThat(row.getUdtValue("u")).isEqualTo(value); + assertThat(row.getUdtValue(1)).isEqualTo(value); + } + + /** + * Validates that a geometry value can be inserted into a column that is the partition key and + * then validates that it can be queried back by partition key. + */ + @Test + public void should_accept_as_partition_key() { + sessionRule + .session() + .execute( + SimpleStatement.builder("INSERT INTO tblpk (k, v) VALUES (?,?)") + .addPositionalValues(baseSample, 1) + .build()); + ResultSet results = sessionRule.session().execute("SELECT k,v FROM tblpk"); + Row row = results.one(); + T key = row.get("k", genericType); + assertThat(key).isEqualTo(baseSample); + } + + /** + * Validates that geometry values can be inserted into a column that is a clustering key in rows + * sharing a partition key and then validates that the rows can be retrieved by partition key. + * + * @test_category dse:geospatial + */ + @Test + public void should_accept_as_clustering_key() { + PreparedStatement insert = + sessionRule.session().prepare("INSERT INTO tblclustering (k0, k1, v) values (?,?,?)"); + BatchStatementBuilder batchbuilder = BatchStatement.builder(DefaultBatchType.UNLOGGED); + + int count = 0; + for (T value : sampleData) { + BoundStatement bound = + insert + .boundStatementBuilder() + .setInt(0, 0) + .set(1, value, genericType) + .setInt(2, count++) + .build(); + batchbuilder.addStatement(bound); + } + sessionRule.session().execute(batchbuilder.build()); + + ResultSet result = + sessionRule + .session() + .execute( + SimpleStatement.builder("SELECT * from tblclustering where k0=?") + .addPositionalValue(0) + .build()); + + // The order of rows returned is not significant for geospatial types since they are stored in + // lexicographic byte order (8 bytes at a time). Thus we pull all rows, extract the values, and + // ensure all expected values were returned. + List<Row> rows = result.all(); + + assertThat(rows) + .extracting(row -> row.get("k1", genericType)) + .containsOnlyElementsOf(sampleData) + .hasSameSizeAs(sampleData); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/LineStringIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/LineStringIT.java new file mode 100644 index 00000000000..c626f0e26c6 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/LineStringIT.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.geometry; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import java.util.List; +import java.util.UUID; +import org.assertj.core.util.Lists; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement(type = BackendType.DSE, minInclusive = "5.0") +public class LineStringIT extends GeometryIT<LineString> { + + private static CcmRule ccm = CcmRule.getInstance(); + + private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + + private static final String LINE_STRING_TYPE = "LineStringType"; + + public LineStringIT() { + super( + Lists.newArrayList( + LineString.fromPoints(Point.fromCoordinates(0, 10), Point.fromCoordinates(10, 0)), + LineString.fromPoints( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 30), + Point.fromCoordinates(40, 40)), + LineString.fromPoints( + Point.fromCoordinates(-5, 0), + Point.fromCoordinates(0, 10), + Point.fromCoordinates(10, 5))), + LineString.class, + sessionRule); + } + + @BeforeClass + public static void initialize() { + onTestContextInitialized(LINE_STRING_TYPE, sessionRule); + } + + @Test + public void should_insert_and_retrieve_empty_linestring() { + LineString empty = LineString.fromWellKnownText("LINESTRING EMPTY"); + UUID key = Uuids.random(); + sessionRule + .session() + .execute( + SimpleStatement.builder("INSERT INTO tbl (k, g) VALUES (?, ?)") + .addPositionalValues(key, empty) + .build()); + + ResultSet result = + sessionRule + .session() + .execute( + SimpleStatement.builder("SELECT g from tbl where k=?") + .addPositionalValues(key) + .build()); + Row row = result.iterator().next(); + List<Point> points = row.get("g", LineString.class).getPoints(); + assertThat(points.isEmpty()).isTrue(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PointIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PointIT.java new file mode 100644 index 00000000000..b81049cd444 --- /dev/null +++ 
b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PointIT.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.data.geometry; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.assertj.core.util.Lists; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement(type = BackendType.DSE, minInclusive = "5.0") +public class PointIT extends GeometryIT { + + private static CcmRule ccm = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + + private static final String POINT_TYPE = "PointType"; + + public PointIT() { + super( + Lists.newArrayList( + Point.fromCoordinates(-1.0, -5), + Point.fromCoordinates(0, 0), + Point.fromCoordinates(1.1, 2.2), + Point.fromCoordinates(Double.MIN_VALUE, 0), + Point.fromCoordinates(Double.MAX_VALUE, Double.MIN_VALUE)), + Point.class, + sessionRule); + } + + @BeforeClass + public static void initialize() { + onTestContextInitialized(POINT_TYPE, sessionRule); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PolygonIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PolygonIT.java new file mode 100644 index 00000000000..1d9d49bd776 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/geometry/PolygonIT.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.data.geometry; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import java.util.UUID; +import org.assertj.core.util.Lists; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement(type = BackendType.DSE, minInclusive = "5.0") +public class PolygonIT extends GeometryIT { + + private static CcmRule ccm = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + + private static final String POLYGON_TYPE = "PolygonType"; + + private static Polygon squareInMinDomain = + Polygon.fromPoints( + Point.fromCoordinates(Double.MIN_VALUE, Double.MIN_VALUE), + Point.fromCoordinates(Double.MIN_VALUE, Double.MIN_VALUE + 1), + Point.fromCoordinates(Double.MIN_VALUE + 1, Double.MIN_VALUE + 1), + Point.fromCoordinates(Double.MIN_VALUE + 1, Double.MIN_VALUE)); + + private static Polygon triangle = + Polygon.fromPoints( + Point.fromCoordinates(-5, 10), + Point.fromCoordinates(5, 5), + Point.fromCoordinates(10, -5)); + + private static Polygon complexPolygon = + Polygon.builder() + .addRing( + Point.fromCoordinates(0, 0), + Point.fromCoordinates(0, 3), + Point.fromCoordinates(5, 3), + Point.fromCoordinates(5, 0)) + .addRing( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(1, 2), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(2, 1)) + .addRing( + Point.fromCoordinates(3, 1), + Point.fromCoordinates(3, 2), + Point.fromCoordinates(4, 2), + Point.fromCoordinates(4, 1)) + .build(); + + public PolygonIT() { + super( + Lists.newArrayList(squareInMinDomain, complexPolygon, triangle), + Polygon.class, + sessionRule); + } + + @BeforeClass + public static void initialize() { + onTestContextInitialized(POLYGON_TYPE, sessionRule); + } + + /** + * Validates that an empty {@link Polygon} can be inserted and retrieved. 
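+ * Both the exterior and interior rings are expected to come back empty after the round trip.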
+ * + * @jira_ticket JAVA-1076 + * @test_category dse:graph + */ + @Test + public void should_insert_and_retrieve_empty_polygon() { + Polygon empty = Polygon.builder().build(); + UUID key = Uuids.random(); + sessionRule + .session() + .execute( + SimpleStatement.builder("INSERT INTO tbl (k, g) VALUES (?, ?)") + .addPositionalValues(key, empty) + .build()); + + ResultSet result = + sessionRule + .session() + .execute( + SimpleStatement.builder("SELECT g from tbl where k=?") + .addPositionalValues(key) + .build()); + Row row = result.iterator().next(); + assertThat(row.get("g", Polygon.class).getInteriorRings()).isEmpty(); + assertThat(row.get("g", Polygon.class).getExteriorRing()).isEmpty(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeIT.java new file mode 100644 index 00000000000..9b2370e3376 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/data/time/DateRangeIT.java @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.data.time; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.google.common.collect.Sets; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; + +@Category({ParallelizableTests.class}) +@BackendRequirement(type = BackendType.DSE, minInclusive = "5.1") +public class DateRangeIT { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule sessionRule = + SessionRule.builder(ccmRule) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .build()) + .build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @Rule public TestName testName = new TestName(); + + /** + * Validates that data can be retrieved by primary key where its primary key is a 'DateRangeType' + * column, and that the data returned properly parses into the expected {@link DateRange}. + */ + @Test + public void should_use_date_range_as_primary_key() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute( + String.format("CREATE TABLE %s (k 'DateRangeType' PRIMARY KEY, v int)", tableName)); + session.execute( + String.format("INSERT INTO %s (k, v) VALUES ('[2010-12-03 TO 2010-12-04]', 1)", tableName)); + session.execute( + String.format( + "INSERT INTO %s (k, v) VALUES ('[2015-12-03T10:15:30.001Z TO 2016-01-01T00:05:11.967Z]', 2)", + tableName)); + + List rows = session.execute("SELECT * FROM " + tableName).all(); + + assertThat(rows).hasSize(2); + assertThat(rows.get(0).get("k", DateRange.class)) + .isEqualTo(DateRange.parse("[2010-12-03 TO 2010-12-04]")); + assertThat(rows.get(1).get("k", DateRange.class)) + .isEqualTo(DateRange.parse("[2015-12-03T10:15:30.001Z TO 2016-01-01T00:05:11.967Z]")); + + rows = + session + .execute( + String.format( + "SELECT * FROM %s WHERE k = '[2015-12-03T10:15:30.001Z TO 2016-01-01T00:05:11.967]'", + tableName)) + .all(); + assertThat(rows.size()).isEqualTo(1); + assertThat(rows.get(0).getInt("v")).isEqualTo(2); + } + + /** + * Validates that a 'DateRangeType' column can take a variety of {@link DateRange} inputs: + * + *
<ol>
+ *   <li>Upper bound unbounded
+ *   <li>Lower bound unbounded
+ *   <li>Unbounded
+ *   <li>Bounded
+ *   <li>null
+ *   <li>unset
+ * </ol>
        + */ + @Test + public void should_store_date_range() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute( + String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); + session.execute( + String.format( + "INSERT INTO %s (k, v) VALUES (1, '[2000-01-01T10:15:30.301Z TO *]')", tableName)); + session.execute( + String.format("INSERT INTO %s (k, v) VALUES (2, '[2000-02 TO 2000-03]')", tableName)); + session.execute(String.format("INSERT INTO %s (k, v) VALUES (3, '[* TO 2020]')", tableName)); + session.execute(String.format("INSERT INTO %s (k, v) VALUES (4, null)", tableName)); + session.execute(String.format("INSERT INTO %s (k) VALUES (5)", tableName)); + session.execute(String.format("INSERT INTO %s (k, v) VALUES (6, '*')", tableName)); + + List rows = session.execute("SELECT * FROM " + tableName).all(); + + assertThat(rows) + .extracting(input -> input.get("v", DateRange.class)) + .containsOnly( + DateRange.parse("[2000-01-01T10:15:30.301Z TO *]"), + DateRange.parse("[2000-02 TO 2000-03]"), + DateRange.parse("[* TO 2020]"), + null, + DateRange.parse("*")); + } + + /** + * Validates that if a provided {@link DateRange} for a 'DateRangeType' column has the bounds + * reversed (lower bound is later than upper bound), then an {@link InvalidQueryException} is + * thrown. + */ + @Test + public void should_disallow_invalid_order() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute( + String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); + + assertThatThrownBy( + () -> + session.execute( + String.format( + "INSERT INTO %s (k, v) " + + "VALUES (1, '[2020-01-01T10:15:30.009Z TO 2010-01-01T00:05:11.031Z]')", + tableName))) + .isInstanceOf(InvalidQueryException.class) + .hasMessageContaining("Wrong order: 2020-01-01T10:15:30.009Z TO 2010-01-01T00:05:11.031Z") + .hasMessageContaining( + "Could not parse date range: [2020-01-01T10:15:30.009Z TO 2010-01-01T00:05:11.031Z]"); + } + + /** Validates that {@link DateRange} can be used in UDT and Tuple types. 
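Both plain and frozen variants of the UDT and tuple columns are exercised.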
*/ + @Test + public void should_allow_date_range_in_udt_and_tuple() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute("CREATE TYPE IF NOT EXISTS test_udt (i int, range 'DateRangeType')"); + session.execute( + String.format( + "CREATE TABLE %s (k int PRIMARY KEY, u test_udt, uf frozen, " + + "t tuple<'DateRangeType', int>, tf frozen>)", + tableName)); + session.execute( + String.format( + "INSERT INTO %s (k, u, uf, t, tf) VALUES (" + + "1, " + + "{i: 10, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " + + "{i: 20, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " + + "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 30), " + + "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 40))", + tableName)); + + DateRange expected = DateRange.parse("[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]"); + + List rows = session.execute("SELECT * FROM " + tableName).all(); + assertThat(rows).hasSize(1); + + UdtValue u = rows.get(0).get("u", UdtValue.class); + DateRange dateRange = u.get("range", DateRange.class); + assertThat(dateRange).isEqualTo(expected); + assertThat(u.getInt("i")).isEqualTo(10); + + u = rows.get(0).get("uf", UdtValue.class); + dateRange = u.get("range", DateRange.class); + assertThat(dateRange).isEqualTo(expected); + assertThat(u.getInt("i")).isEqualTo(20); + + TupleValue t = rows.get(0).get("t", TupleValue.class); + dateRange = t.get(0, DateRange.class); + assertThat(dateRange).isEqualTo(expected); + assertThat(t.getInt(1)).isEqualTo(30); + + t = rows.get(0).get("tf", TupleValue.class); + dateRange = t.get(0, DateRange.class); + assertThat(dateRange).isEqualTo(expected); + assertThat(t.getInt(1)).isEqualTo(40); + } + + /** Validates that {@link DateRange} can be used in Collection types (Map, Set, List). 
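Ranges appear both as map keys (dr2i) and as map values (i2dr).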
*/ + @Test + public void should_allow_date_range_in_collections() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute( + String.format( + "CREATE TABLE %s (k int PRIMARY KEY, l list<'DateRangeType'>, s set<'DateRangeType'>, " + + "dr2i map<'DateRangeType', int>, i2dr map)", + tableName)); + session.execute( + String.format( + "INSERT INTO %s (k, l, s, i2dr, dr2i) VALUES (" + + "1, " + // l + + "['[2000-01-01T10:15:30.001Z TO 2020]', '[2010-01-01T10:15:30.001Z TO 2020]'," + + " '2001-01-02'], " + // s + + "{'[2000-01-01T10:15:30.001Z TO 2020]', '[2000-01-01T10:15:30.001Z TO 2020]', " + + "'[2010-01-01T10:15:30.001Z TO 2020]'}, " + // i2dr + + "{1: '[2000-01-01T10:15:30.001Z TO 2020]', " + + "2: '[2010-01-01T10:15:30.001Z TO 2020]'}, " + // dr2i + + "{'[2000-01-01T10:15:30.001Z TO 2020]': 1, " + + "'[2010-01-01T10:15:30.001Z TO 2020]': 2})", + tableName)); + + List rows = session.execute("SELECT * FROM " + tableName).all(); + assertThat(rows.size()).isEqualTo(1); + + List drList = rows.get(0).getList("l", DateRange.class); + assertThat(drList.size()).isEqualTo(3); + assertThat(drList.get(0)).isEqualTo(DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]")); + assertThat(drList.get(1)).isEqualTo(DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]")); + assertThat(drList.get(2)).isEqualTo(DateRange.parse("2001-01-02")); + + Set drSet = rows.get(0).getSet("s", DateRange.class); + assertThat(drSet.size()).isEqualTo(2); + assertThat(drSet) + .isEqualTo( + Sets.newHashSet( + DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]"), + DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]"))); + + Map dr2i = rows.get(0).getMap("dr2i", DateRange.class, Integer.class); + assertThat(dr2i.size()).isEqualTo(2); + assertThat((int) dr2i.get(DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]"))).isEqualTo(1); + assertThat((int) dr2i.get(DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]"))).isEqualTo(2); + + Map i2dr = rows.get(0).getMap("i2dr", Integer.class, DateRange.class); + assertThat(i2dr.size()).isEqualTo(2); + assertThat(i2dr.get(1)).isEqualTo(DateRange.parse("[2000-01-01T10:15:30.001Z TO 2020]")); + assertThat(i2dr.get(2)).isEqualTo(DateRange.parse("[2010-01-01T10:15:30.001Z TO 2020]")); + } + + /** + * Validates that a 'DateRangeType' column can take a {@link DateRange} inputs as a prepared + * statement parameter. 
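+ * Bounded, fully unbounded ([* TO *]) and single-bounded (*) ranges are round-tripped, and the precision of each bound is verified.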
+ */ + @Test + public void should_bind_date_range_in_prepared_statement() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute( + String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); + PreparedStatement statement = + session.prepare(String.format("INSERT INTO %s (k,v) VALUES(?,?)", tableName)); + + DateRange expected = DateRange.parse("[2007-12-03 TO 2007-12]"); + session.execute(statement.bind(1, expected)); + List rows = session.execute("SELECT * FROM " + tableName).all(); + assertThat(rows.size()).isEqualTo(1); + DateRange actual = rows.get(0).get("v", DateRange.class); + assertThat(actual).isEqualTo(expected); + assertThat(actual.getLowerBound().getPrecision()).isEqualTo(DateRangePrecision.DAY); + assertThat(actual.getUpperBound()) + .hasValueSatisfying( + upperBound -> + assertThat(upperBound.getPrecision()).isEqualTo(DateRangePrecision.MONTH)); + assertThat(actual.toString()).isEqualTo("[2007-12-03 TO 2007-12]"); + + expected = DateRange.parse("[* TO *]"); + session.execute(statement.bind(1, expected)); + rows = session.execute("SELECT * FROM " + tableName).all(); + assertThat(rows.size()).isEqualTo(1); + actual = rows.get(0).get("v", DateRange.class); + assertThat(actual).isEqualTo(expected); + assertThat(actual.getLowerBound().isUnbounded()).isTrue(); + assertThat(actual.isSingleBounded()).isFalse(); + assertThat(actual.getUpperBound()) + .hasValueSatisfying(upperBound -> assertThat(upperBound.isUnbounded()).isTrue()); + assertThat(actual.toString()).isEqualTo("[* TO *]"); + + expected = DateRange.parse("*"); + session.execute(statement.bind(1, expected)); + rows = session.execute("SELECT * FROM " + tableName).all(); + assertThat(rows.size()).isEqualTo(1); + actual = rows.get(0).get("v", DateRange.class); + assertThat(actual).isEqualTo(expected); + assertThat(actual.getLowerBound().isUnbounded()).isTrue(); + assertThat(actual.isSingleBounded()).isTrue(); + assertThat(actual.toString()).isEqualTo("*"); + } + + /** + * Validates that 'DateRangeType' columns are retrievable using SELECT JSON queries + * and that their value representations match their input. 
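+ * The returned JSON embeds each range as a string in the same textual form that was used on insert.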
+ */ + @Test + public void should_select_date_range_using_json() throws Exception { + CqlSession session = sessionRule.session(); + String tableName = testName.getMethodName(); + + session.execute( + String.format("CREATE TABLE %s (k int PRIMARY KEY, v 'DateRangeType')", tableName)); + PreparedStatement statement = + session.prepare(String.format("INSERT INTO %s (k,v) VALUES(?,?)", tableName)); + + DateRange expected = DateRange.parse("[2007-12-03 TO 2007-12]"); + session.execute(statement.bind(1, expected)); + List rows = session.execute("SELECT JSON * FROM " + tableName).all(); + assertThat(rows.get(0).getString(0)) + .isEqualTo("{\"k\": 1, \"v\": \"[2007-12-03 TO 2007-12]\"}"); + + expected = DateRange.parse("[* TO *]"); + session.execute(statement.bind(1, expected)); + rows = session.execute("SELECT JSON * FROM " + tableName).all(); + assertThat(rows.get(0).getString(0)).isEqualTo("{\"k\": 1, \"v\": \"[* TO *]\"}"); + + expected = DateRange.parse("*"); + session.execute(statement.bind(1, expected)); + rows = session.execute("SELECT JSON * FROM " + tableName).all(); + assertThat(rows.get(0).getString(0)).isEqualTo("{\"k\": 1, \"v\": \"*\"}"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphDataTypeITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphDataTypeITBase.java new file mode 100644 index 00000000000..d42b156a8be --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphDataTypeITBase.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph; + +import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import com.datastax.oss.driver.shaded.guava.common.net.InetAddresses; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.AssumptionViolatedException; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public abstract class ClassicGraphDataTypeITBase { + + private static final boolean IS_DSE50 = + CcmBridge.VERSION.compareTo(Objects.requireNonNull(Version.parse("5.1"))) < 0; + private static final Set TYPES_REQUIRING_DSE51 = + ImmutableSet.of("Date()", "Time()", "Point()", "Linestring()", "Polygon()"); + + private static final AtomicInteger SCHEMA_COUNTER = new AtomicInteger(); + + @DataProvider + public static Object[][] typeSamples() { + return new Object[][] { + // Types that DSE supports. 
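+ // Each row pairs a schema type definition (passed verbatim to schema.propertyKey(...)) with a sample value expected to round-trip, e.g. {"Int()", 42}.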
+ {"Boolean()", true}, + {"Boolean()", false}, + {"Smallint()", Short.MAX_VALUE}, + {"Smallint()", Short.MIN_VALUE}, + {"Smallint()", (short) 0}, + {"Smallint()", (short) 42}, + {"Int()", Integer.MAX_VALUE}, + {"Int()", Integer.MIN_VALUE}, + {"Int()", 0}, + {"Int()", 42}, + {"Bigint()", Long.MAX_VALUE}, + {"Bigint()", Long.MIN_VALUE}, + {"Bigint()", 0L}, + {"Double()", Double.MAX_VALUE}, + {"Double()", Double.MIN_VALUE}, + {"Double()", 0.0d}, + {"Double()", Math.PI}, + {"Float()", Float.MAX_VALUE}, + {"Float()", Float.MIN_VALUE}, + {"Float()", 0.0f}, + {"Text()", ""}, + {"Text()", "75"}, + {"Text()", "Lorem Ipsum"}, + // Inet, UUID, Date + {"Inet()", InetAddresses.forString("127.0.0.1")}, + {"Inet()", InetAddresses.forString("0:0:0:0:0:0:0:1")}, + {"Inet()", InetAddresses.forString("2001:db8:85a3:0:0:8a2e:370:7334")}, + {"Uuid()", UUID.randomUUID()}, + // Timestamps + {"Timestamp()", Instant.ofEpochMilli(123)}, + {"Timestamp()", Instant.ofEpochMilli(1488313909)}, + {"Duration()", java.time.Duration.parse("P2DT3H4M")}, + {"Date()", LocalDate.of(2016, 5, 12)}, + {"Time()", LocalTime.parse("18:30:41.554")}, + {"Time()", LocalTime.parse("18:30:41.554010034")}, + // Blob + {"Blob()", "Hello World!".getBytes(Charsets.UTF_8)}, + // BigDecimal/BigInteger + {"Decimal()", new BigDecimal("8675309.9998")}, + {"Varint()", new BigInteger("8675309")}, + // Geospatial types + {"Point().withBounds(-2, -2, 2, 2)", Point.fromCoordinates(0, 1)}, + {"Point().withBounds(-40, -40, 40, 40)", Point.fromCoordinates(-5, 20)}, + { + "Linestring().withGeoBounds()", + LineString.fromPoints( + Point.fromCoordinates(30, 10), + Point.fromCoordinates(10, 30), + Point.fromCoordinates(40, 40)) + }, + { + "Polygon().withGeoBounds()", + Polygon.builder() + .addRing( + Point.fromCoordinates(35, 10), + Point.fromCoordinates(45, 45), + Point.fromCoordinates(15, 40), + Point.fromCoordinates(10, 20), + Point.fromCoordinates(35, 10)) + .addRing( + Point.fromCoordinates(20, 30), + Point.fromCoordinates(35, 35), + Point.fromCoordinates(30, 20), + Point.fromCoordinates(20, 30)) + .build() + } + }; + } + + @UseDataProvider("typeSamples") + @Test + public void should_create_and_retrieve_vertex_property_with_correct_type( + String type, Object value) { + if (IS_DSE50 && requiresDse51(type)) { + throw new AssumptionViolatedException(type + " not supported in DSE " + CcmBridge.VERSION); + } + + int id = SCHEMA_COUNTER.getAndIncrement(); + + String vertexLabel = "vertex" + id; + String propertyName = "prop" + id; + GraphStatement addVertexLabelAndProperty = + ScriptGraphStatement.builder( + "schema.propertyKey(property)." + + type + + ".create()\n" + + "schema.vertexLabel(vertexLabel).properties(property).create()") + .setQueryParam("vertexLabel", vertexLabel) + .setQueryParam("property", propertyName) + .build(); + + session().execute(addVertexLabelAndProperty); + + Vertex v = insertVertexAndReturn(vertexLabel, propertyName, value); + + assertThat(v).hasProperty(propertyName, value); + } + + private boolean requiresDse51(String type) { + for (String prefix : TYPES_REQUIRING_DSE51) { + if (type.startsWith(prefix)) { + return true; + } + } + return false; + } + + public abstract Vertex insertVertexAndReturn( + String vertexLabel, String propertyName, Object value); + + /** + * Note that the {@link SessionRule} (and setupSchema method) must be redeclared in each subclass, + * since it depends on the CCM rule that can't be shared across serial tests. 
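+ * Each subclass exposes the session of its own rule through this accessor.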
+ */ + public abstract CqlSession session(); +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphGeoSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphGeoSearchIndexIT.java new file mode 100644 index 00000000000..9878f1186e6 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphGeoSearchIndexIT.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.shaded.guava.common.base.Joiner; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import java.util.ArrayList; +import java.util.Collection; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1", + description = "DSE 5.1 required for graph geo indexing") +public class ClassicGraphGeoSearchIndexIT extends GraphGeoSearchIndexITBase { + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); + + @Override + protected boolean isGraphBinary() { + return false; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return g; + } + + @BeforeClass + public static void setup() { + for (String setupQuery : geoIndices()) { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); + } + + CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user_p", true); + } + + /** + * A schema representing an address book with search enabled on name, description, and + * coordinates. 
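+ * Two point properties are created per index type: one with explicit bounds and one with geo bounds.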
+ */ + public static Collection geoIndices() { + Object[][] providerIndexTypes = indexTypes(); + String[] indexTypes = new String[providerIndexTypes.length]; + for (int i = 0; i < providerIndexTypes.length; i++) { + indexTypes[i] = (String) providerIndexTypes[i][0]; + } + + StringBuilder schema = new StringBuilder("schema.propertyKey('full_name').Text().create()\n"); + StringBuilder propertyKeys = new StringBuilder(); + StringBuilder vertexLabel = new StringBuilder("schema.vertexLabel('user').properties("); + StringBuilder indices = new StringBuilder(); + StringBuilder vertex0 = + new StringBuilder("g.addV('user').property('full_name', 'Paul Thomas Joe')"); + StringBuilder vertex1 = + new StringBuilder("g.addV('user').property('full_name', 'George Bill Steve')"); + String vertex2 = "g.addV('user').property('full_name', 'James Paul Joe')"; + StringBuilder vertex3 = new StringBuilder("g.addV('user').property('full_name', 'Jill Alice')"); + + ArrayList propertyNames = new ArrayList<>(); + propertyNames.add("'full_name'"); + + for (String indexType : indexTypes) { + + propertyKeys.append( + String.format( + "schema.propertyKey('pointPropWithBounds_%s')." + + "Point().withBounds(0.000000, 0.000000, 100.000000, 100.000000).create()\n", + indexType)); + + propertyKeys.append( + String.format( + "schema.propertyKey('pointPropWithGeoBounds_%s').Point().withGeoBounds().create()\n", + indexType)); + + propertyNames.add("'pointPropWithBounds_" + indexType + "'"); + propertyNames.add("'pointPropWithGeoBounds_" + indexType + "'"); + + if (indexType.equals("search")) { + + indices.append( + String.format( + "schema.vertexLabel('user').index('search').search().by('pointPropWithBounds_%s').withError(0.00001, 0.0).by('pointPropWithGeoBounds_%s').withError(0.00001, 0.0).add()\n", + indexType, indexType)); + } else { + + indices.append( + String.format( + "schema.vertexLabel('user').index('by_pointPropWithBounds_%s').%s().by('pointPropWithBounds_%s').add()\n", + indexType, indexType, indexType)); + + indices.append( + String.format( + "schema.vertexLabel('user').index('by_pointPropWithGeoBounds_%s').%s().by('pointPropWithGeoBounds_%s').add()\n", + indexType, indexType, indexType)); + } + + vertex0.append( + String.format( + ".property('pointPropWithBounds_%s', 'POINT(40.0001 40)').property('pointPropWithGeoBounds_%s', 'POINT(40.0001 40)')", + indexType, indexType)); + vertex1.append( + String.format( + ".property('pointPropWithBounds_%s', 'POINT(40 40)').property('pointPropWithGeoBounds_%s', 'POINT(40 40)')", + indexType, indexType)); + vertex3.append( + String.format( + ".property('pointPropWithBounds_%s', 'POINT(30 30)').property('pointPropWithGeoBounds_%s', 'POINT(30 30)')", + indexType, indexType)); + } + + vertexLabel.append(Joiner.on(", ").join(propertyNames)); + vertexLabel.append(").create()\n"); + + schema.append(propertyKeys).append(vertexLabel).append(indices); + + return Lists.newArrayList( + SampleGraphScripts.MAKE_STRICT, + schema.toString(), + vertex0.toString(), + vertex1.toString(), + vertex2, + vertex3.toString()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphTextSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphTextSearchIndexIT.java new file mode 100644 index 00000000000..13d503d6b25 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/ClassicGraphTextSearchIndexIT.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.shaded.guava.common.base.Joiner; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import java.util.ArrayList; +import java.util.Collection; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1", + description = "DSE 5.1 required for graph geo indexing") +public class ClassicGraphTextSearchIndexIT extends GraphTextSearchIndexITBase { + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); + + /** + * A schema representing an address book with 3 properties (full_name_*, description_*, alias_*) + * created for each type of index (search, secondary, materialized). 
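+ * Four sample vertices are inserted so that the text predicates under test have data to match.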
+ */ + public static Collection textIndices() { + Object[][] providerIndexTypes = indexTypes(); + String[] indexTypes = new String[providerIndexTypes.length]; + for (int i = 0; i < providerIndexTypes.length; i++) { + indexTypes[i] = (String) providerIndexTypes[i][0]; + } + + StringBuilder schema = new StringBuilder(); + StringBuilder propertyKeys = new StringBuilder(); + StringBuilder vertexLabel = new StringBuilder("schema.vertexLabel('user').properties("); + StringBuilder indices = new StringBuilder(); + StringBuilder vertex0 = new StringBuilder("g.addV('user')"); + StringBuilder vertex1 = new StringBuilder("g.addV('user')"); + StringBuilder vertex2 = new StringBuilder("g.addV('user')"); + StringBuilder vertex3 = new StringBuilder("g.addV('user')"); + + ArrayList propertyNames = new ArrayList<>(); + for (String indexType : indexTypes) { + propertyKeys.append( + String.format( + "schema.propertyKey('full_name_%s').Text().create()\n" + + "schema.propertyKey('description_%s').Text().create()\n" + + "schema.propertyKey('alias_%s').Text().create()\n", + indexType, indexType, indexType)); + + propertyNames.add("'full_name_" + indexType + "'"); + propertyNames.add("'description_" + indexType + "'"); + propertyNames.add("'alias_" + indexType + "'"); + + if (indexType.equals("search")) { + indices.append( + "schema.vertexLabel('user').index('search').search().by('full_name_search').asString().by('description_search').asText().by('alias_search').asString().add()\n"); + } else { + indices.append( + String.format( + "schema.vertexLabel('user').index('by_full_name_%s').%s().by('full_name_%s').add()\n", + indexType, indexType, indexType)); + indices.append( + String.format( + "schema.vertexLabel('user').index('by_description_%s').%s().by('description_%s').add()\n", + indexType, indexType, indexType)); + indices.append( + String.format( + "schema.vertexLabel('user').index('by_alias_name_%s').%s().by('alias_%s').add()\n", + indexType, indexType, indexType)); + } + + vertex0.append( + String.format( + ".property('full_name_%s', 'Paul Thomas Joe').property('description_%s', 'Lives by the hospital').property('alias_%s', 'mario')", + indexType, indexType, indexType)); + vertex1.append( + String.format( + ".property('full_name_%s', 'George Bill Steve').property('description_%s', 'A cold dude').property('alias_%s', 'wario')", + indexType, indexType, indexType)); + vertex2.append( + String.format( + ".property('full_name_%s', 'James Paul Joe').property('description_%s', 'Likes to hang out').property('alias_%s', 'bowser')", + indexType, indexType, indexType)); + vertex3.append( + String.format( + ".property('full_name_%s', 'Jill Alice').property('description_%s', 'Enjoys a very nice cold coca cola').property('alias_%s', 'peach')", + indexType, indexType, indexType)); + } + + vertexLabel.append(Joiner.on(", ").join(propertyNames)); + vertexLabel.append(").create()\n"); + + schema.append(propertyKeys).append(vertexLabel).append(indices); + + return Lists.newArrayList( + SampleGraphScripts.MAKE_STRICT, + schema.toString(), + vertex0.toString(), + vertex1.toString(), + vertex2.toString(), + vertex3.toString()); + } + + @BeforeClass + public static void setup() { + for (String setupQuery : textIndices()) { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); + } + + CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user_p", true); + } + + @Override + protected boolean isGraphBinary() { + return false; + } + + @Override + protected GraphTraversalSource 
graphTraversalSource() { + return g; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphDataTypeITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphDataTypeITBase.java new file mode 100644 index 00000000000..7fe31a059d7 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphDataTypeITBase.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import static com.datastax.oss.driver.api.core.type.DataTypes.BIGINT; +import static com.datastax.oss.driver.api.core.type.DataTypes.INT; +import static com.datastax.oss.driver.api.core.type.DataTypes.TEXT; +import static com.datastax.oss.driver.api.core.type.DataTypes.listOf; +import static com.datastax.oss.driver.api.core.type.DataTypes.tupleOf; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.data.geometry.LineString; +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.dse.driver.api.core.data.geometry.Polygon; +import com.datastax.dse.driver.api.core.graph.predicates.Geo; +import com.datastax.dse.driver.api.core.type.DseDataTypes; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.TupleType; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneId; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.Map; +import org.junit.Test; + +public abstract class CoreGraphDataTypeITBase { + + protected abstract CqlSession session(); + + protected abstract String graphName(); + + @Test + public void should_create_and_retrieve_correct_data_with_types() { + CqlSession session = session(); + + // use CQL to create type for now because DSP-17567 is not in yet, so this is more stable + session.execute( + String.format( + "CREATE TYPE %s.udt_graphbinary(simple text, complex tuple, missing text)", + graphName())); + + session.execute( + String.format( + "CREATE TYPE %s.udt_graphbinarygeo(point 'PointType', line 'LineStringType', poly 'PolygonType')", + graphName())); + + ImmutableMap.Builder properties = + 
ImmutableMap.builder() + .put("Ascii", "test") + .put("Bigint", 5L) + .put("Boolean", true) + .put("Date", LocalDate.of(2007, 7, 7)) + .put("Decimal", BigDecimal.valueOf(2.3)) + .put("Double", 4.5d) + .put("Float", 4.8f) + .put("Int", 45) + .put("Smallint", (short) 1) + .put("Text", "test") + .put("Time", LocalTime.now(ZoneId.systemDefault())) + .put("Timeuuid", Uuids.timeBased()) + .put("Timestamp", Instant.now().truncatedTo(ChronoUnit.MILLIS)) + .put("Uuid", java.util.UUID.randomUUID()) + .put("Varint", BigInteger.valueOf(3234)) + .put("Blob", ByteBuffer.wrap(new byte[] {1, 2, 3})) + .put("Tinyint", (byte) 38) + .put("listOf(Int)", Arrays.asList(2, 3, 4)) + .put("setOf(Int)", Sets.newHashSet(2, 3, 4)) + .put("mapOf(Int, Text)", ImmutableMap.of(2, "two", 4, "four")) + .put("Duration", CqlDuration.newInstance(1, 2, 3)) + .put("LineString", Geo.lineString(1, 2, 3, 4, 5, 6)) + .put("Point", Geo.point(3, 4)) + .put("Polygon", Geo.polygon(Geo.point(3, 4), Geo.point(5, 4), Geo.point(6, 6))) + .put("tupleOf(Int, Text)", tupleOf(INT, TEXT).newValue(5, "Bar")) + .put( + "typeOf('udt_graphbinary')", + session + .getMetadata() + .getKeyspace(graphName()) + .flatMap(keyspace -> keyspace.getUserDefinedType("udt_graphbinary")) + .orElseThrow(IllegalStateException::new) + .newValue( + "some text", tupleOf(INT, TEXT).newValue(5, "Bar"), "some missing text")) + .put( + "typeOf('udt_graphbinarygeo')", + session + .getMetadata() + .getKeyspace(graphName()) + .flatMap( + keyspaceMetadata -> + keyspaceMetadata.getUserDefinedType("udt_graphbinarygeo")) + .orElseThrow(IllegalStateException::new) + .newValue( + Point.fromCoordinates(3.3, 4.4), + LineString.fromPoints( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(3, 3)), + Polygon.fromPoints( + Point.fromCoordinates(3, 4), + Point.fromCoordinates(5, 4), + Point.fromCoordinates(6, 6)))); + + TupleType tuple = tupleOf(DseDataTypes.POINT, DseDataTypes.LINE_STRING, DseDataTypes.POLYGON); + tuple.attach(session.getContext()); + + properties.put( + "tupleOf(Point, LineString, Polygon)", + tuple.newValue( + Point.fromCoordinates(3.3, 4.4), + LineString.fromPoints( + Point.fromCoordinates(1, 1), + Point.fromCoordinates(2, 2), + Point.fromCoordinates(3, 3)), + Polygon.fromPoints( + Point.fromCoordinates(3, 4), + Point.fromCoordinates(5, 4), + Point.fromCoordinates(6, 6)))); + + int vertexID = 1; + String vertexLabel = "graphBinaryAllTypes"; + + runTest(properties.build(), vertexLabel, vertexID); + } + + @Test + public void should_insert_and_retrieve_nested_UDTS_and_tuples() { + CqlSession session = session(); + + // use CQL to create type for now because DSP-17567 is not in yet, so this is more stable + session.execute(String.format("CREATE TYPE %s.udt1(a int, b text)", graphName())); + + session.execute( + String.format( + "CREATE TYPE %s.udt2(" + + "a int" + + ", b text" + + ", c frozen" + + ", mylist list" + + ", mytuple_withlist tuple>>>" + + ")", + graphName())); + + session.execute( + String.format( + "CREATE TYPE %s.udt3(" + + "a list" + + ", b set" + + ", c map" + + ", d list>>" + + ", e set>>" + + ", f list>>" + + ")", + graphName())); + + UserDefinedType udt1 = + session + .getMetadata() + .getKeyspace(graphName()) + .flatMap(keyspace -> keyspace.getUserDefinedType("udt1")) + .orElseThrow(IllegalStateException::new); + UdtValue udtValue1 = udt1.newValue(1, "2"); + + UserDefinedType udt2 = + session + .getMetadata() + .getKeyspace(graphName()) + .flatMap(keyspace -> keyspace.getUserDefinedType("udt2")) + 
.orElseThrow(IllegalStateException::new); + TupleType secondNested = tupleOf(BIGINT, listOf(BIGINT)); + TupleType firstNested = tupleOf(TEXT, secondNested); + UdtValue udtValue2 = + udt2.newValue( + 1, + "2", + udt1.newValue(3, "4"), + ImmutableList.of(5L), + firstNested.newValue("6", secondNested.newValue(7L, ImmutableList.of(8L)))); + + UserDefinedType udt3 = + session + .getMetadata() + .getKeyspace(graphName()) + .flatMap(keyspace -> keyspace.getUserDefinedType("udt3")) + .orElseThrow(IllegalStateException::new); + UdtValue udtValue3 = + udt3.newValue( + ImmutableList.of(1), + ImmutableSet.of(2.1f), + ImmutableMap.of("3", 4L), + ImmutableList.of(ImmutableList.of(5.1d, 6.1d), ImmutableList.of(7.1d)), + ImmutableSet.of(ImmutableSet.of(8.1f), ImmutableSet.of(9.1f)), + ImmutableList.of(tupleOf(INT, TEXT).newValue(10, "11"))); + + Map properties = + ImmutableMap.builder() + .put("frozen(typeOf('udt1'))", udtValue1) + .put("frozen(typeOf('udt2'))", udtValue2) + .put("frozen(typeOf('udt3'))", udtValue3) + .build(); + + int vertexID = 1; + String vertexLabel = "graphBinaryNestedTypes"; + + runTest(properties, vertexLabel, vertexID); + } + + private void runTest(Map properties, String vertexLabel, int vertexID) { + // setup schema + session().execute(createVertexLabelStatement(properties, vertexLabel)); + + // execute insert query and read query + Map results = insertVertexThenReadProperties(properties, vertexID, vertexLabel); + + // test valid properties are returned + properties.forEach((k, v) -> assertThat(results.get(formatPropertyName(k))).isEqualTo(v)); + } + + private static GraphStatement createVertexLabelStatement( + Map properties, String vertexLabel) { + StringBuilder ddl = + new StringBuilder("schema.vertexLabel(vertexLabel).ifNotExists().partitionBy('id', Int)"); + + for (Map.Entry entry : properties.entrySet()) { + String typeDefinition = entry.getKey(); + String propName = formatPropertyName(typeDefinition); + + ddl.append(String.format(".property('%s', %s)", propName, typeDefinition)); + } + ddl.append(".create()"); + + return ScriptGraphStatement.newInstance(ddl.toString()) + .setQueryParam("vertexLabel", vertexLabel); + } + + protected abstract Map insertVertexThenReadProperties( + Map properties, int vertexID, String vertexLabel); + + protected static String formatPropertyName(String originalName) { + return String.format( + "prop%s", + originalName.replace("(", "").replace(")", "").replace(", ", "").replace("'", "")); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphGeoSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphGeoSearchIndexIT.java new file mode 100644 index 00000000000..12db8820117 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphGeoSearchIndexIT.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import java.util.Collection; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "DSE 6.8.0 required for Core graph support") +public class CoreGraphGeoSearchIndexIT extends GraphGeoSearchIndexITBase { + + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()) + .with("allow-filtering"); + + @Override + protected boolean isGraphBinary() { + return true; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return g; + } + + @BeforeClass + public static void setup() { + for (String setupQuery : geoIndices()) { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); + } + + CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user", true); + } + + /** + * A schema representing an address book with search enabled on name, description, and + * coordinates. 
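+ * Core graphs only support search indexes here; any other index type is rejected with an UnsupportedOperationException.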
+ */ + public static Collection geoIndices() { + Object[][] providerIndexTypes = indexTypes(); + String[] indexTypes = new String[providerIndexTypes.length]; + for (int i = 0; i < providerIndexTypes.length; i++) { + indexTypes[i] = (String) providerIndexTypes[i][0]; + } + + StringBuilder schema = + new StringBuilder("schema.vertexLabel('user').partitionBy('full_name', Text)"); + StringBuilder propertyKeys = new StringBuilder(); + StringBuilder indices = new StringBuilder(); + StringBuilder vertex0 = + new StringBuilder("g.addV('user').property('full_name', 'Paul Thomas Joe')"); + StringBuilder vertex1 = + new StringBuilder("g.addV('user').property('full_name', 'George Bill Steve')"); + String vertex2 = "g.addV('user').property('full_name', 'James Paul Joe')"; + StringBuilder vertex3 = new StringBuilder("g.addV('user').property('full_name', 'Jill Alice')"); + + for (String indexType : indexTypes) { + propertyKeys.append(String.format(".property('pointPropWithBounds_%s', Point)\n", indexType)); + + propertyKeys.append( + String.format(".property('pointPropWithGeoBounds_%s', Point)\n", indexType)); + + if (indexType.equals("search")) { + indices.append( + String.format( + "schema.vertexLabel('user').searchIndex().by('pointPropWithBounds_%s').by('pointPropWithGeoBounds_%s').create()\n", + indexType, indexType)); + + } else { + throw new UnsupportedOperationException("IndexType other than search is not supported."); + } + + vertex0.append( + String.format( + ".property('pointPropWithBounds_%s', point(40.0001,40)).property('pointPropWithGeoBounds_%s', point(40.0001,40))", + indexType, indexType)); + vertex1.append( + String.format( + ".property('pointPropWithBounds_%s', point(40,40)).property('pointPropWithGeoBounds_%s', point(40,40))", + indexType, indexType)); + vertex3.append( + String.format( + ".property('pointPropWithBounds_%s', point(30,30)).property('pointPropWithGeoBounds_%s', point(30,30))", + indexType, indexType)); + } + + schema.append(propertyKeys).append(".create();\n").append(indices); + + return Lists.newArrayList( + schema.toString(), vertex0.toString(), vertex1.toString(), vertex2, vertex3.toString()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphTextSearchIndexIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphTextSearchIndexIT.java new file mode 100644 index 00000000000..5545c3c00ac --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CoreGraphTextSearchIndexIT.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import java.util.Collection; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "DSE 6.8.0 required for Core graph support") +public class CoreGraphTextSearchIndexIT extends GraphTextSearchIndexITBase { + + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder().withDseWorkloads("graph", "solr").build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()) + .with("allow-filtering"); + + @Override + protected boolean isGraphBinary() { + return true; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return g; + } + + /** + * A schema representing an address book with 3 properties (full_name_*, description_*, alias_*) + * created for each type of index (search, secondary, materialized). 
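+   *
+   * <p>(In practice only the 'search' index type is exercised for now; see the indexTypes() data
+   * provider in GraphTextSearchIndexITBase.)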
+ */ + public static Collection textIndices() { + Object[][] providerIndexTypes = indexTypes(); + String[] indexTypes = new String[providerIndexTypes.length]; + for (int i = 0; i < providerIndexTypes.length; i++) { + indexTypes[i] = (String) providerIndexTypes[i][0]; + } + + StringBuilder schema = new StringBuilder("schema.vertexLabel('user')"); + StringBuilder propertyKeys = new StringBuilder(); + StringBuilder indices = new StringBuilder(); + StringBuilder vertex0 = new StringBuilder("g.addV('user')"); + StringBuilder vertex1 = new StringBuilder("g.addV('user')"); + StringBuilder vertex2 = new StringBuilder("g.addV('user')"); + StringBuilder vertex3 = new StringBuilder("g.addV('user')"); + + for (String indexType : indexTypes) { + propertyKeys.append( + String.format( + ".partitionBy('full_name_%s', Text)" + + ".property('description_%s', Text)" + + ".property('alias_%s', Text)\n", + indexType, indexType, indexType)); + + if (indexType.equals("search")) { + indices.append( + "schema.vertexLabel('user').searchIndex().by('full_name_search').asString().by('description_search').asText().by('alias_search').asString().create()\n"); + } else { + throw new UnsupportedOperationException("IndexType other than search is not supported."); + } + + vertex0.append( + String.format( + ".property('full_name_%s', 'Paul Thomas Joe').property('description_%s', 'Lives by the hospital').property('alias_%s', 'mario')", + indexType, indexType, indexType)); + vertex1.append( + String.format( + ".property('full_name_%s', 'George Bill Steve').property('description_%s', 'A cold dude').property('alias_%s', 'wario')", + indexType, indexType, indexType)); + vertex2.append( + String.format( + ".property('full_name_%s', 'James Paul Joe').property('description_%s', 'Likes to hang out').property('alias_%s', 'bowser')", + indexType, indexType, indexType)); + vertex3.append( + String.format( + ".property('full_name_%s', 'Jill Alice').property('description_%s', 'Enjoys a very nice cold coca cola').property('alias_%s', 'peach')", + indexType, indexType, indexType)); + } + + schema.append(propertyKeys).append(".create();\n").append(indices); + + return Lists.newArrayList( + schema.toString(), + vertex0.toString(), + vertex1.toString(), + vertex2.toString(), + vertex3.toString()); + } + + @BeforeClass + public static void setup() { + for (String setupQuery : textIndices()) { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); + } + + CCM_RULE.getCcmBridge().reloadCore(1, SESSION_RULE.getGraphName(), "user", true); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CqlCollectionIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CqlCollectionIT.java new file mode 100644 index 00000000000..8bc497c37db --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/CqlCollectionIT.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import static com.datastax.dse.driver.api.core.graph.predicates.CqlCollection.contains; +import static com.datastax.dse.driver.api.core.graph.predicates.CqlCollection.containsKey; +import static com.datastax.dse.driver.api.core.graph.predicates.CqlCollection.entryEq; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.graph.predicates.CqlCollection; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8", + description = "DSE 6.8.0 required for collection predicates support") +public class CqlCollectionIT { + + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder().withDseWorkloads("graph").build(); + + private static final SessionRule SESSION_RULE = + new CqlSessionRuleBuilder(CCM_RULE) + .withCreateGraph() + .withCoreEngine() + .withGraphProtocol("graph-binary-1.0") + .build(); + + @ClassRule public static TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); + + @BeforeClass + public static void setup() { + for (String setupQuery : createSchema()) { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(setupQuery)); + } + } + + private static Collection createSchema() { + return ImmutableList.of( + "schema.vertexLabel('software').ifNotExists().partitionBy('name', Varchar)" + + ".property('myList', listOf(Varchar))" + + ".property('mySet', setOf(Varchar))" + + ".property('myMapKeys', mapOf(Varchar, Int))" + + ".property('myMapValues', mapOf(Int, Varchar))" + + ".property('myMapEntries', mapOf(Int, Varchar))" + + ".property('myFrozenList', frozen(listOf(Varchar)))" + + ".property('myFrozenSet', frozen(setOf(Float)))" + + ".property('myFrozenMap', frozen(mapOf(Int, Varchar)))" + + ".create()", + "schema.vertexLabel('software').secondaryIndex('by_myList').ifNotExists().by('myList').create();" + + 
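+            // The secondary indexes below differ in what they cover: indexKeys() indexes the
+            // map's keys, indexValues() its values, indexEntries() its key/value pairs, and
+            // indexFull() the entire frozen collection.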
"schema.vertexLabel('software').secondaryIndex('by_mySet').ifNotExists().by('mySet').create();" + + "schema.vertexLabel('software').secondaryIndex('by_myMapKeys').ifNotExists().by('myMapKeys').indexKeys().create();" + + "schema.vertexLabel('software').secondaryIndex('by_myMapValues').ifNotExists().by('myMapValues').indexValues().create();" + + "schema.vertexLabel('software').secondaryIndex('by_myMapEntries').ifNotExists().by('myMapEntries').indexEntries().create();" + + "schema.vertexLabel('software').secondaryIndex('by_myFrozenList').ifNotExists().by('myFrozenList').indexFull().create();" + + "schema.vertexLabel('software').secondaryIndex('by_myFrozenSet').ifNotExists().by('myFrozenSet').indexFull().create();" + + "schema.vertexLabel('software').secondaryIndex('by_myFrozenMap').ifNotExists().by('myFrozenMap').indexFull().create()"); + } + + @Test + public void should_apply_contains_predicate_to_non_frozen_list() { + CqlSession session = SESSION_RULE.session(); + + List myList1 = com.google.common.collect.ImmutableList.of("apple", "banana"); + List myList2 = com.google.common.collect.ImmutableList.of("cranberry", "orange"); + + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse list 1") + .property("myList", myList1))); + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse list 2") + .property("myList", myList2))); + + assertThat(g.V().has("software", "myList", contains("apple")).values("myList").toList()) + .hasSize(1) + .contains(myList1) + .doesNotContain(myList2); + assertThat(g.V().has("software", "myList", contains("strawberry")).toList()).isEmpty(); + } + + @Test + public void should_apply_contains_predicate_to_non_frozen_set() { + CqlSession session = SESSION_RULE.session(); + + Set mySet1 = ImmutableSet.of("apple", "banana"); + Set mySet2 = ImmutableSet.of("cranberry", "orange"); + + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g.addV("software").property("name", "dse set 1").property("mySet", mySet1))); + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g.addV("software").property("name", "dse set 2").property("mySet", mySet2))); + + assertThat(g.V().has("software", "mySet", contains("apple")).values("mySet").toList()) + .hasSize(1) + .contains(mySet1) + .doesNotContain(mySet2); + assertThat(g.V().has("software", "mySet", contains("strawberry")).toList()).isEmpty(); + } + + @Test + public void should_apply_containsKey_predicate_to_non_frozen_map() { + CqlSession session = SESSION_RULE.session(); + + Map myMap1 = ImmutableMap.builder().put("id1", 1).build(); + Map myMap2 = ImmutableMap.builder().put("id2", 2).build(); + + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse map containsKey 1") + .property("myMapKeys", myMap1))); + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse map containsKey 2") + .property("myMapKeys", myMap2))); + + assertThat(g.V().has("software", "myMapKeys", containsKey("id1")).values("myMapKeys").toList()) + .hasSize(1) + .contains(myMap1) + .doesNotContain(myMap2); + assertThat(g.V().has("software", "myMapKeys", containsKey("id3")).toList()).isEmpty(); + } + + @Test + public void should_apply_containsValue_predicate_to_non_frozen_map() { + CqlSession session = SESSION_RULE.session(); + + Map myMap1 = ImmutableMap.builder().put(11, "abc").build(); + Map myMap2 = 
ImmutableMap.builder().put(22, "def").build(); + + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse map containsValue 1") + .property("myMapValues", myMap1))); + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse map containsValue 2") + .property("myMapValues", myMap2))); + assertThat( + g.V() + .has("software", "myMapValues", CqlCollection.containsValue("abc")) + .values("myMapValues") + .toList()) + .hasSize(1) + .contains(myMap1) + .doesNotContain(myMap2); + assertThat(g.V().has("software", "myMapValues", CqlCollection.containsValue("xyz")).toList()) + .isEmpty(); + } + + @Test + public void should_apply_entryEq_predicate_to_non_frozen_map() { + CqlSession session = SESSION_RULE.session(); + + Map myMap1 = ImmutableMap.builder().put(11, "abc").build(); + Map myMap2 = ImmutableMap.builder().put(22, "def").build(); + + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse map entryEq 1") + .property("myMapEntries", myMap1))); + session.execute( + FluentGraphStatement.newInstance( + DseGraph.g + .addV("software") + .property("name", "dse map entryEq 2") + .property("myMapEntries", myMap2))); + assertThat( + g.V() + .has("software", "myMapEntries", entryEq(11, "abc")) + .values("myMapEntries") + .toList()) + .hasSize(1) + .contains(myMap1) + .doesNotContain(myMap2); + assertThat(g.V().has("software", "myMapEntries", entryEq(11, "xyz")).toList()).isEmpty(); + assertThat(g.V().has("software", "myMapEntries", entryEq(33, "abc")).toList()).isEmpty(); + assertThat(g.V().has("software", "myMapEntries", entryEq(33, "xyz")).toList()).isEmpty(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphAuthenticationIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphAuthenticationIT.java new file mode 100644 index 00000000000..de1c23fd661 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphAuthenticationIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph; + +import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; +import java.util.concurrent.TimeUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.0", + description = "DSE 5 required for Graph") +public class GraphAuthenticationIT { + + @ClassRule + public static CustomCcmRule ccm = + CustomCcmRule.builder() + .withDseConfiguration("authentication_options.enabled", true) + .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") + .withDseWorkloads("graph") + .build(); + + @BeforeClass + public static void sleepForAuth() { + if (ccm.getCassandraVersion().compareTo(Version.V2_2_0) < 0) { + // Sleep for 1 second to allow C* auth to do its work. This is only needed for 2.1 + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + } + } + + @Test + public void should_execute_graph_query_on_authenticated_connection() { + CqlSession dseSession = + SessionUtils.newSession( + ccm, + DriverConfigLoader.programmaticBuilder() + .withString(DseDriverOption.AUTH_PROVIDER_AUTHORIZATION_ID, "") + .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") + .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra") + .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) + .build()); + + GraphNode gn = + dseSession.execute(ScriptGraphStatement.newInstance("1+1").setSystemQuery(true)).one(); + assertThat(gn).isNotNull(); + assertThat(gn.asInt()).isEqualTo(2); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphGeoSearchIndexITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphGeoSearchIndexITBase.java new file mode 100644 index 00000000000..67d0cb34d43 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphGeoSearchIndexITBase.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.assertj.core.api.AssertionsForClassTypes.assertThatCode;
+
+import com.datastax.dse.driver.api.core.data.geometry.Point;
+import com.datastax.dse.driver.api.core.graph.predicates.Geo;
+import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import com.tngtech.java.junit.dataprovider.UseDataProvider;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.assertj.core.api.Assumptions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(DataProviderRunner.class)
+public abstract class GraphGeoSearchIndexITBase {
+
+  protected abstract boolean isGraphBinary();
+
+  protected abstract GraphTraversalSource graphTraversalSource();
+
+  @DataProvider
+  public static Object[][] indexTypes() {
+    return new Object[][] {{"search"}
+
+      // FIXME for some reason, materialized and secondary indices have decided not to work
+      // I get an exception saying "there is no index for this query, here is the defined
+      // indices: " and the list contains the indices that are needed. Mysterious.
+      // There may be something to do with differences in the CCMBridge adapter of the new
+      // driver; some changes may cause materialized views and secondary indices not to be
+      // considered for graph:
+      //
+      // , {"materialized"}
+      // , {"secondary"}
+    };
+  }
+
+  @UseDataProvider("indexTypes")
+  @Test
+  public void search_by_distance_cartesian_graphson(String indexType) {
+    // cartesian geometry is not supported by graph-binary
+    Assumptions.assumeThat(isGraphBinary()).isFalse();
+    // In cartesian geometry, the distance between POINT(30 30) and POINT(40 40) is exactly
+    // 14.142135623730951; any point further than that should be detected outside of the range.
+    // The vertex "Paul Thomas Joe" is at POINT(40.0001 40), and shouldn't be detected inside the
+    // range for classic.
+
+    GraphTraversal<Vertex, String> traversal =
+        graphTraversalSource()
+            .V()
+            .has(
+                "user",
+                "pointPropWithBounds_" + indexType,
+                Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951))
+            .values("full_name");
+    assertThat(traversal.toList()).containsOnly("George Bill Steve", "Jill Alice");
+  }
+
+  @UseDataProvider("indexTypes")
+  @Test
+  public void search_by_distance_geodetic(String indexType) {
+    // In geodetic geometry, the distance between POINT(30 30) and POINT(40 40) is exactly
+    // 12.908258700131379; any point further than that should be detected outside of the range.
+    // The vertex "Paul Thomas Joe" is at POINT(40.0001 40), and shouldn't be detected inside the
+    // range.
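+    // (For reference: the cartesian distance above is sqrt(10^2 + 10^2) = 14.142135623730951,
+    // while the geodetic distance is measured in degrees of arc on the sphere, which is why
+    // Geo.Unit.DEGREES is passed below.)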
+ GraphTraversal traversal = + graphTraversalSource() + .V() + .has( + "user", + "pointPropWithGeoBounds_" + indexType, + Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES)) + .values("full_name"); + assertThat(traversal.toList()).containsOnly("George Bill Steve", "Jill Alice"); + } + + @Test + public void + should_fail_if_geodetic_predicate_used_against_cartesian_property_with_search_index() { + + // for graph_binary cartesian properties are not supported, thus it does not fail + if (isGraphBinary()) { + assertThatCode( + () -> { + GraphTraversal traversal = + graphTraversalSource() + .V() + .has( + "user", + "pointPropWithBounds_search", + Geo.inside( + Point.fromCoordinates(30, 30), + 12.908258700131379, + Geo.Unit.DEGREES)) + .values("full_name"); + traversal.toList(); + }) + .doesNotThrowAnyException(); + } else { + try { + GraphTraversal traversal = + graphTraversalSource() + .V() + .has( + "user", + "pointPropWithBounds_search", + Geo.inside(Point.fromCoordinates(30, 30), 12.908258700131379, Geo.Unit.DEGREES)) + .values("full_name"); + traversal.toList(); + fail("Should have failed executing the traversal because the property type is incorrect"); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()) + .contains("Distance units cannot be used in queries against non-geodetic points."); + } + } + } + + @Test + public void + should_fail_if_cartesian_predicate_used_against_geodetic_property_with_search_index() { + + if (isGraphBinary()) { + try { + GraphTraversal traversal = + graphTraversalSource() + .V() + .has( + "user", + "pointPropWithGeoBounds_search", + Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951)) + .values("full_name"); + traversal.toList(); + fail("Should have failed executing the traversal because the property type is incorrect"); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()) + .contains("Predicate 'insideCartesian' is not supported on property"); + } + } else { + try { + GraphTraversal traversal = + graphTraversalSource() + .V() + .has( + "user", + "pointPropWithGeoBounds_search", + Geo.inside(Point.fromCoordinates(30, 30), 14.142135623730951)) + .values("full_name"); + traversal.toList(); + fail("Should have failed executing the traversal because the property type is incorrect"); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()) + .contains("Distance units are required for queries against geodetic points."); + } + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphPagingIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphPagingIT.java new file mode 100644 index 00000000000..01938c34e07 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphPagingIT.java @@ -0,0 +1,510 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import static com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase.Options; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; + +import com.codahale.metrics.Timer; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.dse.driver.internal.core.graph.MultiPageGraphResultSet; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.util.CountingIterator; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.net.SocketAddress; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutionException; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "Graph paging requires DSE 6.8+") +@RunWith(DataProviderRunner.class) +public class GraphPagingIT { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METRICS_SESSION_ENABLED, + Collections.singletonList(DseSessionMetric.GRAPH_REQUESTS.getPath())) + .withStringList( + DefaultDriverOption.METRICS_NODE_ENABLED, + Collections.singletonList(DseNodeMetric.GRAPH_MESSAGES.getPath())) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance( + "schema.vertexLabel('person')" + + ".partitionBy('pk', Int)" + + 
".clusterBy('cc', Int)" + + ".property('name', Text)" + + ".create();") + .setGraphName(SESSION_RULE.getGraphName())); + for (int i = 1; i <= 100; i++) { + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance( + String.format( + "g.addV('person').property('pk',0).property('cc',%d).property('name', '%s');", + i, "user" + i)) + .setGraphName(SESSION_RULE.getGraphName())); + } + } + + @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") + @Test + public void synchronous_paging_with_options(Options options) { + // given + DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.ENABLED); + + if (options.sizeInBytes) { + // Page sizes in bytes are not supported with graph queries + return; + } + + // when + GraphResultSet result = + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + + // then + List nodes = result.all(); + + assertThat(((CountingIterator) result.iterator()).remaining()).isZero(); + assertThat(nodes).hasSize(options.expectedRows); + for (int i = 1; i <= nodes.size(); i++) { + GraphNode node = nodes.get(i - 1); + assertThat(node.asString()).isEqualTo("user" + i); + } + assertThat(result.getRequestExecutionInfo()).isNotNull(); + assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) + .isEqualTo(firstCcmNode()); + assertIfMultiPage(result, options.expectedPages); + validateMetrics(SESSION_RULE.session()); + } + + @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") + @Test + public void synchronous_paging_with_options_when_auto(Options options) { + // given + DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.AUTO); + + if (options.sizeInBytes) { + // Page sizes in bytes are not supported with graph queries + return; + } + + // when + GraphResultSet result = + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + + // then + List nodes = result.all(); + + assertThat(((CountingIterator) result.iterator()).remaining()).isZero(); + assertThat(nodes).hasSize(options.expectedRows); + for (int i = 1; i <= nodes.size(); i++) { + GraphNode node = nodes.get(i - 1); + assertThat(node.asString()).isEqualTo("user" + i); + } + assertThat(result.getRequestExecutionInfo()).isNotNull(); + assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) + .isEqualTo(firstCcmNode()); + + assertIfMultiPage(result, options.expectedPages); + validateMetrics(SESSION_RULE.session()); + } + + private void assertIfMultiPage(GraphResultSet result, int expectedPages) { + if (result instanceof MultiPageGraphResultSet) { + assertThat(((MultiPageGraphResultSet) result).getRequestExecutionInfos()) + .hasSize(expectedPages); + assertThat(result.getRequestExecutionInfo()) + .isSameAs( + ((MultiPageGraphResultSet) result).getRequestExecutionInfos().get(expectedPages - 1)); + } + } + + @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") + @Test + public void synchronous_options_with_paging_disabled_should_fallback_to_single_page( + Options options) { + // given + DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.DISABLED); + + if 
(options.sizeInBytes) { + // Page sizes in bytes are not supported with graph queries + return; + } + + // when + GraphResultSet result = + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + + // then + List nodes = result.all(); + + assertThat(((CountingIterator) result.iterator()).remaining()).isZero(); + assertThat(nodes).hasSize(100); + for (int i = 1; i <= nodes.size(); i++) { + GraphNode node = nodes.get(i - 1); + assertThat(node.asString()).isEqualTo("user" + i); + } + assertThat(result.getRequestExecutionInfo()).isNotNull(); + assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) + .isEqualTo(firstCcmNode()); + validateMetrics(SESSION_RULE.session()); + } + + @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") + @Test + public void asynchronous_paging_with_options(Options options) + throws ExecutionException, InterruptedException { + // given + DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.ENABLED); + + if (options.sizeInBytes) { + // Page sizes in bytes are not supported with graph queries + return; + } + + // when + CompletionStage result = + SESSION_RULE + .session() + .executeAsync( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + + // then + checkAsyncResult(result, options, 0, 1, new ArrayList<>()); + validateMetrics(SESSION_RULE.session()); + } + + @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") + @Test + public void asynchronous_paging_with_options_when_auto(Options options) + throws ExecutionException, InterruptedException { + // given + DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.AUTO); + + if (options.sizeInBytes) { + // Page sizes in bytes are not supported with graph queries + return; + } + + // when + CompletionStage result = + SESSION_RULE + .session() + .executeAsync( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + + // then + checkAsyncResult(result, options, 0, 1, new ArrayList<>()); + validateMetrics(SESSION_RULE.session()); + } + + @UseDataProvider(location = ContinuousPagingITBase.class, value = "pagingOptions") + @Test + public void asynchronous_options_with_paging_disabled_should_fallback_to_single_page( + Options options) throws ExecutionException, InterruptedException { + // given + DriverExecutionProfile profile = enableGraphPaging(options, PagingEnabledOptions.DISABLED); + + if (options.sizeInBytes) { + // Page sizes in bytes are not supported with graph queries + return; + } + + // when + CompletionStage result = + SESSION_RULE + .session() + .executeAsync( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + + // then + AsyncGraphResultSet asyncGraphResultSet = result.toCompletableFuture().get(); + for (int i = 1; i <= 100; i++, asyncGraphResultSet.remaining()) { + GraphNode node = asyncGraphResultSet.one(); + assertThat(node.asString()).isEqualTo("user" + i); + } + 
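+    // the whole result set was returned as a single page, so nothing should remain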
assertThat(asyncGraphResultSet.remaining()).isEqualTo(0); + validateMetrics(SESSION_RULE.session()); + } + + private void checkAsyncResult( + CompletionStage future, + Options options, + int rowsFetched, + int pageNumber, + List graphExecutionInfos) + throws ExecutionException, InterruptedException { + AsyncGraphResultSet result = future.toCompletableFuture().get(); + int remaining = result.remaining(); + rowsFetched += remaining; + assertThat(remaining).isLessThanOrEqualTo(options.pageSize); + + if (options.expectedRows == rowsFetched) { + assertThat(result.hasMorePages()).isFalse(); + } else { + assertThat(result.hasMorePages()).isTrue(); + } + + int first = (pageNumber - 1) * options.pageSize + 1; + int last = (pageNumber - 1) * options.pageSize + remaining; + + for (int i = first; i <= last; i++, remaining--) { + GraphNode node = result.one(); + assertThat(node.asString()).isEqualTo("user" + i); + assertThat(result.remaining()).isEqualTo(remaining - 1); + } + + assertThat(result.remaining()).isZero(); + assertThat(result.getRequestExecutionInfo()).isNotNull(); + assertThat(result.getRequestExecutionInfo().getCoordinator().getEndPoint().resolve()) + .isEqualTo(firstCcmNode()); + + graphExecutionInfos.add(result.getRequestExecutionInfo()); + + assertThat(graphExecutionInfos).hasSize(pageNumber); + assertThat(result.getRequestExecutionInfo()).isSameAs(graphExecutionInfos.get(pageNumber - 1)); + if (pageNumber == options.expectedPages) { + assertThat(result.hasMorePages()).isFalse(); + assertThat(options.expectedRows).isEqualTo(rowsFetched); + assertThat(options.expectedPages).isEqualTo(pageNumber); + } else { + assertThat(result.hasMorePages()).isTrue(); + checkAsyncResult( + result.fetchNextPage(), options, rowsFetched, pageNumber + 1, graphExecutionInfos); + } + } + + @Test + public void should_cancel_result_set() { + // given + DriverExecutionProfile profile = + enableGraphPaging() + .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 1) + .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, 10); + + // when + GraphStatement statement = + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile); + MultiPageGraphResultSet results = + (MultiPageGraphResultSet) SESSION_RULE.session().execute(statement); + + assertThat(((MultiPageGraphResultSet.RowIterator) results.iterator()).isCancelled()).isFalse(); + assertThat(((CountingIterator) results.iterator()).remaining()).isEqualTo(10); + results.cancel(); + + assertThat(((MultiPageGraphResultSet.RowIterator) results.iterator()).isCancelled()).isTrue(); + assertThat(((CountingIterator) results.iterator()).remaining()).isEqualTo(10); + for (int i = 0; i < 10; i++) { + results.one(); + } + } + + @Test + public void should_trigger_global_timeout_sync_from_config() { + // given + Duration timeout = Duration.ofMillis(100); + DriverExecutionProfile profile = + enableGraphPaging().withDuration(DseDriverOption.GRAPH_TIMEOUT, timeout); + + // when + try { + CCM_RULE.getCcmBridge().pause(1); + try { + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + fail("Expecting DriverTimeoutException"); + } catch (DriverTimeoutException e) { + assertThat(e).hasMessage("Query timed out after " + timeout); + } + } finally { + 
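+      // resume the paused node so that subsequent tests are not affected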
CCM_RULE.getCcmBridge().resume(1); + } + } + + @Test + public void should_trigger_global_timeout_sync_from_statement() { + // given + Duration timeout = Duration.ofMillis(100); + + // when + try { + CCM_RULE.getCcmBridge().pause(1); + try { + SESSION_RULE + .session() + .execute( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setTimeout(timeout)); + fail("Expecting DriverTimeoutException"); + } catch (DriverTimeoutException e) { + assertThat(e).hasMessage("Query timed out after " + timeout); + } + } finally { + CCM_RULE.getCcmBridge().resume(1); + } + } + + @Test + public void should_trigger_global_timeout_async() throws InterruptedException { + // given + Duration timeout = Duration.ofMillis(100); + DriverExecutionProfile profile = + enableGraphPaging().withDuration(DseDriverOption.GRAPH_TIMEOUT, timeout); + + // when + try { + CCM_RULE.getCcmBridge().pause(1); + CompletionStage result = + SESSION_RULE + .session() + .executeAsync( + ScriptGraphStatement.newInstance("g.V().hasLabel('person').values('name')") + .setGraphName(SESSION_RULE.getGraphName()) + .setTraversalSource("g") + .setExecutionProfile(profile)); + result.toCompletableFuture().get(); + fail("Expecting DriverTimeoutException"); + } catch (ExecutionException e) { + assertThat(e.getCause()).hasMessage("Query timed out after " + timeout); + } finally { + CCM_RULE.getCcmBridge().resume(1); + } + } + + private DriverExecutionProfile enableGraphPaging() { + return SESSION_RULE + .session() + .getContext() + .getConfig() + .getDefaultProfile() + .withString(DseDriverOption.GRAPH_PAGING_ENABLED, PagingEnabledOptions.ENABLED.name()); + } + + private DriverExecutionProfile enableGraphPaging( + Options options, PagingEnabledOptions pagingEnabledOptions) { + return SESSION_RULE + .session() + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, options.pageSize) + .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES, options.maxPages) + .withInt( + DseDriverOption.GRAPH_CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, options.maxPagesPerSecond) + .withString(DseDriverOption.GRAPH_PAGING_ENABLED, pagingEnabledOptions.name()); + } + + private SocketAddress firstCcmNode() { + return CCM_RULE.getContactPoints().iterator().next().resolve(); + } + + private void validateMetrics(CqlSession session) { + Node node = session.getMetadata().getNodes().values().iterator().next(); + assertThat(session.getMetrics()).isPresent(); + Metrics metrics = session.getMetrics().get(); + assertThat(metrics.getNodeMetric(node, DseNodeMetric.GRAPH_MESSAGES)).isPresent(); + Timer messages = (Timer) metrics.getNodeMetric(node, DseNodeMetric.GRAPH_MESSAGES).get(); + assertThat(messages.getCount()).isGreaterThan(0); + assertThat(messages.getMeanRate()).isGreaterThan(0); + assertThat(metrics.getSessionMetric(DseSessionMetric.GRAPH_REQUESTS)).isPresent(); + Timer requests = (Timer) metrics.getSessionMetric(DseSessionMetric.GRAPH_REQUESTS).get(); + assertThat(requests.getCount()).isGreaterThan(0); + assertThat(requests.getMeanRate()).isGreaterThan(0); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphSpeculativeExecutionIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphSpeculativeExecutionIT.java new file mode 100644 index 00000000000..130e9a17cc1 --- /dev/null +++ 
b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphSpeculativeExecutionIT.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; +import com.datastax.oss.driver.internal.core.specex.NoSpeculativeExecutionPolicy; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.time.Duration; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.runner.RunWith; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "DSE 6.8 required for graph paging") +@RunWith(DataProviderRunner.class) +public class GraphSpeculativeExecutionIT { + + @ClassRule + public static CustomCcmRule ccmRule = CustomCcmRule.builder().withDseWorkloads("graph").build(); + + @Test + @UseDataProvider("idempotenceAndSpecExecs") + public void should_use_speculative_executions_when_enabled( + boolean defaultIdempotence, + Boolean statementIdempotence, + Class speculativeExecutionClass, + boolean expectSpeculativeExecutions) { + + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(ccmRule.getContactPoints()) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withBoolean( + DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, defaultIdempotence) + .withInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, 10) + .withClass( + DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, + speculativeExecutionClass) + .withDuration( + DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, Duration.ofMillis(10)) + .withString(DseDriverOption.GRAPH_PAGING_ENABLED, "ENABLED") + .build()) + .build()) { + + ScriptGraphStatement statement = + ScriptGraphStatement.newInstance( + "java.util.concurrent.TimeUnit.MILLISECONDS.sleep(1000L);") + .setIdempotent(statementIdempotence); + + GraphResultSet result = session.execute(statement); + int speculativeExecutionCount = + result.getRequestExecutionInfo().getSpeculativeExecutionCount(); + if (expectSpeculativeExecutions) { + 
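+        // the server-side script sleeps for 1 second, far beyond the 10 ms speculative execution
+        // delay, so at least one speculative execution should have been triggered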
assertThat(speculativeExecutionCount).isGreaterThan(0); + } else { + assertThat(speculativeExecutionCount).isEqualTo(0); + } + } + } + + @DataProvider + public static Object[][] idempotenceAndSpecExecs() { + return new Object[][] { + new Object[] {false, false, NoSpeculativeExecutionPolicy.class, false}, + new Object[] {false, true, NoSpeculativeExecutionPolicy.class, false}, + new Object[] {false, null, NoSpeculativeExecutionPolicy.class, false}, + new Object[] {true, false, NoSpeculativeExecutionPolicy.class, false}, + new Object[] {true, true, NoSpeculativeExecutionPolicy.class, false}, + new Object[] {true, null, NoSpeculativeExecutionPolicy.class, false}, + new Object[] {false, false, ConstantSpeculativeExecutionPolicy.class, false}, + new Object[] {false, true, ConstantSpeculativeExecutionPolicy.class, true}, + new Object[] {false, null, ConstantSpeculativeExecutionPolicy.class, false}, + new Object[] {true, false, ConstantSpeculativeExecutionPolicy.class, false}, + new Object[] {true, true, ConstantSpeculativeExecutionPolicy.class, true}, + new Object[] {true, null, ConstantSpeculativeExecutionPolicy.class, true}, + }; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTestSupport.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTestSupport.java new file mode 100644 index 00000000000..6508be38175 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTestSupport.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import com.datastax.dse.driver.internal.core.graph.GraphProtocol; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; + +/** Utility for creating commonly used Rule builders for tests. */ +public class GraphTestSupport { + + /** CCM Rule builder for Graph Data Type tests. */ + public static final CustomCcmRule.Builder CCM_BUILDER_WITH_GRAPH = + CustomCcmRule.builder() + .withDseWorkloads("graph") + .withDseConfiguration("graph.max_query_params", 32) + .withDseConfiguration( + "graph.gremlin_server.scriptEngines.gremlin-groovy.config.sandbox_enabled", "false"); + + /** CCM Rule builder for general Graph workload tests. */ + public static final CustomCcmRule.Builder GRAPH_CCM_RULE_BUILDER = + CustomCcmRule.builder().withDseWorkloads("graph"); + + /** + * Creates a session rule builder for Classic Graph workloads with the default Graph protocol. The + * default GraphProtocol for Classic Graph: GraphSON 2.0. 
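+   *
+   * <p>Typical usage (a hypothetical test skeleton, mirroring how getCoreGraphSessionBuilder is
+   * used elsewhere in these tests):
+   *
+   * <pre>{@code
+   * private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build();
+   * private static final SessionRule<CqlSession> SESSION_RULE =
+   *     GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build();
+   * }</pre>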
+ * + * @param ccmRule CustomCcmRule configured for Graph workloads + * @return A Session rule builder configured for Classic Graph workloads + */ + public static CqlSessionRuleBuilder getClassicGraphSessionBuilder(CustomCcmRule ccmRule) { + return new CqlSessionRuleBuilder(ccmRule) + .withCreateGraph() + .withGraphProtocol(GraphProtocol.GRAPHSON_2_0.toInternalCode()); + } + + /** + * Creates a session rule builder for Core Graph workloads with the default Graph protocol. The + * default GraphProtocol for Core Graph: Graph Binary 1.0. + * + * @param ccmRule CustomCcmRule configured for Graph workloads + * @return A Session rule builder configured for Core Graph workloads + */ + public static CqlSessionRuleBuilder getCoreGraphSessionBuilder(CustomCcmRule ccmRule) { + return new CqlSessionRuleBuilder(ccmRule) + .withCreateGraph() + .withCoreEngine() + .withGraphProtocol(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTextSearchIndexITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTextSearchIndexITBase.java new file mode 100644 index 00000000000..d70d206715e --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTextSearchIndexITBase.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.graph.predicates.Search; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public abstract class GraphTextSearchIndexITBase { + + protected abstract boolean isGraphBinary(); + + protected abstract GraphTraversalSource graphTraversalSource(); + + @DataProvider + public static Object[][] indexTypes() { + return new Object[][] {{"search"} + + // FIXME for some reason, materialized and secondary indices have decided not to work + // I get an exception saying "there is no index for this query, here is the defined + // indices: " and the list contains the indices that are needed. Mysterious. 
+        // There may be something to do with differences in the CCMBridge adapter of the new
+        // driver; some changes may cause materialized views and secondary indices not to be
+        // considered for graph:
+        //
+        // , {"materialized"}
+        // , {"secondary"}
+    };
+  }
+
+  /**
+   * Validates that a graph traversal can be made by using a Search prefix predicate on an indexed
+   * property of the given type.
+   *
+   * <p>Finds all 'user' vertices having a 'full_name' property beginning with 'Paul'.
+   *
+   * @test_category dse:graph
+   */
+  @UseDataProvider("indexTypes")
+  @Test
+  public void search_by_prefix_search(String indexType) {
+    // Only one user has a full_name starting with 'Paul'.
+    GraphTraversal<Vertex, String> traversal =
+        graphTraversalSource()
+            .V()
+            .has("user", "full_name_" + indexType, Search.prefix("Paul"))
+            .values("full_name_" + indexType);
+    assertThat(traversal.toList()).containsOnly("Paul Thomas Joe");
+  }
+
+  /**
+   * Validates that a graph traversal can be made by using a Search regex predicate on an indexed
+   * property of the given type.
+   *
+   * <p>Finds all 'user' vertices having a 'full_name' property matching the regex '.*Paul.*'.
+   *
+   * @test_category dse:graph
+   */
+  @UseDataProvider("indexTypes")
+  @Test
+  public void search_by_regex(String indexType) {
+    // Only two people have names containing the pattern 'Paul'.
+    GraphTraversal<Vertex, String> traversal =
+        graphTraversalSource()
+            .V()
+            .has("user", "full_name_" + indexType, Search.regex(".*Paul.*"))
+            .values("full_name_" + indexType);
+    assertThat(traversal.toList()).containsOnly("Paul Thomas Joe", "James Paul Joe");
+  }
+
+  /**
+   * Validates that a graph traversal can be made by using a Search fuzzy predicate on an indexed
+   * property of the given type.
+   *
+   * <p>Finds all 'user' vertices having an 'alias' property matching 'awrio' with a fuzzy
+   * distance of 1.
+   *
+   * @test_category dse:graph
+   */
+  @UseDataProvider("indexTypes")
+  @Test
+  @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0")
+  public void search_by_fuzzy(String indexType) {
+    // Alias matches 'awrio' fuzzy
+    GraphTraversal<Vertex, String> traversal =
+        graphTraversalSource()
+            .V()
+            .has("user", "alias_" + indexType, Search.fuzzy("awrio", 1))
+            .values("full_name_" + indexType);
+    // Should not match 'Paul Thomas Joe' since his alias is 'mario', which is at distance 2 of
+    // 'awrio' (a -> m, w -> a).
+    // Should match 'George Bill Steve' since his alias is 'wario', which matches 'awrio' within a
+    // distance of 1 (transpose w with a).
+    assertThat(traversal.toList()).containsOnly("George Bill Steve");
+  }
+
+  /**
+   * Validates that a graph traversal can be made by using a Search token predicate on an indexed
+   * property of the given type.
+   *
+   * <p>Finds all 'user' vertices having a 'description' property containing the token 'cold'.
+   *
+   * @test_category dse:graph
+   */
+  @UseDataProvider("indexTypes")
+  @Test
+  public void search_by_token(String indexType) {
+    // Description containing token 'cold'
+    GraphTraversal<Vertex, String> traversal =
+        graphTraversalSource()
+            .V()
+            .has("user", "description_" + indexType, Search.token("cold"))
+            .values("full_name_" + indexType);
+    assertThat(traversal.toList()).containsOnly("Jill Alice", "George Bill Steve");
+  }
+
+  /**
+   * Validates that a graph traversal can be made by using a Search token prefix predicate on an
+   * indexed property of the given type.
+   *
        Finds all 'user' vertices having a 'description' containing the token prefix 'h'. + */ + @UseDataProvider("indexTypes") + @Test + public void search_by_token_prefix(String indexType) { + // Description containing a token starting with h + GraphTraversal traversal = + graphTraversalSource() + .V() + .has("user", "description_" + indexType, Search.tokenPrefix("h")) + .values("full_name_" + indexType); + assertThat(traversal.toList()).containsOnly("Paul Thomas Joe", "James Paul Joe"); + } + + /** + * Validates that a graph traversal can be made by using a Search token regex predicate on an + * indexed property of the given type. + * + *
<p>
        Finds all 'user' vertices having a 'description' containing the token regex + * '(nice|hospital)'. + */ + @UseDataProvider("indexTypes") + @Test + public void search_by_token_regex(String indexType) { + // Description containing nice or hospital + GraphTraversal traversal = + graphTraversalSource() + .V() + .has("user", "description_" + indexType, Search.tokenRegex("(nice|hospital)")) + .values("full_name_" + indexType); + assertThat(traversal.toList()).containsOnly("Paul Thomas Joe", "Jill Alice"); + } + + /** + * Validates that a graph traversal can be made by using a Search fuzzy predicate on an indexed + * property of the given type. + * + *
<p>
Finds all 'user' vertices having a 'description' property matching 'lieks' with a fuzzy + * distance of 1. + * + * @test_category dse:graph + */
 + @UseDataProvider("indexTypes")
 + @Test
 + @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0")
 + public void search_by_token_fuzzy(String indexType) {
 +   // Description contains a token fuzzy-matching 'lieks'
 +   GraphTraversal traversal =
 +       graphTraversalSource()
 +           .V()
 +           .has("user", "description_" + indexType, Search.tokenFuzzy("lieks", 1))
 +           .values("full_name_" + indexType);
 +   // Should not match 'Paul Thomas Joe' since description contains 'Lives', which is at a
 +   // distance of 2 (e -> v, k -> e)
 +   // Should match 'James Paul Joe' since description contains 'Likes' (transpose e and k)
 +   assertThat(traversal.toList()).containsOnly("James Paul Joe");
 + }
 +
 + /**
 +  * Validates that a graph traversal can be made by using a Search phrase predicate on an indexed
 +  * property of the given type.
 +  *
 +  *
<p>
Finds all 'user' vertices having a 'description' property matching 'a cold' with a distance + * of 2. + * + * @test_category dse:graph + */
 + @UseDataProvider("indexTypes")
 + @Test
 + @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0")
 + public void search_by_phrase(String indexType) {
 +   // Description contains the phrase "a cold" within a distance of 2
 +   GraphTraversal traversal =
 +       graphTraversalSource()
 +           .V()
 +           .has("user", "description_" + indexType, Search.phrase("a cold", 2))
 +           .values("full_name_" + indexType);
 +   // Should match 'George Bill Steve' since 'A cold dude' is at a distance of 0 from 'a cold'.
 +   // Should match 'Jill Alice' since 'Enjoys a very nice cold coca cola' is at a distance of 2
 +   // from 'a cold'.
 +   assertThat(traversal.toList()).containsOnly("George Bill Steve", "Jill Alice");
 + }
 +}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTimeoutsIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTimeoutsIT.java new file mode 100644 index 00000000000..d2b58cc0f9c --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/GraphTimeoutsIT.java @@ -0,0 +1,184 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
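The edit distances quoted in the fuzzy-search comments above assume a metric in which an adjacent transposition costs a single operation (so 'wario' is within distance 1 of 'awrio', while 'mario' is at distance 2). A minimal optimal-string-alignment sketch, purely illustrative and not part of the driver API, that reproduces those numbers:

```java
/**
 * Illustrative only, not part of the driver API: optimal string alignment distance, where
 * substitutions, insertions, deletions and adjacent transpositions each cost 1. This appears
 * to be the kind of metric the Search.fuzzy()/Search.tokenFuzzy() comments above reason with.
 */
public class OsaDistance {

  public static int distance(String a, String b) {
    int[][] d = new int[a.length() + 1][b.length() + 1];
    for (int i = 0; i <= a.length(); i++) {
      d[i][0] = i; // delete a's whole prefix
    }
    for (int j = 0; j <= b.length(); j++) {
      d[0][j] = j; // insert b's whole prefix
    }
    for (int i = 1; i <= a.length(); i++) {
      for (int j = 1; j <= b.length(); j++) {
        int cost = (a.charAt(i - 1) == b.charAt(j - 1)) ? 0 : 1;
        d[i][j] = Math.min(Math.min(d[i - 1][j] + 1, d[i][j - 1] + 1), d[i - 1][j - 1] + cost);
        if (i > 1
            && j > 1
            && a.charAt(i - 1) == b.charAt(j - 2)
            && a.charAt(i - 2) == b.charAt(j - 1)) {
          d[i][j] = Math.min(d[i][j], d[i - 2][j - 2] + 1); // adjacent transposition
        }
      }
    }
    return d[a.length()][b.length()];
  }

  public static void main(String[] args) {
    System.out.println(distance("mario", "awrio")); // 2 (a -> m, w -> a): excluded by fuzzy(.., 1)
    System.out.println(distance("wario", "awrio")); // 1 (transpose w and a): matched
    System.out.println(distance("lieks", "likes")); // 1 (transpose e and k): matched
  }
}
```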
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import static com.datastax.dse.driver.api.core.graph.ScriptGraphStatement.newInstance;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.DriverTimeoutException;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import java.time.Duration;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "5.0.0",
+    description = "DSE 5 required for Graph")
+public class GraphTimeoutsIT {
+
+  public static CustomCcmRule ccmRule = CustomCcmRule.builder().withDseWorkloads("graph").build();
+
+  public static SessionRule sessionRule =
+      SessionRule.builder(ccmRule).withCreateGraph().build();
+
+  @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+
+  @Test
+  public void should_have_driver_wait_indefinitely_by_default_and_server_return_timeout_response() {
+    Duration serverTimeout = Duration.ofSeconds(1);
+
+    DriverExecutionProfile drivertest1 =
+        sessionRule
+            .session()
+            .getContext()
+            .getConfig()
+            .getDefaultProfile()
+            .withString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, "drivertest1");
+
+    // We could have used the server's default, but it's 30 secs, so the test would have taken at
+    // least that long. So we simulate a server timeout change.
+    sessionRule
+        .session()
+        .execute(
+            newInstance(
+                    "graph.schema().config().option(\"graph.traversal_sources.drivertest1.evaluation_timeout\").set('"
+                        + serverTimeout.toMillis()
+                        + " ms')")
+                .setExecutionProfile(drivertest1));
+
+    try {
+      // The driver should wait indefinitely, but the server should time out first.
+      sessionRule
+          .session()
+          .execute(
+              newInstance("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1")
+                  .setExecutionProfile(drivertest1));
+      fail("The request should have timed out");
+    } catch (InvalidQueryException e) {
+      assertThat(e)
+          .hasMessageContainingAll(
+              "evaluation exceeded",
+              "threshold of ",
+              Long.toString(serverTimeout.toMillis()),
+              "ms");
+    }
+  }
+
+  @Test
+  public void should_not_take_into_account_request_timeout_if_more_than_server_timeout() {
+    Duration serverTimeout = Duration.ofSeconds(1);
+    Duration clientTimeout = Duration.ofSeconds(10);
+
+    DriverExecutionProfile drivertest2 =
+        sessionRule
+            .session()
+            .getContext()
+            .getConfig()
+            .getDefaultProfile()
+            .withString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, "drivertest2")
+            .withDuration(DseDriverOption.GRAPH_TIMEOUT, clientTimeout);
+
+    sessionRule
+        .session()
+        .execute(
+            newInstance(
+                    "graph.schema().config().option(\"graph.traversal_sources.drivertest2.evaluation_timeout\").set('"
+                        + serverTimeout.toMillis()
+                        + " ms')")
+                .setExecutionProfile(drivertest2));
+
+    try {
+      // The driver should wait 10 secs, but the server should time out first.
+      sessionRule
+          .session()
+          .execute(
+              newInstance("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1")
+                  .setExecutionProfile(drivertest2));
+      fail("The request should have timed out");
+    } catch (InvalidQueryException e) {
+      assertThat(e)
+          .hasMessageContainingAll(
+              "evaluation exceeded",
+              "threshold of ",
+              Long.toString(serverTimeout.toMillis()),
+              "ms");
+    }
+  }
+
+  @Test
+  public void should_take_into_account_request_timeout_if_less_than_server_timeout() {
+    Duration serverTimeout = Duration.ofSeconds(10);
+    Duration clientTimeout = Duration.ofSeconds(1);
+
+    DriverExecutionProfile drivertest3 =
+        sessionRule
+            .session()
+            .getContext()
+            .getConfig()
+            .getDefaultProfile()
+            .withString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, "drivertest3");
+
+    // We could have used the server's default, but it's 30 secs, so the test would have taken at
+    // least that long. Also, we don't want to rely on the server's default. So we simulate a
+    // server timeout change.
+    sessionRule
+        .session()
+        .execute(
+            ScriptGraphStatement.newInstance(
+                    "graph.schema().config().option(\"graph.traversal_sources.drivertest3.evaluation_timeout\").set('"
+                        + serverTimeout.toMillis()
+                        + " ms')")
+                .setExecutionProfile(drivertest3));
+
+    try {
+      // The timeout on the request is lower than what's defined server-side, so it should be
+      // taken into account.
+      sessionRule
+          .session()
+          .execute(
+              ScriptGraphStatement.newInstance(
+                      "java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1")
+                  .setExecutionProfile(
+                      drivertest3.withDuration(DseDriverOption.GRAPH_TIMEOUT, clientTimeout)));
+      fail("The request should have timed out");
+      // Since the driver sends its timeout in the request payload, the server timeout will be
+      // equal to the client timeout for this request. We cannot know for sure whether it will be
+      // a client timeout error or a server timeout; during tests, both have been observed,
+      // non-deterministically.
+    } catch (DriverTimeoutException e) {
+      assertThat(e).hasMessage("Query timed out after " + clientTimeout);
+    } catch (InvalidQueryException e) {
+      assertThat(e)
+          .hasMessageContainingAll(
+              "evaluation exceeded",
+              "threshold of ",
+              Long.toString(clientTimeout.toMillis()),
+              "ms");
+    }
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SampleGraphScripts.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SampleGraphScripts.java new file mode 100644 index 00000000000..19ff957736a --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SampleGraphScripts.java @@ -0,0 +1,64 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
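To recap the interplay the timeout tests above exercise: by default the driver waits indefinitely on graph queries, so only the server-side `evaluation_timeout` applies; once `GRAPH_TIMEOUT` is set, the driver enforces it locally and also ships it in the request payload, so the effective limit is the smaller of the two values. A hedged sketch of setting it per request, assuming an existing `CqlSession` named `session` bound to a graph:

```java
// Sketch (`session` is an assumed, already-built CqlSession): cap one graph request
// at 5 seconds. The driver enforces this client-side and also forwards it to the
// server, which applies it in place of a larger evaluation_timeout.
DriverExecutionProfile profile =
    session
        .getContext()
        .getConfig()
        .getDefaultProfile()
        .withDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ofSeconds(5));
session.execute(ScriptGraphStatement.newInstance("g.V().limit(1)").setExecutionProfile(profile));
```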
+ */ +package com.datastax.dse.driver.api.core.graph; + +public class SampleGraphScripts { + + public static final String MAKE_STRICT = + "schema.config().option('graph.schema_mode').set('production');\n"; + + public static final String MAKE_NOT_STRICT = + "schema.config().option('graph.schema_mode').set('development');\n"; + + public static final String ALLOW_SCANS = + "schema.config().option('graph.allow_scan').set('true');\n"; + + private static final String CLASSIC_SCHEMA = + "schema.propertyKey('name').Text().ifNotExists().create();\n" + + "schema.propertyKey('age').Int().ifNotExists().create();\n" + + "schema.propertyKey('lang').Text().ifNotExists().create();\n" + + "schema.propertyKey('weight').Float().ifNotExists().create();\n" + + "schema.vertexLabel('person').properties('name', 'age').ifNotExists().create();\n" + + "schema.vertexLabel('software').properties('name', 'lang').ifNotExists().create();\n" + + "schema.edgeLabel('created').properties('weight').connection('person', 'software').ifNotExists().create();\n" + + "schema.edgeLabel('knows').properties('weight').connection('person', 'person').ifNotExists().create();\n"; + + private static final String INSERT_DATA = + "marko = g.addV('person').property('name', 'marko').property('age', 29).next();\n" + + "vadas = g.addV('person').property('name', 'vadas').property('age', 27).next();\n" + + "josh = g.addV('person').property('name', 'josh').property('age', 32).next();\n" + + "peter = g.addV('person').property('name', 'peter').property('age', 35).next();\n" + + "lop = g.addV('software').property('name', 'lop').property('lang', 'java').next();\n" + + "ripple = g.addV('software').property('name', 'ripple').property('lang', 'java').next();\n" + + "g.V().has('name', 'marko').as('marko').V().has('name', 'vadas').as('vadas').addE('knows').from('marko').property('weight', 0.5f).next();\n" + + "g.V().has('name', 'marko').as('marko').V().has('name', 'josh').as('josh').addE('knows').from('marko').property('weight', 1.0f).next();\n" + + "g.V().has('name', 'marko').as('marko').V().has('name', 'lop').as('lop').addE('created').from('marko').property('weight', 0.4f).next();\n" + + "g.V().has('name', 'josh').as('josh').V().has('name', 'ripple').as('ripple').addE('created').from('josh').property('weight', 1.0f).next();\n" + + "g.V().has('name', 'josh').as('josh').V().has('name', 'lop').as('lop').addE('created').from('josh').property('weight', 0.4f).next();\n" + + "g.V().has('name', 'peter').as('peter').V().has('name', 'lop').as('lop').addE('created').from('peter').property('weight', 0.2f);"; + + public static String CLASSIC_GRAPH = CLASSIC_SCHEMA + INSERT_DATA; + + private static final String CORE_SCHEMA = + "schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();\n" + + "schema.vertexLabel('software').ifNotExists().partitionBy('name', Text).property('lang', Text).create();\n" + + "schema.edgeLabel('created').ifNotExists().from('person').to('software').property('weight', Float).create();\n" + + "schema.edgeLabel('knows').ifNotExists().from('person').to('person').property('weight', Float).create();\n"; + + public static String CORE_GRAPH = CORE_SCHEMA + INSERT_DATA; +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalDsl.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalDsl.java new file mode 100644 index 00000000000..327f32a240d --- /dev/null +++ 
b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalDsl.java @@ -0,0 +1,29 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
+package com.datastax.dse.driver.api.core.graph;
+
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.GremlinDsl;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+
+@GremlinDsl(traversalSource = "com.datastax.dse.driver.api.core.graph.SocialTraversalSourceDsl")
+public interface SocialTraversalDsl<S, E> extends GraphTraversal.Admin<S, E> {
+  public default GraphTraversal<S, Vertex> knows(String personName) {
+    return out("knows").hasLabel("person").has("name", personName).in();
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalSourceDsl.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalSourceDsl.java new file mode 100644 index 00000000000..e61b94e2d09 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/SocialTraversalSourceDsl.java @@ -0,0 +1,60 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import org.apache.tinkerpop.gremlin.process.remote.RemoteConnection;
+import org.apache.tinkerpop.gremlin.process.traversal.P;
+import org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategies;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.DefaultGraphTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
+import org.apache.tinkerpop.gremlin.process.traversal.step.map.GraphStep;
+import org.apache.tinkerpop.gremlin.structure.Graph;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+
+public class SocialTraversalSourceDsl extends GraphTraversalSource {
+
+  public SocialTraversalSourceDsl(
+      final Graph graph, final TraversalStrategies traversalStrategies) {
+    super(graph, traversalStrategies);
+  }
+
+  public SocialTraversalSourceDsl(final Graph graph) {
+    super(graph);
+  }
+
+  public SocialTraversalSourceDsl(RemoteConnection connection) {
+    super(connection);
+  }
+
+  public GraphTraversal<Vertex, Vertex> persons(String... names) {
+    GraphTraversalSource clone = this.clone();
+
+    // Manually add a "start" step for the traversal, in this case the equivalent of V().
+    // GraphStep is marked as a "start" step by passing "true" in the constructor.
+    clone.getBytecode().addStep(GraphTraversal.Symbols.V);
+    GraphTraversal<Vertex, Vertex> traversal = new DefaultGraphTraversal<>(clone);
+    traversal.asAdmin().addStep(new GraphStep<>(traversal.asAdmin(), Vertex.class, true));
+
+    traversal = traversal.hasLabel("person");
+    if (names.length > 0) {
+      traversal = traversal.has("name", P.within(names));
+    }
+
+    return traversal;
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerEdgeAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerEdgeAssert.java new file mode 100644 index 00000000000..e3cc8cb687b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerEdgeAssert.java @@ -0,0 +1,49 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
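For context on the two DSL classes above: at compile time the `@GremlinDsl` annotation processor generates a `SocialTraversalSource` (plus companion traversal types) from them. A hypothetical usage sketch, assuming an existing `CqlSession` named `session`:

```java
// Sketch: SocialTraversalSource is generated from the DSL classes above by the
// GremlinDsl annotation processor; `session` is an assumed, already-built CqlSession.
SocialTraversalSource social =
    AnonymousTraversalSource.traversal(SocialTraversalSource.class)
        .withRemote(DseGraph.remoteConnectionBuilder(session).build());
// persons() comes from SocialTraversalSourceDsl, knows() from SocialTraversalDsl:
List<Vertex> result = social.persons("marko").knows("vadas").toList();
```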
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import com.datastax.oss.driver.assertions.Assertions;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+
+public class TinkerEdgeAssert extends TinkerElementAssert<TinkerEdgeAssert, Edge> {
+
+  public TinkerEdgeAssert(Edge actual) {
+    super(actual, TinkerEdgeAssert.class);
+  }
+
+  public TinkerEdgeAssert hasInVLabel(String label) {
+    Assertions.assertThat(actual.inVertex().label()).isEqualTo(label);
+    return myself;
+  }
+
+  public TinkerEdgeAssert hasOutVLabel(String label) {
+    Assertions.assertThat(actual.outVertex().label()).isEqualTo(label);
+    return myself;
+  }
+
+  public TinkerEdgeAssert hasOutV(Vertex vertex) {
+    Assertions.assertThat(actual.outVertex()).isEqualTo(vertex);
+    return myself;
+  }
+
+  public TinkerEdgeAssert hasInV(Vertex vertex) {
+    Assertions.assertThat(actual.inVertex()).isEqualTo(vertex);
+    return myself;
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerElementAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerElementAssert.java new file mode 100644 index 00000000000..f54cec3065a --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerElementAssert.java @@ -0,0 +1,52 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.tinkerpop.gremlin.structure.Element;
+import org.assertj.core.api.AbstractAssert;
+
+public abstract class TinkerElementAssert<S extends TinkerElementAssert<S, A>, A extends Element>
+    extends AbstractAssert<S, A> {
+
+  protected TinkerElementAssert(A actual, Class<?> selfType) {
+    super(actual, selfType);
+  }
+
+  public S hasId(Object id) {
+    assertThat(actual.id()).isEqualTo(id);
+    return myself;
+  }
+
+  public S hasLabel(String label) {
+    assertThat(actual.label()).isEqualTo(label);
+    return myself;
+  }
+
+  public S hasProperty(String propertyName) {
+    assertThat(actual.property(propertyName).isPresent()).isTrue();
+    return myself;
+  }
+
+  public S hasProperty(String propertyName, Object value) {
+    hasProperty(propertyName);
+    assertThat(actual.property(propertyName).value()).isEqualTo(value);
+    return myself;
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerGraphAssertions.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerGraphAssertions.java new file mode 100644 index 00000000000..9c38a58db4c --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerGraphAssertions.java @@ -0,0 +1,47 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
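The base class above uses AssertJ's self-typed generic pattern: `S` is the concrete assertion type, so every chainable method can `return myself` without casts. A hypothetical usage sketch against the sample graph, via the `TinkerGraphAssertions` entry point defined next:

```java
// Sketch, assuming a GraphTraversalSource `g` over the sample "classic" graph:
Vertex marko = g.V().has("name", "marko").next();
TinkerGraphAssertions.assertThat(marko)
    .hasLabel("person")
    .hasProperty("name", "marko")
    .hasProperty("age", 29);
```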
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import org.apache.tinkerpop.gremlin.process.traversal.Path;
+import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.apache.tinkerpop.gremlin.structure.VertexProperty;
+
+public class TinkerGraphAssertions extends com.datastax.oss.driver.assertions.Assertions {
+
+  public static TinkerEdgeAssert assertThat(Edge edge) {
+    return new TinkerEdgeAssert(edge);
+  }
+
+  public static TinkerVertexAssert assertThat(Vertex vertex) {
+    return new TinkerVertexAssert(vertex);
+  }
+
+  public static <T> TinkerVertexPropertyAssert<T> assertThat(VertexProperty<T> vertexProperty) {
+    return new TinkerVertexPropertyAssert<>(vertexProperty);
+  }
+
+  public static TinkerPathAssert assertThat(Path path) {
+    return new TinkerPathAssert(path);
+  }
+
+  public static <T> TinkerTreeAssert<T> assertThat(Tree<T> tree) {
+    return new TinkerTreeAssert<>(tree);
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerPathAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerPathAssert.java new file mode 100644 index 00000000000..30f8f5fffa4 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerPathAssert.java @@ -0,0 +1,110 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
+package com.datastax.dse.driver.api.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.tinkerpop.gremlin.process.traversal.Path;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.assertj.core.api.AbstractAssert;
+import org.assertj.core.api.AbstractObjectAssert;
+
+public class TinkerPathAssert extends AbstractAssert<TinkerPathAssert, Path> {
+
+  public TinkerPathAssert(Path actual) {
+    super(actual, TinkerPathAssert.class);
+  }
+
+  /**
+   * Ensures that the given Path matches one of the exact traversals we'd expect for a person whom
+   * Marko knows who has created software, and which software that is.
+   *
+   *
<p>These paths should be: + * + * <ul> + *   <li>marko -> knows -> josh -> created -> lop + *   <li>marko -> knows -> josh -> created -> ripple + * </ul>
        + */ + public static void validatePathObjects(Path path) { + + // marko should be the origin point. + TinkerGraphAssertions.assertThat(path).vertexAt(0).hasLabel("person"); + + // there should be a 'knows' outgoing relationship between marko and josh. + TinkerGraphAssertions.assertThat(path) + .edgeAt(1) + .hasLabel("knows") + .hasOutVLabel("person") + .hasOutV((Vertex) path.objects().get(0)) + .hasInVLabel("person") + .hasInV((Vertex) path.objects().get(2)); + + // josh... + TinkerGraphAssertions.assertThat(path).vertexAt(2).hasLabel("person"); + + // there should be a 'created' relationship between josh and lop. + TinkerGraphAssertions.assertThat(path) + .edgeAt(3) + .hasLabel("created") + .hasOutVLabel("person") + .hasOutV((Vertex) path.objects().get(2)) + .hasInVLabel("software") + .hasInV((Vertex) path.objects().get(4)); + + // lop.. + TinkerGraphAssertions.assertThat(path).vertexAt(4).hasLabel("software"); + } + + public AbstractObjectAssert objectAt(int i) { + assertThat(actual.size()).isGreaterThanOrEqualTo(i); + return assertThat(actual.objects().get(i)); + } + + public TinkerVertexAssert vertexAt(int i) { + assertThat(actual.size()).isGreaterThanOrEqualTo(i); + Object o = actual.objects().get(i); + assertThat(o).isInstanceOf(Vertex.class); + return new TinkerVertexAssert((Vertex) o); + } + + public TinkerEdgeAssert edgeAt(int i) { + assertThat(actual.size()).isGreaterThanOrEqualTo(i); + Object o = actual.objects().get(i); + assertThat(o).isInstanceOf(Edge.class); + return new TinkerEdgeAssert((Edge) o); + } + + public TinkerPathAssert hasLabel(int i, String... labels) { + assertThat(actual.labels().size()).isGreaterThanOrEqualTo(i); + assertThat(actual.labels().get(i)).containsExactly(labels); + return myself; + } + + public TinkerPathAssert hasNoLabel(int i) { + assertThat(actual.labels().size()).isGreaterThanOrEqualTo(i); + assertThat(actual.labels().get(i)).isEmpty(); + return myself; + } + + public TinkerPathAssert doesNotHaveLabel(String label) { + assertThat(actual.hasLabel(label)).isFalse(); + return myself; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerTreeAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerTreeAssert.java new file mode 100644 index 00000000000..6196e0a1021 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerTreeAssert.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
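A traversal over the sample graph that would produce exactly the two paths validated by `validatePathObjects` might look like the following sketch (not taken from the test base class):

```java
// Sketch, assuming a GraphTraversalSource `g` over the sample "classic" graph:
List<Path> paths =
    g.V().has("person", "name", "marko")
        .outE("knows")
        .inV() // path objects 0..2: marko, a 'knows' edge, the known person
        .outE("created")
        .inV() // path objects 3..4: a 'created' edge, the software vertex
        .path()
        .toList();
for (Path path : paths) {
  TinkerPathAssert.validatePathObjects(path); // marko -> knows -> josh -> created -> lop/ripple
}
```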
+ */
+package com.datastax.dse.driver.api.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree;
+import org.assertj.core.api.MapAssert;
+
+public class TinkerTreeAssert<T> extends MapAssert<T, Tree<T>> {
+
+  public TinkerTreeAssert(Tree<T> actual) {
+    super(actual);
+  }
+
+  public TinkerTreeAssert<T> hasTree(T key) {
+    assertThat(actual).containsKey(key);
+    return this;
+  }
+
+  public TinkerTreeAssert<T> isLeaf() {
+    assertThat(actual).hasSize(0);
+    return this;
+  }
+
+  public TinkerTreeAssert<T> tree(T key) {
+    hasTree(key);
+    return new TinkerTreeAssert<>(actual.get(key));
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexAssert.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexAssert.java new file mode 100644 index 00000000000..3777fc8e96a --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/TinkerVertexAssert.java @@ -0,0 +1,46 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.
You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
+package com.datastax.dse.driver.api.core.graph;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.tinkerpop.gremlin.structure.Element;
+import org.apache.tinkerpop.gremlin.structure.VertexProperty;
+
+public class TinkerVertexPropertyAssert<T>
+    extends TinkerElementAssert<TinkerVertexPropertyAssert<T>, VertexProperty<T>> {
+
+  public TinkerVertexPropertyAssert(VertexProperty<T> actual) {
+    super(actual, TinkerVertexPropertyAssert.class);
+  }
+
+  public TinkerVertexPropertyAssert<T> hasKey(String key) {
+    assertThat(actual.key()).isEqualTo(key);
+    return this;
+  }
+
+  public TinkerVertexPropertyAssert<T> hasParent(Element parent) {
+    assertThat(actual.element()).isEqualTo(parent);
+    return this;
+  }
+
+  public TinkerVertexPropertyAssert<T> hasValue(T value) {
+    assertThat(actual.value()).isEqualTo(value);
+    return this;
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/reactive/DefaultReactiveGraphResultSetIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/reactive/DefaultReactiveGraphResultSetIT.java new file mode 100644 index 00000000000..b4c8bb05df4 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/reactive/DefaultReactiveGraphResultSetIT.java @@ -0,0 +1,118 @@
+/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
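For orientation before the reactive test that follows: `executeReactive` returns a `ReactiveGraphResultSet`, which is a Reactive Streams `Publisher` of `ReactiveGraphNode`, so it can be consumed with any compatible library (the tests use RxJava). A minimal consumption sketch, assuming an existing `CqlSession` named `session` bound to a graph:

```java
// Sketch (`session` is an assumed, already-built CqlSession bound to a graph):
ReactiveGraphResultSet rs =
    session.executeReactive(ScriptGraphStatement.newInstance("g.V()"));
// Each emitted node also exposes the ExecutionInfo of the page it was read from.
long vertexCount = Flowable.fromPublisher(rs).count().blockingGet();
```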
+ */
+package com.datastax.dse.driver.api.core.graph.reactive;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
+import com.datastax.dse.driver.internal.core.graph.GraphProtocol;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.tngtech.java.junit.dataprovider.DataProvider;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import io.reactivex.Flowable;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "6.8.0",
+    description = "Graph paging requires DSE 6.8+")
+@RunWith(DataProviderRunner.class)
+public class DefaultReactiveGraphResultSetIT {
+
+  private static CustomCcmRule ccmRule = CustomCcmRule.builder().withDseWorkloads("graph").build();
+
+  private static SessionRule sessionRule =
+      SessionRule.builder(ccmRule)
+          .withCreateGraph()
+          .withCoreEngine()
+          .withGraphProtocol(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode())
+          .build();
+
+  @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+
+  @BeforeClass
+  public static void setupSchema() {
+    sessionRule
+        .session()
+        .execute(
+            ScriptGraphStatement.newInstance(
+                    "schema.vertexLabel('person')"
+                        + ".partitionBy('pk', Int)"
+                        + ".clusterBy('cc', Int)"
+                        + ".property('name', Text)"
+                        + ".create();")
+                .setGraphName(sessionRule.getGraphName()));
+    for (int i = 1; i <= 1000; i++) {
+      sessionRule
+          .session()
+          .execute(
+              ScriptGraphStatement.newInstance(
+                      String.format(
+                          "g.addV('person').property('pk',0).property('cc',%d).property('name', '%s');",
+                          i, "user" + i))
+                  .setGraphName(sessionRule.getGraphName()));
+    }
+  }
+
+  @Test
+  @DataProvider(
+      value = {"1", "10", "100", "999", "1000", "1001", "2000"},
+      format = "%m [page size %p[0]]")
+  public void should_retrieve_all_rows(int pageSize) {
+    DriverExecutionProfile profile =
+        sessionRule
+            .session()
+            .getContext()
+            .getConfig()
+            .getDefaultProfile()
+            .withInt(DseDriverOption.GRAPH_CONTINUOUS_PAGING_PAGE_SIZE, pageSize);
+    ScriptGraphStatement statement =
+        ScriptGraphStatement.builder("g.V()").setExecutionProfile(profile).build();
+    ReactiveGraphResultSet rs = sessionRule.session().executeReactive(statement);
+    List<ReactiveGraphNode> results = Flowable.fromPublisher(rs).toList().blockingGet();
+    assertThat(results.size()).isEqualTo(1000);
+    Set<ExecutionInfo> expectedExecInfos = new LinkedHashSet<>();
+    for (ReactiveGraphNode row : results) {
+      assertThat(row.getExecutionInfo()).isNotNull();
+      assertThat(row.isVertex()).isTrue();
+      expectedExecInfos.add(row.getExecutionInfo());
+    }
+    List<ExecutionInfo> execInfos =
+        Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet();
+    // DSE may send an empty page as it can't always know if it's done paging or not yet.
+    // See: CASSANDRA-8871.
In this case, this page's execution info appears in + // rs.getExecutionInfos(), but is not present in expectedExecInfos since the page did not + // contain any rows. + assertThat(execInfos).containsAll(expectedExecInfos); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphDataTypeRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphDataTypeRemoteIT.java new file mode 100644 index 00000000000..b57d7a952bc --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphDataTypeRemoteIT.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.remote; + +import com.datastax.dse.driver.api.core.graph.ClassicGraphDataTypeITBase; +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.3", + description = "DSE 5.0.3 required for remote TinkerPop support") +public class ClassicGraphDataTypeRemoteIT extends ClassicGraphDataTypeITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); + } + + @Override + public CqlSession session() { + return SESSION_RULE.session(); + } + + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + 
.withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build()); + + @Override + public Vertex insertVertexAndReturn(String vertexLabel, String propertyName, Object value) { + return g.addV(vertexLabel).property(propertyName, value).next(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphTraversalRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphTraversalRemoteIT.java new file mode 100644 index 00000000000..de85b6af267 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/ClassicGraphTraversalRemoteIT.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.remote; + +import static org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal; + +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.9", + description = "DSE 5.0.9 required for inserting edges and vertices script.") +public class ClassicGraphTraversalRemoteIT extends GraphTraversalRemoteITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CLASSIC_GRAPH)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); + } + + @Override + protected CqlSession session() { + return 
SESSION_RULE.session(); + } + + @Override + protected boolean isGraphBinary() { + return false; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return traversal().withRemote(DseGraph.remoteConnectionBuilder(session()).build()); + } + + @Override + protected SocialTraversalSource socialTraversalSource() { + return traversal(SocialTraversalSource.class) + .withRemote(DseGraph.remoteConnectionBuilder(session()).build()); + } + + @Override + protected CustomCcmRule ccmRule() { + return CCM_RULE; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphDataTypeRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphDataTypeRemoteIT.java new file mode 100644 index 00000000000..88b9cdc0433 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphDataTypeRemoteIT.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.remote; + +import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.unfold; + +import com.datastax.dse.driver.api.core.graph.CoreGraphDataTypeITBase; +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "DSE 6.8.0 required for Core graph support") +@RunWith(DataProviderRunner.class) +public class CoreGraphDataTypeRemoteIT extends CoreGraphDataTypeITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + 
@Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + protected String graphName() { + return SESSION_RULE.getGraphName(); + } + + private final GraphTraversalSource g = + AnonymousTraversalSource.traversal() + .withRemote(DseGraph.remoteConnectionBuilder(session()).build()); + + @Override + public Map insertVertexThenReadProperties( + Map properties, int vertexID, String vertexLabel) { + GraphTraversal traversal = g.addV(vertexLabel).property("id", vertexID); + + for (Map.Entry entry : properties.entrySet()) { + String typeDefinition = entry.getKey(); + String propName = formatPropertyName(typeDefinition); + Object value = entry.getValue(); + traversal = traversal.property(propName, value); + } + + // insert vertex + traversal.iterate(); + + // query properties + return g.V().has(vertexLabel, "id", vertexID).valueMap().by(unfold()).next(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphTraversalRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphTraversalRemoteIT.java new file mode 100644 index 00000000000..2bbdf1f6d45 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/CoreGraphTraversalRemoteIT.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
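One detail in the data-type test above deserves a note: `valueMap()` wraps every property value in a list by default, and the `by(unfold())` modulator unwraps single-valued entries so the returned map holds scalars. A sketch, assuming a `GraphTraversalSource` named `g` and the static import of `__.unfold` used above:

```java
// Without the modulator, every value is wrapped in a list: {name=[marko], age=[29]}
Map<Object, Object> wrapped = g.V().has("name", "marko").valueMap().next();
// With by(unfold()), single values are unwrapped: {name=marko, age=29}
Map<Object, Object> unwrapped = g.V().has("name", "marko").valueMap().by(unfold()).next();
```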
+ */ +package com.datastax.dse.driver.api.core.graph.remote; + +import static org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal; + +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8", + description = "DSE 6.8 required for Core graph support") +public class CoreGraphTraversalRemoteIT extends GraphTraversalRemoteITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CORE_GRAPH)); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + protected boolean isGraphBinary() { + return true; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return traversal() + .withRemote(DseGraph.remoteConnectionBuilder(session()).build()) + .with("allow-filtering"); + } + + @Override + protected SocialTraversalSource socialTraversalSource() { + return traversal(SocialTraversalSource.class) + .withRemote(DseGraph.remoteConnectionBuilder(session()).build()) + .with("allow-filtering"); + } + + @Override + protected CustomCcmRule ccmRule() { + return CCM_RULE; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMetaPropertiesRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMetaPropertiesRemoteIT.java new file mode 100644 index 00000000000..2966fb44cf9 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMetaPropertiesRemoteIT.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
+package com.datastax.dse.driver.api.core.graph.remote;
+
+import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS;
+import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT;
+import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat;
+
+import com.datastax.dse.driver.api.core.graph.DseGraph;
+import com.datastax.dse.driver.api.core.graph.GraphTestSupport;
+import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.apache.tinkerpop.gremlin.structure.VertexProperty;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+// INFO: meta props are going away in NGDG
+
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "5.0.3",
+    description = "DSE 5.0.3 required for remote TinkerPop support")
+public class GraphTraversalMetaPropertiesRemoteIT {
+
+  private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build();
+
+  private static final SessionRule SESSION_RULE =
+      GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+
+  private final GraphTraversalSource g =
+      AnonymousTraversalSource.traversal()
+          .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build());
+
+  /** Builds a simple schema that provides for a vertex with a property with sub properties. */
+  public static final String META_PROPS =
+      MAKE_STRICT
+          + ALLOW_SCANS
+          + "schema.propertyKey('sub_prop').Text().create()\n"
+          + "schema.propertyKey('sub_prop2').Text().create()\n"
+          + "schema.propertyKey('meta_prop').Text().properties('sub_prop', 'sub_prop2').create()\n"
+          + "schema.vertexLabel('meta_v').properties('meta_prop').create()";
+
+  /**
+   * Ensures that a traversal that yields a vertex with a property that has its own properties is
+   * appropriately parsed, with the sub-properties made accessible via {@link
+   * VertexProperty#property}.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_parse_meta_properties() {
+    // given a schema that defines meta properties.
+    SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(META_PROPS));
+
+    // when adding a vertex with that meta property
+    Vertex v =
+        g.addV("meta_v")
+            .property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2")
+            .next();
+
+    // then the created vertex should have the meta prop present with its sub properties.
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMultiPropertiesRemoteIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMultiPropertiesRemoteIT.java
new file mode 100644
index 00000000000..c55a7b67c4a
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalMultiPropertiesRemoteIT.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph.remote;
+
+import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS;
+import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT;
+import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat;
+
+import com.datastax.dse.driver.api.core.graph.DseGraph;
+import com.datastax.dse.driver.api.core.graph.GraphTestSupport;
+import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import java.util.Iterator;
+import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.apache.tinkerpop.gremlin.structure.VertexProperty;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+// INFO: multi props are not supported in Core
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "5.0.3",
+    description = "DSE 5.0.3 required for remote TinkerPop support")
+public class GraphTraversalMultiPropertiesRemoteIT {
+
+  private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build();
+
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+
+  private final GraphTraversalSource g =
+      AnonymousTraversalSource.traversal()
+          .withRemote(DseGraph.remoteConnectionBuilder(SESSION_RULE.session()).build());
+  /** Builds a simple schema that provides for a vertex with a multi-cardinality property. */
+  public static final String MULTI_PROPS =
+      MAKE_STRICT
+          + ALLOW_SCANS
+          + "schema.propertyKey('multi_prop').Text().multiple().create()\n"
+          + "schema.vertexLabel('multi_v').properties('multi_prop').create()\n";
+
+  /**
+   * Ensures that when a traversal yields a vertex with a property name that is present multiple
+   * times, all of the property's values are parsed and made accessible via {@link
+   * Vertex#properties(String...)}.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_parse_multiple_cardinality_properties() {
+    // given a schema that defines multiple cardinality properties.
+    SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(MULTI_PROPS));
+
+    // when adding a vertex with a multiple cardinality property
+    Vertex v =
+        g.addV("multi_v")
+            .property("multi_prop", "Hello")
+            .property("multi_prop", "Sweet")
+            .property("multi_prop", "World")
+            .next();
+
+    // then the created vertex should have the multi-cardinality property present with its values.
+    assertThat(v).hasProperty("multi_prop");
+    Iterator<VertexProperty<String>> multiProp = v.properties("multi_prop");
+    assertThat(multiProp)
+        .toIterable()
+        .extractingResultOf("value")
+        .containsExactly("Hello", "Sweet", "World");
+  }
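+
+  // Illustrative sketch, not asserted above: the same values can be drained into a plain
+  // list with the iterator API, e.g.
+  //   List<String> values = new ArrayList<>();
+  //   v.<String>properties("multi_prop").forEachRemaining(p -> values.add(p.value()));
+  //   // values -> [Hello, Sweet, World]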
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalRemoteITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalRemoteITBase.java
new file mode 100644
index 00000000000..3db8a7d1a12
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/remote/GraphTraversalRemoteITBase.java
@@ -0,0 +1,661 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph.remote;
+
+import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat;
+import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsLabel;
+import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsProperties;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.assertj.core.api.Assertions.fail;
+
+import com.datastax.dse.driver.Assertions;
+import com.datastax.dse.driver.api.core.graph.SocialTraversalSource;
+import com.datastax.dse.driver.api.core.graph.TinkerPathAssert;
+import com.datastax.dse.driver.api.core.graph.__;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.Version;
+import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException;
+import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import org.apache.tinkerpop.gremlin.process.traversal.Path;
+import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
+import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree;
+import org.apache.tinkerpop.gremlin.structure.Direction;
+import org.apache.tinkerpop.gremlin.structure.Edge;
+import org.apache.tinkerpop.gremlin.structure.Graph;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.assertj.core.api.Assumptions;
+import org.junit.Test;
+
+public abstract class GraphTraversalRemoteITBase {
+
+  protected abstract CqlSession session();
+
+  protected abstract boolean isGraphBinary();
+
+  protected abstract GraphTraversalSource graphTraversalSource();
+
+  protected abstract SocialTraversalSource socialTraversalSource();
+
+  protected abstract CustomCcmRule ccmRule();
+
+  /**
+   * Ensures that a previously returned {@link Vertex}'s {@link Vertex#id()} can be used as an input
+   * to {@link
+   * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#V(Object...)} to
+   * retrieve the {@link Vertex} and that the returned {@link Vertex} is the same.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_use_vertex_id_as_parameter() {
+    GraphTraversalSource g = graphTraversalSource();
+
+    // given an existing vertex
+    Vertex marko = g.V().hasLabel("person").has("name", "marko").next();
+    if (isGraphBinary()) {
+      Map<Object, Object> properties =
+          g.V().hasLabel("person").has("name", "marko").elementMap("name").next();
+
+      assertThat(properties).containsEntry("name", "marko");
+    } else {
+      assertThat(marko).hasProperty("name", "marko");
+    }
+
+    // then should be able to retrieve that same vertex by id.
+    assertThat(g.V(marko.id()).next()).isEqualTo(marko);
+  }
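+
+  // Background for the isGraphBinary() split used throughout this class: with the
+  // graph-binary sub-protocol the server returns "reference" elements (id and label only),
+  // so property values must be fetched explicitly, e.g. via elementMap(); with GraphSON 2
+  // the returned elements are detached but still carry their properties.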
+  /**
+   * Ensures that a previously returned {@link Edge}'s {@link Edge#id()} can be used as an input to
+   * {@link
+   * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#E(Object...)} to
+   * retrieve the {@link Edge} and that the returned {@link Edge} is the same.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_use_edge_id_as_parameter() {
+    GraphTraversalSource g = graphTraversalSource();
+
+    // given an existing edge
+    Edge created = g.E().has("weight", 0.2f).next();
+
+    if (isGraphBinary()) {
+      List<Map<Object, Object>> properties =
+          g.E().has("weight").elementMap("weight", "software", "person").toList();
+
+      assertThat(properties)
+          .anySatisfy(
+              props -> {
+                assertThatContainsProperties(props, "weight", 0.2f);
+                assertThatContainsLabel(props, Direction.IN, "software");
+                assertThatContainsLabel(props, Direction.OUT, "person");
+              });
+
+    } else {
+      assertThat(created)
+          .hasProperty("weight", 0.2f)
+          .hasInVLabel("software")
+          .hasOutVLabel("person");
+    }
+
+    // should be able to retrieve incoming and outgoing vertices by edge id
+    if (isGraphBinary()) {
+      Map<Object, Object> inProperties = g.E(created.id()).inV().elementMap("name", "lang").next();
+      Map<Object, Object> outProperties = g.E(created.id()).outV().elementMap("name").next();
+      assertThatContainsProperties(inProperties, "name", "lop", "lang", "java");
+      assertThatContainsProperties(outProperties, "name", "peter");
+
+    } else {
+      Vertex in = g.E(created.id()).inV().next();
+      Vertex out = g.E(created.id()).outV().next();
+
+      // should resolve to lop
+      assertThat(in).hasLabel("software").hasProperty("name", "lop").hasProperty("lang", "java");
+
+      // should resolve to peter, who created lop.
+      assertThat(out).hasLabel("person").hasProperty("name", "peter");
+    }
+  }
+
+  /**
+   * A sanity check that a returned {@link Vertex}'s id is a {@link Map}. This test could break in
+   * the future if the format of a vertex ID changes from a Map to something else in DSE.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_deserialize_vertex_id_as_map() {
+    GraphTraversalSource g = graphTraversalSource();
+    // given an existing vertex
+    Vertex marko = g.V().hasLabel("person").has("name", "marko").next();
+
+    // then id should be a map with expected values.
+    // Note: this is pretty dependent on DSE Graph's underlying id structure, which may vary in
+    // the future.
+    if (isGraphBinary()) {
+      assertThat(((String) marko.id())).contains("marko");
+      assertThat(marko.label()).isEqualTo("person");
+    } else {
+      @SuppressWarnings("unchecked")
+      Map<String, Object> id = (Map<String, Object>) marko.id();
+      assertThat(id)
+          .hasSize(3)
+          .containsEntry("~label", "person")
+          .containsKey("community_id")
+          .containsKey("member_id");
+    }
+  }
+
+  /**
+   * Ensures that a traversal that returns a result of mixed types is interpreted as a {@link Map}
+   * with {@link Object} values. Also uses {@link
+   * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal#by(Traversal)} with an
+   * anonymous traversal to get inbound 'created' edges and folds them into a list.
+   *
+   * <p>Executes a vertex traversal that binds label 'a' and 'b' to vertex properties and label 'c'
+   * to vertices that have edges from that vertex.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_handle_result_object_of_mixed_types() {
+    GraphTraversalSource g = graphTraversalSource();
+    // find all software vertices and select name, language, and find all vertices that created
+    // such software.
+    List<Map<String, Object>> results =
+        g.V()
+            .hasLabel("software")
+            .as("a", "b", "c")
+            .select("a", "b", "c")
+            .by("name")
+            .by("lang")
+            .by(__.in("created").fold())
+            .toList();
+
+    // ensure that lop and ripple and their data are the results returned.
+    assertThat(results).extracting(m -> m.get("a")).containsOnly("lop", "ripple");
+
+    for (Map<String, Object> result : results) {
+      assertThat(result).containsOnlyKeys("a", "b", "c");
+      // both software are written in java.
+      assertThat(result.get("b")).isEqualTo("java");
+      // ensure the created vertices match the creators of the software.
+      @SuppressWarnings("unchecked")
+      List<Vertex> vertices = (List<Vertex>) result.get("c");
+      if (result.get("a").equals("lop")) {
+        if (isGraphBinary()) {
+          // should contain three vertices
+          assertThat(vertices.size()).isEqualTo(3);
+        } else {
+          // lop, 'c' should contain marko, josh, peter.
+          assertThat(vertices)
+              .extracting(vertex -> vertex.property("name").value())
+              .containsOnly("marko", "josh", "peter");
+        }
+      } else {
+        if (isGraphBinary()) {
+          // should contain a single vertex
+          assertThat(vertices.size()).isEqualTo(1);
+        } else {
+          assertThat(vertices)
+              .extracting(vertex -> vertex.property("name").value())
+              .containsOnly("josh");
+        }
+      }
+    }
+  }
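+
+  // Illustrative sketch of the shape returned above (values from the sample graph):
+  //   {a=lop,    b=java, c=[marko's vertex, josh's vertex, peter's vertex]}
+  //   {a=ripple, b=java, c=[josh's vertex]}
+  // so consumers must downcast each entry, e.g. (String) result.get("b").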
+  /**
+   * Ensures that a traversal that returns a sub graph can be retrieved.
+   *
+   * <p>The subgraph is all members in a knows relationship, that is, all the people marko knows
+   * and the edges that connect them.
+   */
+  @Test
+  public void should_handle_subgraph_graphson() {
+    Assumptions.assumeThat(isGraphBinary()).isFalse();
+
+    GraphTraversalSource g = graphTraversalSource();
+    // retrieve a subgraph on the knows relationship, this omits the created edges.
+    Graph graph = (Graph) g.E().hasLabel("knows").subgraph("subGraph").cap("subGraph").next();
+
+    // there should only be 2 edges (since there are only 2 knows relationships) and 3 vertices
+    assertThat(graph.edges()).toIterable().hasSize(2);
+    assertThat(graph.vertices()).toIterable().hasSize(3);
+  }
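+
+  // Illustrative sketch: the returned Graph is an in-memory TinkerPop structure detached
+  // from DSE, so it can be traversed locally without further round trips, e.g.
+  //   long people = graph.traversal().V().hasLabel("person").count().next();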
+  /**
+   * Ensures that a traversal that returns a sub graph can be retrieved.
+   *
+   * <p>The subgraph is all members in a knows relationship, that is, all the people marko knows
+   * and the edges that connect them.
+   */
+  @Test
+  public void should_handle_subgraph_graph_binary() {
+    Assumptions.assumeThat(isGraphBinary()).isTrue();
+
+    GraphTraversalSource g = graphTraversalSource();
+    // retrieve a subgraph on the knows relationship, this omits the created edges.
+    String graph = (String) g.E().hasLabel("knows").subgraph("subGraph").cap("subGraph").next();
+
+    // there should only be 2 edges (since there are only 2 knows relationships) and 3 vertices
+    assertThat(graph).contains("vertices:3").contains("edges:2");
+  }
+
+  /**
+   * Ensures a traversal that yields no results is properly retrieved and is empty.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_return_zero_results() {
+    if (isGraphBinary()) {
+      assertThatThrownBy(() -> graphTraversalSource().V().hasLabel("notALabel").toList())
+          .isInstanceOf(InvalidQueryException.class)
+          .hasMessageContaining("Unknown vertex label 'notALabel'");
+    } else {
+      assertThat(graphTraversalSource().V().hasLabel("notALabel").toList()).isEmpty();
+    }
+  }
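+
+  // Hedged sketch (hypothetical helper, not driver API): code that must treat both
+  // behaviors uniformly can normalize the core-graph error to an empty list:
+  //   static List<Vertex> verticesByLabel(GraphTraversalSource g, String label) {
+  //     try {
+  //       return g.V().hasLabel(label).toList();
+  //     } catch (InvalidQueryException e) {
+  //       return Collections.emptyList(); // core graph rejects unknown labels up front
+  //     }
+  //   }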
+  /**
+   * Validates that a traversal returning a {@link Tree} structure is returned appropriately with
+   * the expected contents.
+   *
+   * <p>Retrieves trees of people marko knows and the software they created.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_parse_tree() {
+    // Get a tree structure showing the paths from marko to people he knows to software they've
+    // created.
+    @SuppressWarnings("unchecked")
+    Tree<String> tree =
+        graphTraversalSource()
+            .V()
+            .hasLabel("person")
+            .out("knows")
+            .out("created")
+            .tree()
+            .by("name")
+            .next();
+
+    // Marko knows josh who created lop and ripple.
+    assertThat(tree).tree("marko").tree("josh").tree("lop").isLeaf();
+
+    assertThat(tree).tree("marko").tree("josh").tree("ripple").isLeaf();
+  }
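+
+  // Illustrative sketch: TinkerPop's Tree<T> extends HashMap<T, Tree<T>>, so the same
+  // structure can also be walked directly, e.g.
+  //   Tree<String> josh = tree.get("marko").get("josh");
+  //   josh.keySet();  // -> [lop, ripple]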
+  /**
+   * Validates that a traversal using lambda operations with anonymous traversals is applied
+   * appropriately and returns the expected results.
+   *
+   * <p>The traversal filters 'person'-labeled vertices by name 'marko', flatMaps outgoing
+   * vertices on the 'knows' relationship by their outgoing 'created' vertices, then maps by
+   * their 'name' property and folds them into one list.
+   *
+   * <p>Note: This does not validate lambdas with Java functions, as those can't be interpreted
+   * and sent remotely.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_handle_lambdas() {
+    // Find all people marko knows and the software they created.
+    List<Object> software =
+        graphTraversalSource()
+            .V()
+            .hasLabel("person")
+            .filter(__.has("name", "marko"))
+            .out("knows")
+            .flatMap(__.out("created"))
+            .map(__.values("name"))
+            .fold()
+            .next();
+
+    // Marko only knows josh and vadas, of whom josh created lop and ripple.
+    assertThat(software).containsOnly("lop", "ripple");
+  }
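+
+  // Note that the "lambdas" here are anonymous traversals (__.has(...), __.out(...)),
+  // which serialize to Gremlin bytecode. A real Java lambda, e.g.
+  //   .filter(t -> t.get().value("name").equals("marko"))
+  // has no bytecode representation and cannot be executed remotely by the server.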
+  /**
+   * Validates that {@link
+   * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal#tryNext()} functions
+   * appropriately by returning an {@link Optional} that is present or empty depending on whether
+   * the traversal has remaining data.
+   *
+   * <p>This is more of a test of Tinkerpop than the protocol between the client and DSE graph.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_handle_tryNext() {
+    GraphTraversal<Vertex, Vertex> traversal =
+        graphTraversalSource().V().hasLabel("person").has("name", "marko");
+
+    // value present
+    Optional<Vertex> v0 = traversal.tryNext();
+    assertThat(v0.isPresent()).isTrue();
+    if (!isGraphBinary()) {
+      assertThat(v0.get()).hasProperty("name", "marko");
+    }
+
+    // value absent as there was only 1 matching vertex.
+    Optional<Vertex> v1 = traversal.tryNext();
+    assertThat(v1.isPresent()).isFalse();
+  }
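+
+  // tryNext() is simply hasNext() ? Optional.of(next()) : Optional.empty(), so it also
+  // supports a drain loop, e.g. (sketch):
+  //   Optional<Vertex> next;
+  //   while ((next = traversal.tryNext()).isPresent()) {
+  //     // ... consume next.get()
+  //   }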
+  /**
+   * Validates that {@link GraphTraversal#toStream()} appropriately creates a stream from the
+   * underlying iterator on the traversal, and that a subsequent call to toStream yields no
+   * results.
+   *
+   * <p>This is more of a test of Tinkerpop than the protocol between the client and DSE graph.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_handle_streaming_graphson() {
+    Assumptions.assumeThat(isGraphBinary()).isFalse();
+
+    GraphTraversal<Vertex, Vertex> traversal = graphTraversalSource().V().hasLabel("person");
+    // retrieve all person vertices to stream, and filter on client side all persons under age 30
+    // and map to their name.
+    List<String> under30 =
+        traversal
+            .toStream()
+            .filter(v -> v.<Integer>property("age").value() < 30)
+            .map(v -> v.<String>property("name").value())
+            .collect(Collectors.toList());
+
+    assertThat(under30).containsOnly("marko", "vadas");
+
+    // attempt to get a stream again, which should be empty.
+    assertThat(traversal.toStream().collect(Collectors.toList())).isEmpty();
+  }
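+
+  // A traversal is a single-use iterator: toStream() wraps that same iterator, which is
+  // why the second collect above is empty. Re-running the query requires spawning a new
+  // traversal from the source, e.g. graphTraversalSource().V().hasLabel("person").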
+  /**
+   * Validates that {@link GraphTraversal#toStream()} appropriately creates a stream from the
+   * underlying iterator on the traversal, and that a subsequent call to toStream yields no
+   * results.
+   *
+   * <p>This is more of a test of Tinkerpop than the protocol between the client and DSE graph.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_handle_streaming_binary() {
+    Assumptions.assumeThat(isGraphBinary()).isTrue();
+
+    GraphTraversal<Vertex, Map<Object, Object>> traversal =
+        graphTraversalSource().V().hasLabel("person").elementMap("age", "name");
+    // retrieve all person vertices to stream, and filter on client side all persons under age 30
+    // and map to their name.
+    List<String> under30 =
+        traversal
+            .toStream()
+            .filter(v -> (Integer) v.get("age") < 30)
+            .map(v -> (String) v.get("name"))
+            .collect(Collectors.toList());
+
+    assertThat(under30).containsOnly("marko", "vadas");
+
+    // attempt to get a stream again, which should be empty.
+    assertThat(traversal.toStream().collect(Collectors.toList())).isEmpty();
+  }
+
+  /**
+   * Validates that when traversing a path and labeling some of the elements during the traversal,
+   * the output elements are properly labeled.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_resolve_path_with_some_labels() {
+    // given a traversal where some objects have labels.
+    List<Path> paths =
+        graphTraversalSource()
+            .V()
+            .hasLabel("person")
+            .has("name", "marko")
+            .as("a")
+            .outE("knows")
+            .inV()
+            .as("c", "d")
+            .outE("created")
+            .as("e", "f", "g")
+            .inV()
+            .path()
+            .toList();
+
+    // then the paths returned should be labeled for the
+    // appropriate objects, and not labeled otherwise.
+    for (Path path : paths) {
+      TinkerPathAssert.validatePathObjects(path);
+      assertThat(path)
+          .hasLabel(0, "a")
+          .hasNoLabel(1)
+          .hasLabel(2, "c", "d")
+          .hasLabel(3, "e", "f", "g")
+          .hasNoLabel(4);
+    }
+  }
+
+  /**
+   * Validates that when traversing a path and labeling all of the elements during the traversal,
+   * the output elements are properly labeled.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_resolve_path_with_labels() {
+    // given a traversal where all objects have labels.
+    List<Path> paths =
+        graphTraversalSource()
+            .V()
+            .hasLabel("person")
+            .has("name", "marko")
+            .as("a")
+            .outE("knows")
+            .as("b")
+            .inV()
+            .as("c", "d")
+            .outE("created")
+            .as("e", "f", "g")
+            .inV()
+            .as("h")
+            .path()
+            .toList();
+
+    // then the paths returned should be labeled for all
+    // objects.
+    for (Path path : paths) {
+      TinkerPathAssert.validatePathObjects(path);
+      Assertions.assertThat(path.labels()).hasSize(5);
+      assertThat(path)
+          .hasLabel(0, "a")
+          .hasLabel(1, "b")
+          .hasLabel(2, "c", "d")
+          .hasLabel(3, "e", "f", "g")
+          .hasLabel(4, "h");
+    }
+  }
+
+  /**
+   * Validates that when traversing a path and labeling none of the elements during the traversal,
+   * all the labels are empty in the result.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_resolve_path_without_labels() {
+    // given a traversal where no objects have labels.
+    List<Path> paths =
+        graphTraversalSource()
+            .V()
+            .hasLabel("person")
+            .has("name", "marko")
+            .outE("knows")
+            .inV()
+            .outE("created")
+            .inV()
+            .path()
+            .toList();
+
+    // then the paths returned should have no labels for
+    // any objects.
+    for (Path path : paths) {
+      TinkerPathAssert.validatePathObjects(path);
+      for (int i = 0; i < 5; i++) {
+        assertThat(path).hasNoLabel(i);
+      }
+    }
+  }
+
+  @Test
+  public void should_handle_asynchronous_execution_graphson() {
+    Assumptions.assumeThat(isGraphBinary()).isFalse();
+
+    StringBuilder names = new StringBuilder();
+
+    CompletableFuture<List<Vertex>> future =
+        graphTraversalSource().V().hasLabel("person").promise(Traversal::toList);
+    try {
+      // simple processing to make sure the completable future works correctly and correct results
+      // are returned
+      future
+          .thenAccept(
+              vertices -> vertices.forEach(vertex -> names.append((String) vertex.value("name"))))
+          .get();
+    } catch (InterruptedException | ExecutionException e) {
+      fail("Shouldn't have thrown an exception waiting for the result to complete");
+    }
+
+    assertThat(names.toString()).contains("peter", "marko", "vadas", "josh");
+  }
+
+  @Test
+  public void should_handle_asynchronous_execution_graph_binary() {
+    Assumptions.assumeThat(isGraphBinary()).isTrue();
+
+    StringBuilder names = new StringBuilder();
+
+    CompletableFuture<List<Vertex>> future =
+        graphTraversalSource().V().hasLabel("person").promise(Traversal::toList);
+    try {
+      // simple processing to make sure the completable future works correctly and correct results
+      // are returned
+      future.thenAccept(vertices -> vertices.forEach(vertex -> names.append(vertex.id()))).get();
+    } catch (InterruptedException | ExecutionException e) {
+      fail("Shouldn't have thrown an exception waiting for the result to complete");
+    }
+
+    assertThat(names.toString()).contains("peter", "marko", "vadas", "josh");
+  }
+
+  /**
+   * Validates that if a traversal encounters an error on the server side, the exception is set on
+   * the future.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1.0")
+  public void should_fail_future_returned_from_promise_on_query_error() throws Exception {
+    CompletableFuture<Vertex> future =
+        graphTraversalSource().V("invalidid").peerPressure().promise(Traversal::next);
+
+    try {
+      future.get();
+      fail("Expected an ExecutionException");
+    } catch (ExecutionException e) {
+      assertThat(e.getCause()).isInstanceOf(InvalidQueryException.class);
+    }
+  }
+
+  /**
+   * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for
+   * use with DSLs.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_allow_use_of_dsl_graphson() {
+    Assumptions.assumeThat(isGraphBinary()).isFalse();
+
+    List<Vertex> vertices = socialTraversalSource().persons("marko").knows("vadas").toList();
+    assertThat(vertices.size()).isEqualTo(1);
+    assertThat(vertices.get(0))
+        .hasProperty("name", "marko")
+        .hasProperty("age", 29)
+        .hasLabel("person");
+  }
+
+  /**
+   * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for
+   * use with DSLs.
+   *
+   * @test_category dse:graph
+   */
+  @Test
+  public void should_allow_use_of_dsl_graph_binary() {
+    Assumptions.assumeThat(isGraphBinary()).isTrue();
+
+    List<Map<Object, Object>> vertices =
+        socialTraversalSource().persons("marko").knows("vadas").elementMap("name", "age").toList();
+    assertThat(vertices.size()).isEqualTo(1);
+
+    assertThatContainsProperties(vertices.get(0), "name", "marko", "age", 29);
+    assertThat(vertices.get(0).values()).contains("person");
+  }
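+
+  // promise() returns a plain CompletableFuture, so results compose with the usual async
+  // operators; sketch (the logging callback is hypothetical):
+  //   graphTraversalSource().V().hasLabel("person").promise(Traversal::toList)
+  //       .thenApply(List::size)
+  //       .thenAccept(count -> System.out.println("found " + count + " people"));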
+  /**
+   * Ensures that traversals with barriers (which return results bulked) contain the correct
+   * number of end results.
+   *
+   * <p>This will fail if run against DSE < 5.0.9 or DSE < 5.1.2.
+   */
+  @Test
+  public void should_return_correct_results_when_bulked() {
+    Assumptions.assumeThat(
+            CcmBridge.isDistributionOf(
+                BackendType.DSE, (dist, cass) -> dist.compareTo(Version.parse("5.1.2")) > 0))
+        .isTrue();
+
+    List<String> results = graphTraversalSource().E().label().barrier().toList();
+    Collections.sort(results);
+
+    List<String> expected =
+        Arrays.asList("knows", "created", "created", "knows", "created", "created");
+    Collections.sort(expected);
+
+    assertThat(results).isEqualTo(expected);
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeFluentIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeFluentIT.java
new file mode 100644
index 00000000000..d1355100c4b
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeFluentIT.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.api.core.graph.statement; + +import com.datastax.dse.driver.api.core.graph.ClassicGraphDataTypeITBase; +import com.datastax.dse.driver.api.core.graph.DseGraph; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.3", + description = "DSE 5.0.3 required for fluent API support") +public class ClassicGraphDataTypeFluentIT extends ClassicGraphDataTypeITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); + } + + @Override + public CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + public Vertex insertVertexAndReturn(String vertexLabel, String propertyName, Object value) { + return SESSION_RULE + .session() + .execute( + FluentGraphStatement.newInstance( + DseGraph.g.addV(vertexLabel).property(propertyName, value))) + .one() + .asVertex(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeScriptIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeScriptIT.java new file mode 100644 index 00000000000..81f088d0c18 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphDataTypeScriptIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph.statement; + +import com.datastax.dse.driver.api.core.graph.ClassicGraphDataTypeITBase; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.4", + description = "DSE 5.0.4 required for script API with GraphSON 2") +public class ClassicGraphDataTypeScriptIT extends ClassicGraphDataTypeITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); + } + + @Override + public CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + public Vertex insertVertexAndReturn(String vertexLabel, String propertyName, Object value) { + return SESSION_RULE + .session() + .execute( + ScriptGraphStatement.builder("g.addV(labelP).property(nameP, valueP)") + .setQueryParam("labelP", vertexLabel) + .setQueryParam("nameP", propertyName) + .setQueryParam("valueP", value) + .build()) + .one() + .asVertex(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalBatchIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalBatchIT.java new file mode 100644 index 00000000000..81f39753856 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalBatchIT.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph.statement; + +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.0", + description = "DSE 6.0 required for BatchGraphStatement.") +public class ClassicGraphTraversalBatchIT extends GraphTraversalBatchITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource g = EmptyGraph.instance().traversal(); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_NOT_STRICT)); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + protected boolean isGraphBinary() { + return false; + } + + @Override + protected CustomCcmRule ccmRule() { + return CCM_RULE; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return g; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalIT.java new file mode 100644 index 00000000000..672e1b6f679 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/ClassicGraphTraversalIT.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph.statement; + +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.9", + description = "DSE 5.0.9 required for inserting edges and vertices script.") +public class ClassicGraphTraversalIT extends GraphTraversalITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource graphTraversalSource = EmptyGraph.instance().traversal(); + private final SocialTraversalSource socialTraversal = + EmptyGraph.instance().traversal(SocialTraversalSource.class); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.ALLOW_SCANS)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CLASSIC_GRAPH)); + SESSION_RULE + .session() + .execute(ScriptGraphStatement.newInstance(SampleGraphScripts.MAKE_STRICT)); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + protected boolean isGraphBinary() { + return false; + } + + @Override + protected CustomCcmRule ccmRule() { + return CCM_RULE; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return graphTraversalSource; + } + + @Override + protected SocialTraversalSource socialTraversalSource() { + return socialTraversal; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeFluentIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeFluentIT.java new file mode 100644 index 00000000000..94e6415f471 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeFluentIT.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph.statement;
+
+import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
+import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.unfold;
+
+import com.datastax.dse.driver.api.core.graph.CoreGraphDataTypeITBase;
+import com.datastax.dse.driver.api.core.graph.FluentGraphStatement;
+import com.datastax.dse.driver.api.core.graph.GraphTestSupport;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import java.util.Map;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "6.8.0",
+    description = "DSE 6.8.0 required for Core graph support")
+@RunWith(DataProviderRunner.class)
+public class CoreGraphDataTypeFluentIT extends CoreGraphDataTypeITBase {
+
+  private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build();
+
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+
+  @Override
+  protected CqlSession session() {
+    return SESSION_RULE.session();
+  }
+
+  @Override
+  protected String graphName() {
+    return SESSION_RULE.getGraphName();
+  }
+
+  @Override
+  public Map<String, Object> insertVertexThenReadProperties(
+      Map<String, Object> properties, int vertexID, String vertexLabel) {
+    GraphTraversal<Vertex, Vertex> traversal = g.addV(vertexLabel).property("id", vertexID);
+
+    for (Map.Entry<String, Object> entry : properties.entrySet()) {
+      String typeDefinition = entry.getKey();
+      String propName = formatPropertyName(typeDefinition);
+      Object value = entry.getValue();
+      traversal = traversal.property(propName, value);
+    }
+
+    session().execute(FluentGraphStatement.newInstance(traversal));
+
+    return session()
+        .execute(
+            FluentGraphStatement.newInstance(
+                g.V().has(vertexLabel, "id", vertexID).valueMap().by(unfold())))
+        .one()
+        .asMap();
+  }
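+
+  // Sketch of the contract this hook fulfills (behavior assumed from its use here; the
+  // CoreGraphDataTypeITBase base class is not part of this diff): the base class supplies a
+  // map keyed by type definition, e.g. {"Text()": "abc", "Int()": 42}, calls this method,
+  // and asserts that the values read back via valueMap() equal the values inserted.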
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeScriptIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeScriptIT.java
new file mode 100644
index 00000000000..b79aecf6c6a
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphDataTypeScriptIT.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.core.graph.statement;
+
+import com.datastax.dse.driver.api.core.graph.CoreGraphDataTypeITBase;
+import com.datastax.dse.driver.api.core.graph.GraphTestSupport;
+import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
+import com.datastax.dse.driver.api.core.graph.ScriptGraphStatementBuilder;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.tngtech.java.junit.dataprovider.DataProviderRunner;
+import java.util.Map;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "6.8.0",
+    description = "DSE 6.8.0 required for Core graph support")
+@RunWith(DataProviderRunner.class)
+public class CoreGraphDataTypeScriptIT extends CoreGraphDataTypeITBase {
+
+  private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build();
+
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+
+  @Override
+  protected CqlSession session() {
+    return SESSION_RULE.session();
+  }
+
+  @Override
+  protected String graphName() {
+    return SESSION_RULE.getGraphName();
+  }
+
+  @Override
+  protected Map<String, Object> insertVertexThenReadProperties(
+      Map<String, Object> properties, int vertexID, String vertexLabel) {
+    StringBuilder insert = new StringBuilder("g.addV(vertexLabel).property('id', vertexID)");
+
+    ScriptGraphStatementBuilder statementBuilder =
+        new ScriptGraphStatementBuilder()
+            .setQueryParam("vertexID", vertexID)
+            .setQueryParam("vertexLabel", vertexLabel);
+
+    for (Map.Entry<String, Object> entry : properties.entrySet()) {
+      String typeDefinition = entry.getKey();
+      String propName = formatPropertyName(typeDefinition);
+      Object value = entry.getValue();
+
+      insert.append(String.format(".property('%s', %s)", propName, propName));
+      statementBuilder = statementBuilder.setQueryParam(propName, value);
+    }
+
+    session().execute(statementBuilder.setScript(insert.toString()).build());
+
+    return session()
+        .execute(
+            ScriptGraphStatement.newInstance(
+                    "g.V().has(vertexLabel, 'id', vertexID).valueMap().by(unfold())")
+                .setQueryParam("vertexID", vertexID)
+                .setQueryParam("vertexLabel", vertexLabel))
+        .one()
+        .asMap();
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalBatchIT.java
b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalBatchIT.java new file mode 100644 index 00000000000..be09ac1bfb2 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalBatchIT.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.statement; + +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "DSE 6.8.0 required for Core graph support") +public class CoreGraphTraversalBatchIT extends GraphTraversalBatchITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource g = EmptyGraph.instance().traversal().with("allow-filtering"); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CORE_GRAPH)); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + protected boolean isGraphBinary() { + return true; + } + + @Override + protected CustomCcmRule ccmRule() { + return CCM_RULE; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return g; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalIT.java new file mode 100644 index 00000000000..d97b0da958a --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/CoreGraphTraversalIT.java @@ -0,0 +1,84 @@ 
+/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.statement; + +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.SampleGraphScripts; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8.0", + description = "DSE 6.8.0 required for Core graph support") +public class CoreGraphTraversalIT extends GraphTraversalITBase { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.CCM_BUILDER_WITH_GRAPH.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getCoreGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private final GraphTraversalSource graphTraversalSource = + EmptyGraph.instance().traversal().with("allow-filtering"); + private final SocialTraversalSource socialTraversalSource = + EmptyGraph.instance().traversal(SocialTraversalSource.class).with("allow-filtering"); + + @BeforeClass + public static void setupSchema() { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(SampleGraphScripts.CORE_GRAPH)); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @Override + protected boolean isGraphBinary() { + return true; + } + + @Override + protected CustomCcmRule ccmRule() { + return CCM_RULE; + } + + @Override + protected GraphTraversalSource graphTraversalSource() { + return graphTraversalSource; + } + + @Override + protected SocialTraversalSource socialTraversalSource() { + return socialTraversalSource; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalBatchITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalBatchITBase.java new file mode 100644 index 00000000000..0c8c3b8b5d4 --- /dev/null +++ 
b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalBatchITBase.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.statement; + +import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsLabel; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsProperties; +import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.addE; +import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.addV; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.dse.driver.api.core.graph.BatchGraphStatement; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import java.util.Map; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.junit.Test; + +public abstract class GraphTraversalBatchITBase { + + protected abstract CqlSession session(); + + protected abstract boolean isGraphBinary(); + + protected abstract CustomCcmRule ccmRule(); + + protected abstract GraphTraversalSource graphTraversalSource(); + + @Test + public void should_allow_vertex_and_edge_insertions_in_batch() { + BatchGraphStatement batch = + BatchGraphStatement.builder() + .addTraversals( + ImmutableList.of( + addV("person").property("name", "batch1").property("age", 1), + addV("person").property("name", "batch2").property("age", 2))) + .build(); + + BatchGraphStatement batch2 = + BatchGraphStatement.builder() + .addTraversals(batch) + .addTraversal( + addE("knows") + .from(__.V().has("name", "batch1")) + .to(__.V().has("name", "batch2")) + .property("weight", 2.3f)) + .build(); + + assertThat(batch.size()).isEqualTo(2); + assertThat(batch2.size()).isEqualTo(3); + + session().execute(batch2); + + if (isGraphBinary()) { + Map properties = + session() + .execute( + FluentGraphStatement.newInstance( + graphTraversalSource().V().has("name", "batch1").elementMap("age"))) + .one() + .asMap(); + + assertThatContainsProperties(properties, "age", 1); + + properties = + session() + .execute( + FluentGraphStatement.newInstance( + graphTraversalSource().V().has("name", 
"batch2").elementMap("age"))) + .one() + .asMap(); + + assertThatContainsProperties(properties, "age", 2); + + properties = + session() + .execute( + FluentGraphStatement.newInstance( + graphTraversalSource() + .V() + .has("name", "batch1") + .bothE() + .elementMap("weight", "person"))) + .one() + .asMap(); + + assertThatContainsProperties(properties, "weight", 2.3f); + assertThatContainsLabel(properties, Direction.IN, "person"); + assertThatContainsLabel(properties, Direction.OUT, "person"); + + } else { + + assertThat( + session() + .execute( + FluentGraphStatement.newInstance( + graphTraversalSource().V().has("name", "batch1"))) + .one() + .asVertex()) + .hasProperty("age", 1); + + assertThat( + session() + .execute( + FluentGraphStatement.newInstance( + graphTraversalSource().V().has("name", "batch2"))) + .one() + .asVertex()) + .hasProperty("age", 2); + + assertThat( + session() + .execute( + FluentGraphStatement.newInstance( + graphTraversalSource().V().has("name", "batch1").bothE())) + .one() + .asEdge()) + .hasProperty("weight", 2.3f) + .hasOutVLabel("person") + .hasInVLabel("person"); + } + } + + @Test + public void should_fail_if_no_bytecode_in_batch() { + BatchGraphStatement batch = + BatchGraphStatement.builder().addTraversals(ImmutableList.of()).build(); + assertThat(batch.size()).isEqualTo(0); + try { + session().execute(batch); + fail( + "Should have thrown InvalidQueryException because batch does not contain any traversals."); + } catch (InvalidQueryException e) { + assertThat(e.getMessage()) + .contains("The batch statement sent does not contain any traversal"); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalITBase.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalITBase.java new file mode 100644 index 00000000000..5bcb01bc165 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalITBase.java @@ -0,0 +1,668 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph.statement; + +import static com.datastax.dse.driver.api.core.graph.FluentGraphStatement.newInstance; +import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; +import static com.datastax.dse.driver.api.core.graph.TinkerPathAssert.validatePathObjects; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsLabel; +import static com.datastax.dse.driver.internal.core.graph.GraphTestUtils.assertThatContainsProperties; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.dse.driver.api.core.graph.AsyncGraphResultSet; +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphStatement; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.dse.driver.api.core.graph.SocialTraversalSource; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.collect.Lists; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import org.apache.tinkerpop.gremlin.process.traversal.Path; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__; +import org.apache.tinkerpop.gremlin.process.traversal.step.util.Tree; +import org.apache.tinkerpop.gremlin.structure.Direction; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Graph; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.assertj.core.api.Assertions; +import org.assertj.core.api.Assumptions; +import org.junit.Test; + +public abstract class GraphTraversalITBase { + + protected abstract CqlSession session(); + + protected abstract boolean isGraphBinary(); + + protected abstract CustomCcmRule ccmRule(); + + protected abstract GraphTraversalSource graphTraversalSource(); + + protected abstract SocialTraversalSource socialTraversalSource(); + + /** + * Ensures that a previously returned {@link Vertex}'s {@link Vertex#id()} can be used as an input + * to {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#V(Object...)} to + * retrieve the {@link Vertex} and that the returned {@link Vertex} is the same. 
+ * + * @test_category dse:graph + */ + @Test + public void should_use_vertex_id_as_parameter() { + GraphTraversal query = + graphTraversalSource().V().hasLabel("person").has("name", "marko"); + GraphResultSet resultSet = session().execute(newInstance(query)); + + List results = resultSet.all(); + + assertThat(results.size()).isEqualTo(1); + Vertex marko = results.get(0).asVertex(); + if (isGraphBinary()) { + Map properties = + session().execute(newInstance(query.elementMap("name"))).one().asMap(); + assertThatContainsProperties(properties, "name", "marko"); + } else { + assertThat(marko).hasProperty("name", "marko"); + } + + resultSet = session().execute(newInstance(graphTraversalSource().V(marko.id()))); + + results = resultSet.all(); + assertThat(results.size()).isEqualTo(1); + Vertex marko2 = results.get(0).asVertex(); + // Ensure that the returned vertex is the same as the first. + assertThat(marko2).isEqualTo(marko); + } + + /** + * Ensures that a previously returned {@link Edge}'s {@link Edge#id()} can be used as an input to + * {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource#E(Object...)} to + * retrieve the {@link Edge} and that the returned {@link Edge} is the same. + * + * @test_category dse:graph + */ + @Test + public void should_use_edge_id_as_parameter() { + GraphTraversal query = graphTraversalSource().E().has("weight", 0.2f); + GraphResultSet resultSet = session().execute(newInstance(query)); + + List results = resultSet.all(); + assertThat(results.size()).isEqualTo(1); + + Edge created = results.get(0).asEdge(); + if (isGraphBinary()) { + Map properties = + session() + .execute(newInstance(query.elementMap("weight", "software", "person"))) + .one() + .asMap(); + + assertThatContainsProperties(properties, "weight", 0.2f); + assertThatContainsLabel(properties, Direction.IN, "software"); + assertThatContainsLabel(properties, Direction.OUT, "person"); + } else { + assertThat(created) + .hasProperty("weight", 0.2f) + .hasInVLabel("software") + .hasOutVLabel("person"); + } + + if (isGraphBinary()) { + Map inProperties = + session() + .execute( + newInstance( + graphTraversalSource().E(created.id()).inV().elementMap("name", "lang"))) + .one() + .asMap(); + assertThatContainsProperties(inProperties, "name", "lop", "lang", "java"); + } else { + resultSet = session().execute(newInstance(graphTraversalSource().E(created.id()).inV())); + results = resultSet.all(); + assertThat(results.size()).isEqualTo(1); + Vertex lop = results.get(0).asVertex(); + + assertThat(lop).hasLabel("software").hasProperty("name", "lop").hasProperty("lang", "java"); + } + } + + /** + * A sanity check that a returned {@link Vertex}'s id is a {@link Map}. This test could break in + * the future if the format of a vertex ID changes from a Map to something else in DSE. 
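+ * When the graph binary protocol is in use (see {@code isGraphBinary()}), the id is a plain
+ * String rather than a Map; both cases are asserted below.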
+ * + * @test_category dse:graph + */ + @Test + public void should_deserialize_vertex_id_as_map() { + GraphResultSet resultSet = + session() + .execute( + newInstance(graphTraversalSource().V().hasLabel("person").has("name", "marko"))); + + List results = resultSet.all(); + assertThat(results.size()).isEqualTo(1); + + Vertex marko = results.get(0).asVertex(); + + if (isGraphBinary()) { + assertThat(((String) marko.id())).contains("marko"); + assertThat(marko.label()).isEqualTo("person"); + } else { + assertThat(marko).hasProperty("name", "marko"); + @SuppressWarnings("unchecked") + Map id = (Map) marko.id(); + assertThat(id) + .hasSize(3) + .containsEntry("~label", "person") + .containsKey("community_id") + .containsKey("member_id"); + } + } + + /** + * Ensures that a traversal that returns a result of mixed types is interpreted as a {@link Map} + * with {@link Object} values. Also uses {@link + * org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal#by(org.apache.tinkerpop.gremlin.process.traversal.Traversal)} + * with an anonymous traversal to get inbound 'created' edges and folds them into a list. + * + *
<p>
        Executes a vertex traversal that binds label 'a' and 'b' to vertex properties and label 'c' + * to vertices that have edges from that vertex. + * + * @test_category dse:graph + */ + @Test + public void should_handle_result_object_of_mixed_types() { + // find all software vertices and select name, language, and find all vertices that created such + // software. + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .V() + .hasLabel("software") + .as("a", "b", "c") + .select("a", "b", "c") + .by("name") + .by("lang") + .by(__.in("created").fold()))); + + List results = rs.all(); + assertThat(results.size()).isEqualTo(2); + + // Ensure that we got 'lop' and 'ripple' for property a. + assertThat(results) + .extracting(m -> m.getByKey("a").as(Object.class)) + .containsOnly("lop", "ripple"); + + for (GraphNode result : results) { + // The row should represent a map with a, b, and c keys. + assertThat(ImmutableList.copyOf(result.keys())).containsOnlyOnce("a", "b", "c"); + // 'e' should not exist, thus it should be null. + assertThat(result.getByKey("e")).isNull(); + // both software are written in java. + assertThat(result.getByKey("b").isNull()).isFalse(); + assertThat(result.getByKey("b").asString()).isEqualTo("java"); + GraphNode c = result.getByKey("c"); + assertThat(c.isList()).isTrue(); + if (result.getByKey("a").asString().equals("lop")) { + if (isGraphBinary()) { + // should contain three vertices + Assertions.assertThat(c.size()).isEqualTo(3); + } else { + // 'c' should contain marko, josh, peter. + // Ensure we have three vertices. + assertThat(c.size()).isEqualTo(3); + List vertices = + Lists.newArrayList( + c.getByIndex(0).asVertex(), + c.getByIndex(1).asVertex(), + c.getByIndex(2).asVertex()); + assertThat(vertices) + .extracting(vertex -> vertex.property("name").value()) + .containsOnly("marko", "josh", "peter"); + } + } else { + if (isGraphBinary()) { + // has only one label + Assertions.assertThat(c.size()).isEqualTo(1); + } else { + // ripple, 'c' should contain josh. + // Ensure we have 1 vertex. + assertThat(c.size()).isEqualTo(1); + Vertex vertex = c.getByIndex(0).asVertex(); + assertThat(vertex).hasProperty("name", "josh"); + } + } + } + } + + /** + * Ensures a traversal that yields no results is properly retrieved and is empty. + * + * @test_category dse:graph + */ + @Test + public void should_return_zero_results() { + if (isGraphBinary()) { + assertThatThrownBy( + () -> + session().execute(newInstance(graphTraversalSource().V().hasLabel("notALabel")))) + .isInstanceOf(InvalidQueryException.class) + .hasMessageContaining("Unknown vertex label 'notALabel'"); + } else { + GraphResultSet rs = + session().execute(newInstance(graphTraversalSource().V().hasLabel("notALabel"))); + assertThat(rs.all().size()).isZero(); + } + } + + /** + * Ensures a traversal that yields no results is properly retrieved and is empty, using GraphSON2 + * and the TinkerPop transform results function. + * + * @test_category dse:graph + */ + @Test + public void should_return_zero_results_graphson_2() { + Assumptions.assumeThat(isGraphBinary()).isFalse(); + + GraphStatement simpleGraphStatement = + ScriptGraphStatement.newInstance("g.V().hasLabel('notALabel')"); + + GraphResultSet rs = session().execute(simpleGraphStatement); + assertThat(rs.one()).isNull(); + } + + /** + * Validates that a traversal using lambda operations with anonymous traversals are applied + * appropriately and return the expected results. + * + *
<p>
        Traversal that filters 'person'-labeled vertices by name 'marko' and flatMaps outgoing + * vertices on the 'knows' relationship by their outgoing 'created' vertices and then maps by + * their 'name' property and folds them into one list. + * + *
<p>
        Note: This does not validate lambdas with functions as those can't be interpreted and + * sent remotely. + * + * @test_category dse:graph + */ + @Test + public void should_handle_lambdas() { + // Find all people marko knows and the software they created. + GraphResultSet result = + session() + .execute( + newInstance( + graphTraversalSource() + .V() + .hasLabel("person") + .filter(__.has("name", "marko")) + .out("knows") + .flatMap(__.out("created")) + .map(__.values("name")) + .fold())); + + // Marko only knows josh and vadas, of which josh created lop and ripple. + List software = result.one().as(GenericType.listOf(String.class)); + assertThat(software).containsOnly("lop", "ripple"); + } + + /** + * Validates that when traversing a path and labeling some of the elements during the traversal + * that the output elements are properly labeled. + * + * @test_category dse:graph + */ + @Test + public void should_resolve_path_with_some_labels() { + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .V() + .hasLabel("person") + .has("name", "marko") + .as("a") + .outE("knows") + .inV() + .as("c", "d") + .outE("created") + .as("e", "f", "g") + .inV() + .path())); + + List results = rs.all(); + assertThat(results.size()).isEqualTo(2); + for (GraphNode result : results) { + Path path = result.asPath(); + validatePathObjects(path); + assertThat(path.labels()).hasSize(5); + assertThat(path) + .hasLabel(0, "a") + .hasNoLabel(1) + .hasLabel(2, "c", "d") + .hasLabel(3, "e", "f", "g") + .hasNoLabel(4); + } + } + + /** + * Validates that when traversing a path and labeling all of the elements during the traversal + * that the output elements are properly labeled. + * + * @test_category dse:graph + */ + @Test + public void should_resolve_path_with_labels() { + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .V() + .hasLabel("person") + .has("name", "marko") + .as("a") + .outE("knows") + .as("b") + .inV() + .as("c", "d") + .outE("created") + .as("e", "f", "g") + .inV() + .as("h") + .path())); + List results = rs.all(); + assertThat(results.size()).isEqualTo(2); + for (GraphNode result : results) { + Path path = result.asPath(); + validatePathObjects(path); + assertThat(path.labels()).hasSize(5); + assertThat(path) + .hasLabel(0, "a") + .hasLabel(1, "b") + .hasLabel(2, "c", "d") + .hasLabel(3, "e", "f", "g") + .hasLabel(4, "h"); + } + } + + /** + * Validates that when traversing a path and labeling none of the elements during the traversal + * that all the labels are empty in the result. + * + * @test_category dse:graph + */ + @Test + public void should_resolve_path_without_labels() { + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .V() + .hasLabel("person") + .has("name", "marko") + .outE("knows") + .inV() + .outE("created") + .inV() + .path())); + List results = rs.all(); + assertThat(results.size()).isEqualTo(2); + for (GraphNode result : results) { + Path path = result.asPath(); + validatePathObjects(path); + assertThat(path.labels()).hasSize(5); + for (int i = 0; i < 5; i++) assertThat(path).hasNoLabel(i); + } + } + + /** + * Validates that a traversal returning a Tree structure is returned appropriately with the + * expected contents. + * + *
<p>
        Retrieves trees of people marko knows and the software they created. + * + * @test_category dse:graph + */ + @Test + public void should_parse_tree() { + // Get a tree structure showing the paths from mark to people he knows to software they've + // created. + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .V() + .hasLabel("person") + .out("knows") + .out("created") + .tree() + .by("name"))); + + List results = rs.all(); + assertThat(results.size()).isEqualTo(1); + + // [{key=marko, value=[{key=josh, value=[{key=ripple, value=[]}, {key=lop, value=[]}]}]}] + GraphNode result = results.get(0); + + @SuppressWarnings("unchecked") + Tree tree = result.as(Tree.class); + + assertThat(tree).tree("marko").tree("josh").tree("lop").isLeaf(); + + assertThat(tree).tree("marko").tree("josh").tree("ripple").isLeaf(); + } + + /** + * Ensures that a traversal that returns a sub graph can be retrieved. + * + *
<p>
        The subgraph is all members in a knows relationship, thus is all people who marko knows and + * the edges that connect them. + */ + @Test + public void should_handle_subgraph_graphson() { + Assumptions.assumeThat(isGraphBinary()).isFalse(); + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .E() + .hasLabel("knows") + .subgraph("subGraph") + .cap("subGraph"))); + + List results = rs.all(); + assertThat(results.size()).isEqualTo(1); + + Graph graph = results.get(0).as(Graph.class); + + assertThat(graph.edges()).toIterable().hasSize(2); + assertThat(graph.vertices()).toIterable().hasSize(3); + } + + /** + * Ensures that a traversal that returns a sub graph can be retrieved. + * + *
<p>
        The subgraph is all members in a knows relationship, thus is all people who marko knows and + * the edges that connect them. + */ + @Test + public void should_handle_subgraph_grap_binary() { + Assumptions.assumeThat(isGraphBinary()).isTrue(); + GraphResultSet rs = + session() + .execute( + newInstance( + graphTraversalSource() + .E() + .hasLabel("knows") + .subgraph("subGraph") + .cap("subGraph"))); + + List results = rs.all(); + assertThat(results.size()).isEqualTo(1); + + String graph = results.get(0).as(String.class); + + assertThat(graph).contains("vertices:3").contains("edges:2"); + } + + /** + * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for + * use with DSLs. + * + * @test_category dse:graph + */ + @Test + public void should_allow_use_of_dsl_graphson() throws Exception { + Assumptions.assumeThat(isGraphBinary()).isFalse(); + SocialTraversalSource gSocial = socialTraversalSource(); + + GraphStatement gs = newInstance(gSocial.persons("marko").knows("vadas")); + + GraphResultSet rs = session().execute(gs); + List results = rs.all(); + + assertThat(results.size()).isEqualTo(1); + assertThat(results.get(0).asVertex()) + .hasProperty("name", "marko") + .hasProperty("age", 29) + .hasLabel("person"); + } + + /** + * A simple smoke test to ensure that a user can supply a custom {@link GraphTraversalSource} for + * use with DSLs. + * + * @test_category dse:graph + */ + @Test + public void should_allow_use_of_dsl_graph_binary() throws Exception { + Assumptions.assumeThat(isGraphBinary()).isTrue(); + SocialTraversalSource gSocial = socialTraversalSource(); + + GraphStatement gs = + newInstance(gSocial.persons("marko").knows("vadas").elementMap("name", "age")); + + GraphResultSet rs = session().execute(gs); + List results = rs.all(); + + assertThat(results.size()).isEqualTo(1); + assertThatContainsProperties(results.get(0).asMap(), "name", "marko", "age", 29); + Assertions.assertThat(results.get(0).asMap().values()).contains("person"); + } + + /** + * Ensures that traversals with barriers (which return results bulked) contain the correct amount + * of end results. + * + *
<p>
        This will fail if ran against DSE < 5.0.9 or DSE < 5.1.2. + */ + @Test + public void should_return_correct_results_when_bulked() { + Assumptions.assumeThat( + CcmBridge.isDistributionOf( + BackendType.DSE, (dist, cass) -> dist.compareTo(Version.parse("5.1.2")) > 0)) + .isTrue(); + + GraphResultSet rs = + session().execute(newInstance(graphTraversalSource().E().label().barrier())); + + List results = + rs.all().stream().map(GraphNode::asString).sorted().collect(Collectors.toList()); + + assertThat(results) + .hasSize(6) + .containsSequence("created", "created", "created", "created") + .containsSequence("knows", "knows"); + } + + @Test + public void should_handle_asynchronous_execution_graphson() { + Assumptions.assumeThat(isGraphBinary()).isFalse(); + StringBuilder names = new StringBuilder(); + + CompletionStage future = + session() + .executeAsync( + FluentGraphStatement.newInstance(graphTraversalSource().V().hasLabel("person"))); + + try { + // dumb processing to make sure the completable future works correctly and correct results are + // returned + Iterable results = + future.thenApply(AsyncGraphResultSet::currentPage).toCompletableFuture().get(); + for (GraphNode gn : results) { + names.append(gn.asVertex().property("name").value()); + } + } catch (InterruptedException | ExecutionException e) { + fail("Shouldn't have thrown an exception waiting for the result to complete"); + } + + assertThat(names.toString()).contains("peter", "marko", "vadas", "josh"); + } + + @Test + public void should_handle_asynchronous_execution_graph_binary() { + Assumptions.assumeThat(isGraphBinary()).isTrue(); + StringBuilder names = new StringBuilder(); + + CompletionStage future = + session() + .executeAsync( + FluentGraphStatement.newInstance(graphTraversalSource().V().hasLabel("person"))); + + try { + // dumb processing to make sure the completable future works correctly and correct results are + // returned + Iterable results = + future.thenApply(AsyncGraphResultSet::currentPage).toCompletableFuture().get(); + for (GraphNode gn : results) { + names.append(gn.asVertex().id()); + } + } catch (InterruptedException | ExecutionException e) { + fail("Shouldn't have thrown an exception waiting for the result to complete"); + } + + assertThat(names.toString()).contains("peter", "marko", "vadas", "josh"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMetaPropertiesIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMetaPropertiesIT.java new file mode 100644 index 00000000000..d8058cbf59e --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMetaPropertiesIT.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.graph.statement; + +// INFO: meta props are going away in NGDG + +import static com.datastax.dse.driver.api.core.graph.DseGraph.g; +import static com.datastax.dse.driver.api.core.graph.FluentGraphStatement.newInstance; +import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS; +import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT; +import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; + +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +// INFO: meta props are going away in NGDG + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.3", + description = "DSE 5.0.3 required for remote TinkerPop support") +public class GraphTraversalMetaPropertiesIT { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + /** Builds a simple schema that provides for a vertex with a property with sub properties. */ + private static final String META_PROPS = + MAKE_STRICT + + ALLOW_SCANS + + "schema.propertyKey('sub_prop').Text().create()\n" + + "schema.propertyKey('sub_prop2').Text().create()\n" + + "schema.propertyKey('meta_prop').Text().properties('sub_prop', 'sub_prop2').create()\n" + + "schema.vertexLabel('meta_v').properties('meta_prop').create()"; + + /** + * Ensures that a traversal that yields a vertex with a property that has its own properties that + * is appropriately parsed and made accessible via {@link VertexProperty#property(String)}. 
+ * + * @test_category dse:graph + */ + @Test + public void should_parse_meta_properties() { + SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(META_PROPS)); + + GraphResultSet result = + SESSION_RULE + .session() + .execute( + newInstance( + g.addV("meta_v") + .property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2"))); + + Vertex v = result.one().asVertex(); + assertThat(v).hasProperty("meta_prop"); + + VertexProperty metaProp = v.property("meta_prop"); + assertThat(metaProp) + .hasValue("hello") + .hasProperty("sub_prop", "hi") + .hasProperty("sub_prop2", "hi2"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMultiPropertiesIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMultiPropertiesIT.java new file mode 100644 index 00000000000..c30e770f40e --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/graph/statement/GraphTraversalMultiPropertiesIT.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.graph.statement; + +import static com.datastax.dse.driver.api.core.graph.DseGraph.g; +import static com.datastax.dse.driver.api.core.graph.FluentGraphStatement.newInstance; +import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.ALLOW_SCANS; +import static com.datastax.dse.driver.api.core.graph.SampleGraphScripts.MAKE_STRICT; +import static com.datastax.dse.driver.api.core.graph.TinkerGraphAssertions.assertThat; + +import com.datastax.dse.driver.api.core.graph.GraphResultSet; +import com.datastax.dse.driver.api.core.graph.GraphTestSupport; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import java.util.Iterator; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.apache.tinkerpop.gremlin.structure.VertexProperty; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +// INFO: multi props are not supported in Core +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0.3", + description = "DSE 5.0.3 required for remote TinkerPop support") +public class GraphTraversalMultiPropertiesIT { + + private static final CustomCcmRule CCM_RULE = GraphTestSupport.GRAPH_CCM_RULE_BUILDER.build(); + + private static final SessionRule SESSION_RULE = + GraphTestSupport.getClassicGraphSessionBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + /** Builds a simple schema that provides for a vertex with a multi-cardinality property. */ + private static final String MULTI_PROPS = + MAKE_STRICT + + ALLOW_SCANS + + "schema.propertyKey('multi_prop').Text().multiple().create()\n" + + "schema.vertexLabel('multi_v').properties('multi_prop').create()\n"; + + /** + * Ensures that a traversal that yields a vertex with a property name that is present multiple + * times that the properties are parsed and made accessible via {@link + * Vertex#properties(String...)}. + * + * @test_category dse:graph + */ + @Test + public void should_parse_multiple_cardinality_properties() { + // given a schema that defines multiple cardinality properties. 
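+ // MULTI_PROPS (defined above) creates the 'multi_prop' Text property with multiple()
+ // cardinality and a 'multi_v' vertex label that carries it.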
+ SESSION_RULE.session().execute(ScriptGraphStatement.newInstance(MULTI_PROPS)); + + // when adding a vertex with a multiple cardinality property + GraphResultSet result = + SESSION_RULE + .session() + .execute( + newInstance( + g.addV("multi_v") + .property("multi_prop", "Hello") + .property("multi_prop", "Sweet") + .property("multi_prop", "World"))); + + Vertex v = result.one().asVertex(); + assertThat(v).hasProperty("multi_prop"); + + Iterator> multiProp = v.properties("multi_prop"); + assertThat(multiProp) + .toIterable() + .extractingResultOf("value") + .containsExactly("Hello", "Sweet", "World"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/insights/InsightsClientIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/insights/InsightsClientIT.java new file mode 100644 index 00000000000..0296908be44 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/insights/InsightsClientIT.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.insights; + +import com.datastax.dse.driver.internal.core.insights.InsightsClient; +import com.datastax.dse.driver.internal.core.insights.configuration.InsightsConfiguration; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import io.netty.util.concurrent.DefaultEventExecutor; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.7.0", + description = "DSE 6.7.0 required for Insights support") +public class InsightsClientIT { + private static final StackTraceElement[] EMPTY_STACK_TRACE = {}; + + private static CustomCcmRule ccmRule = + CustomCcmRule.builder() + .withNodes(1) + .withJvmArgs( + "-Dinsights.service_options_enabled=true", + "-Dinsights.default_mode=ENABLED_WITH_LOCAL_STORAGE") + .build(); + + private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @Test + public void should_send_insights_startup_event_using_client() + throws ExecutionException, InterruptedException, TimeoutException { + // given + InsightsClient insightsClient = + InsightsClient.createInsightsClient( + new InsightsConfiguration(true, 300000L, new DefaultEventExecutor()), + (InternalDriverContext) sessionRule.session().getContext(), + EMPTY_STACK_TRACE); + + // when + insightsClient.sendStartupMessage().toCompletableFuture().get(1000, TimeUnit.SECONDS); + + // then no exception + } + + @Test + public void should_send_insights_status_event_using_client() + throws ExecutionException, InterruptedException, TimeoutException { + // given + InsightsClient insightsClient = + InsightsClient.createInsightsClient( + new InsightsConfiguration(true, 300000L, new DefaultEventExecutor()), + (InternalDriverContext) sessionRule.session().getContext(), + EMPTY_STACK_TRACE); + + // when + insightsClient.sendStatusMessage().toCompletableFuture().get(1000, TimeUnit.SECONDS); + + // then no exception + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/AbstractMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/AbstractMetadataIT.java new file mode 100644 index 00000000000..ea28dc6449e --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/AbstractMetadataIT.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import java.util.Optional; +import org.junit.experimental.categories.Category; + +/* Abstract class to hold common methods for Metadata Schema tests. */ +@Category(ParallelizableTests.class) +public abstract class AbstractMetadataIT { + + /* Convenience method for executing a CQL statement using the test's Session Rule. */ + public void execute(String cql) { + getSessionRule() + .session() + .execute( + SimpleStatement.builder(cql) + .setExecutionProfile(getSessionRule().slowProfile()) + .build()); + } + + /** + * Convenience method for retrieving the Keyspace metadata from this test's Session Rule. Also + * asserts the Keyspace exists and has the expected name. + */ + public DseKeyspaceMetadata getKeyspace() { + Optional keyspace = + getSessionRule().session().getMetadata().getKeyspace(getSessionRule().keyspace()); + assertThat(keyspace) + .isPresent() + .hasValueSatisfying( + ks -> { + assertThat(ks).isInstanceOf(DseKeyspaceMetadata.class); + assertThat(ks.getName()).isEqualTo(getSessionRule().keyspace()); + }); + return ((DseKeyspaceMetadata) keyspace.get()); + } + + /* Concrete ITs should return their ClassRule SessionRule. */ + protected abstract SessionRule getSessionRule(); +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadataIT.java new file mode 100644 index 00000000000..4c899fa5e63 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadataIT.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.schema.AggregateMetadata; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import java.util.Objects; +import java.util.Optional; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "DSE 5.0+ required function/aggregate support") +public class DseAggregateMetadataIT extends AbstractMetadataIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); + + @Override + protected SessionRule getSessionRule() { + return DseAggregateMetadataIT.SESSION_RULE; + } + + @Test + public void should_parse_aggregate_without_deterministic() { + String cqlFunction = + "CREATE FUNCTION nondetf(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return new java.util.Random().nextInt(i);';"; + String cqlAggregate = "CREATE AGGREGATE nondeta() SFUNC nondetf STYPE int INITCOND 0;"; + execute(cqlFunction); + execute(cqlAggregate); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional aggregateOpt = keyspace.getAggregate("nondeta"); + assertThat(aggregateOpt.map(DseAggregateMetadata.class::cast)) + .hasValueSatisfying( + aggregate -> { + if (isDse6OrHigher()) { + assertThat(aggregate.getDeterministic()).contains(false); + } else { + assertThat(aggregate.getDeterministic()).isEmpty(); + } + assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); + assertThat(aggregate.describe(false)) + .isEqualTo( + String.format( + "CREATE AGGREGATE \"%s\".\"nondeta\"() SFUNC \"nondetf\" STYPE int INITCOND 0;", + keyspace.getName().asInternal())); + }); + } + + @Test + public void should_parse_aggregate_with_deterministic() { + assumeThat(isDse6OrHigher()).describedAs("DSE 6.0+ required for DETERMINISTIC").isTrue(); + String cqlFunction = + "CREATE FUNCTION detf(i int, y int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return i+y;';"; + String cqlAggregate = + "CREATE AGGREGATE deta(int) SFUNC detf STYPE int INITCOND 0 DETERMINISTIC;"; + execute(cqlFunction); + execute(cqlAggregate); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional aggregateOpt = keyspace.getAggregate("deta", DataTypes.INT); + assertThat(aggregateOpt.map(DseAggregateMetadata.class::cast)) + .hasValueSatisfying( + aggregate -> { + assertThat(aggregate.getDeterministic()).contains(true); + assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); + assertThat(aggregate.describe(false)) + .isEqualTo( + String.format( + "CREATE AGGREGATE \"%s\".\"deta\"(int) SFUNC \"detf\" STYPE int INITCOND 0 DETERMINISTIC;", + keyspace.getName().asInternal())); + }); 
+ } + + private static boolean isDse6OrHigher() { + assumeThat(CCM_RULE.isDistributionOf(BackendType.DSE)) + .describedAs("DSE required for DseFunctionMetadata tests") + .isTrue(); + return CCM_RULE.getDistributionVersion().compareTo(DSE_6_0_0) >= 0; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadataIT.java new file mode 100644 index 00000000000..53559a66b1b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadataIT.java @@ -0,0 +1,241 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; + +import com.datastax.dse.driver.api.core.metadata.schema.DseFunctionMetadata.Monotonicity; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.metadata.schema.FunctionMetadata; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import java.util.Objects; +import java.util.Optional; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "DSE 5.0+ required function/aggregate support") +public class DseFunctionMetadataIT extends AbstractMetadataIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static final Version DSE_6_0_0 = Objects.requireNonNull(Version.parse("6.0.0")); + + @Override + public SessionRule getSessionRule() { + return DseFunctionMetadataIT.SESSION_RULE; + } + + @Test + public void should_parse_function_without_deterministic_or_monotonic() { + String cqlFunction = + "CREATE FUNCTION nondetf(i int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return new java.util.Random().nextInt(i);';"; + execute(cqlFunction); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional functionOpt = 
keyspace.getFunction("nondetf", DataTypes.INT); + assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) + .hasValueSatisfying( + function -> { + if (isDse6OrHigher()) { + assertThat(function.getDeterministic()).contains(false); + assertThat(function.getMonotonicity()).contains(Monotonicity.NOT_MONOTONIC); + } else { + assertThat(function.getDeterministic()).isEmpty(); + assertThat(function.getMonotonicity()).isEmpty(); + } + assertThat(function.getMonotonicArgumentNames()).isEmpty(); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); + assertThat(function.getBody()).isEqualTo("return new java.util.Random().nextInt(i);"); + assertThat(function.describe(false)) + .isEqualTo( + String.format( + "CREATE FUNCTION \"%s\".\"nondetf\"(\"i\" int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java AS 'return new java.util.Random().nextInt(i);';", + keyspace.getName().asInternal())); + }); + } + + @Test + public void should_parse_function_with_deterministic() { + assumeThat(isDse6OrHigher()) + .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") + .isTrue(); + String cqlFunction = + "CREATE FUNCTION detf(i int, y int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC LANGUAGE java AS 'return i+y;';"; + execute(cqlFunction); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional functionOpt = + keyspace.getFunction("detf", DataTypes.INT, DataTypes.INT); + assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) + .hasValueSatisfying( + function -> { + assertThat(function.getDeterministic()).contains(true); + assertThat(function.getMonotonicity()).contains(Monotonicity.NOT_MONOTONIC); + assertThat(function.getMonotonicArgumentNames()).isEmpty(); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); + assertThat(function.getBody()).isEqualTo("return i+y;"); + assertThat(function.describe(false)) + .isEqualTo( + String.format( + "CREATE FUNCTION \"%s\".\"detf\"(\"i\" int,\"y\" int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC LANGUAGE java AS 'return i+y;';", + keyspace.getName().asInternal())); + }); + } + + @Test + public void should_parse_function_with_monotonic() { + assumeThat(isDse6OrHigher()) + .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") + .isTrue(); + String cqlFunction = + "CREATE FUNCTION monotonic(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int MONOTONIC LANGUAGE java AS 'return dividend / divisor;';"; + execute(cqlFunction); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional functionOpt = + keyspace.getFunction("monotonic", DataTypes.INT, DataTypes.INT); + assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) + .hasValueSatisfying( + function -> { + assertThat(function.getDeterministic()).contains(false); + assertThat(function.getMonotonicity()).contains(Monotonicity.FULLY_MONOTONIC); + assertThat(function.getMonotonicArgumentNames()) + .containsExactly( + CqlIdentifier.fromCql("dividend"), CqlIdentifier.fromCql("divisor")); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); + assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); + assertThat(function.describe(false)) + .isEqualTo( + String.format( + "CREATE FUNCTION \"%s\".\"monotonic\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int MONOTONIC LANGUAGE java AS 'return dividend / divisor;';", + 
keyspace.getName().asInternal())); + }); + } + + @Test + public void should_parse_function_with_monotonic_on() { + assumeThat(isDse6OrHigher()) + .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") + .isTrue(); + String cqlFunction = + "CREATE FUNCTION monotonic_on(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';"; + execute(cqlFunction); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional functionOpt = + keyspace.getFunction("monotonic_on", DataTypes.INT, DataTypes.INT); + assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) + .hasValueSatisfying( + function -> { + assertThat(function.getDeterministic()).contains(false); + assertThat(function.getMonotonicity()).contains(Monotonicity.PARTIALLY_MONOTONIC); + assertThat(function.getMonotonicArgumentNames()) + .containsExactly(CqlIdentifier.fromCql("dividend")); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); + assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); + assertThat(function.describe(false)) + .isEqualTo( + String.format( + "CREATE FUNCTION \"%s\".\"monotonic_on\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';", + keyspace.getName().asInternal())); + }); + } + + @Test + public void should_parse_function_with_deterministic_and_monotonic() { + assumeThat(isDse6OrHigher()) + .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") + .isTrue(); + String cqlFunction = + "CREATE FUNCTION det_and_monotonic(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC LANGUAGE java AS 'return dividend / divisor;';"; + execute(cqlFunction); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional functionOpt = + keyspace.getFunction("det_and_monotonic", DataTypes.INT, DataTypes.INT); + assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) + .hasValueSatisfying( + function -> { + assertThat(function.getDeterministic()).contains(true); + assertThat(function.getMonotonicity()).contains(Monotonicity.FULLY_MONOTONIC); + assertThat(function.getMonotonicArgumentNames()) + .containsExactly( + CqlIdentifier.fromCql("dividend"), CqlIdentifier.fromCql("divisor")); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); + assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); + assertThat(function.describe(false)) + .isEqualTo( + String.format( + "CREATE FUNCTION \"%s\".\"det_and_monotonic\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC LANGUAGE java AS 'return dividend / divisor;';", + keyspace.getName().asInternal())); + }); + } + + @Test + public void should_parse_function_with_deterministic_and_monotonic_on() { + assumeThat(isDse6OrHigher()) + .describedAs("DSE 6.0+ required for DETERMINISTIC / MONOTONIC") + .isTrue(); + String cqlFunction = + "CREATE FUNCTION det_and_monotonic_on(dividend int, divisor int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';"; + execute(cqlFunction); + DseKeyspaceMetadata keyspace = getKeyspace(); + Optional functionOpt = + keyspace.getFunction("det_and_monotonic_on", DataTypes.INT, DataTypes.INT); + assertThat(functionOpt.map(DseFunctionMetadata.class::cast)) + .hasValueSatisfying( + function -> 
{ + assertThat(function.getDeterministic()).contains(true); + assertThat(function.getMonotonicity()).contains(Monotonicity.PARTIALLY_MONOTONIC); + assertThat(function.getMonotonicArgumentNames()) + .containsExactly(CqlIdentifier.fromCql("dividend")); + assertThat(function.getLanguage()).isEqualTo("java"); + assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); + assertThat(function.getBody()).isEqualTo("return dividend / divisor;"); + assertThat(function.describe(false)) + .isEqualTo( + String.format( + "CREATE FUNCTION \"%s\".\"det_and_monotonic_on\"(\"dividend\" int,\"divisor\" int) CALLED ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC ON \"dividend\" LANGUAGE java AS 'return dividend / divisor;';", + keyspace.getName().asInternal())); + }); + } + + private static boolean isDse6OrHigher() { + assumeThat(CCM_RULE.isDistributionOf(BackendType.DSE)) + .describedAs("DSE required for DseFunctionMetadata tests") + .isTrue(); + return CCM_RULE.getDistributionVersion().compareTo(DSE_6_0_0) >= 0; + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/KeyspaceGraphMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/KeyspaceGraphMetadataIT.java new file mode 100644 index 00000000000..dc96b265140 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/KeyspaceGraphMetadataIT.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8") +public class KeyspaceGraphMetadataIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = + new CqlSessionRuleBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @Test + public void should_expose_graph_engine_if_set() { + CqlSession session = SESSION_RULE.session(); + session.execute( + "CREATE KEYSPACE keyspace_metadata_it_graph_engine " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " + + "AND graph_engine = 'Core'"); + Metadata metadata = session.getMetadata(); + assertThat(metadata.getKeyspace("keyspace_metadata_it_graph_engine")) + .hasValueSatisfying( + keyspaceMetadata -> + assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) + .hasValue("Core")); + } + + @Test + public void should_expose_graph_engine_if_keyspace_altered() { + CqlSession session = SESSION_RULE.session(); + session.execute( + "CREATE KEYSPACE keyspace_metadata_it_graph_engine_alter " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + assertThat(session.getMetadata().getKeyspace("keyspace_metadata_it_graph_engine_alter")) + .hasValueSatisfying( + keyspaceMetadata -> + assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) + .isEmpty()); + + session.execute( + "ALTER KEYSPACE keyspace_metadata_it_graph_engine_alter WITH graph_engine = 'Core'"); + assertThat(session.getMetadata().getKeyspace("keyspace_metadata_it_graph_engine_alter")) + .hasValueSatisfying( + keyspaceMetadata -> + assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) + .hasValue("Core")); + } + + @Test + public void should_not_allow_classic_graph_engine_to_be_specified_on_keyspace() { + CqlSession session = SESSION_RULE.session(); + assertThatThrownBy( + () -> + session.execute( + "CREATE KEYSPACE keyspace_metadata_it_graph_engine_classic " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " + + "AND graph_engine = 'Classic'")) + .hasMessageContaining("Invalid/unknown graph engine name 'Classic'"); + } + + @Test + public void should_expose_core_graph_engine_if_set() { + CqlSession session = SESSION_RULE.session(); + session.execute( + "CREATE KEYSPACE keyspace_metadata_it_graph_engine_core " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} " + + "AND graph_engine = 'Core'"); + Metadata metadata = session.getMetadata(); + 
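+ // The metadata refreshed after the CREATE KEYSPACE above should expose the graph engine.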
assertThat(metadata.getKeyspace("keyspace_metadata_it_graph_engine_core")) + .hasValueSatisfying( + keyspaceMetadata -> + assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) + .hasValue("Core")); + } + + @Test + public void should_expose_empty_graph_engine_if_not_set() { + // The default keyspace created by CcmRule has no graph engine + Metadata metadata = SESSION_RULE.session().getMetadata(); + assertThat(metadata.getKeyspace(SESSION_RULE.keyspace())) + .hasValueSatisfying( + keyspaceMetadata -> + assertThat(((DseGraphKeyspaceMetadata) keyspaceMetadata).getGraphEngine()) + .isEmpty()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataCaseSensitiveIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataCaseSensitiveIT.java new file mode 100644 index 00000000000..35242294302 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataCaseSensitiveIT.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +/** + * A regression test for a specific case of schema parsing for graphs built from tables containing + * case-sensitive column names in its tables. See JAVA-2492 for more information. 
+ */ +@Category(ParallelizableTests.class) +@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8") +public class TableGraphMetadataCaseSensitiveIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = + new CqlSessionRuleBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void createTables() { + CqlSession session = SESSION_RULE.session(); + + session.execute( + "CREATE TABLE \"Person\" (\"Name\" varchar, \"Age\" int, PRIMARY KEY ((\"Name\"), \"Age\")) WITH VERTEX LABEL"); + session.execute( + "CREATE TABLE \"Software\" (\"Name\" varchar, \"Complexity\" int, PRIMARY KEY ((\"Name\"), \"Complexity\")) WITH VERTEX LABEL"); + session.execute( + "CREATE TABLE \"Created\"" + + " (\"PersonName\" varchar, \"SoftwareName\" varchar, \"PersonAge\" int, \"SoftwareComplexity\" int, weight int," + + " primary key ((\"PersonName\"), \"SoftwareName\", weight)) WITH EDGE LABEL\n" + + " FROM \"Person\"((\"PersonName\"),\"PersonAge\")" + + " TO \"Software\"((\"SoftwareName\"),\"SoftwareComplexity\");"); + } + + @Test + public void should_expose_case_sensitive_edge_metadata() { + CqlSession session = SESSION_RULE.session(); + Metadata metadata = session.getMetadata(); + assertThat(metadata.getKeyspace(SESSION_RULE.keyspace())) + .hasValueSatisfying( + keyspaceMetadata -> + assertThat(keyspaceMetadata.getTable(CqlIdentifier.fromInternal("Created"))) + .hasValueSatisfying( + created -> { + DseGraphTableMetadata dseCreated = (DseGraphTableMetadata) created; + assertThat(dseCreated.getEdge()) + .hasValueSatisfying( + edge -> { + assertThat(edge.getFromPartitionKeyColumns()) + .isEqualTo( + ImmutableList.of( + CqlIdentifier.fromInternal("PersonName"))); + assertThat(edge.getToPartitionKeyColumns()) + .isEqualTo( + ImmutableList.of( + CqlIdentifier.fromInternal("SoftwareName"))); + assertThat(edge.getFromClusteringColumns()) + .isEqualTo( + ImmutableList.of( + CqlIdentifier.fromInternal("PersonAge"))); + assertThat(edge.getToClusteringColumns()) + .isEqualTo( + ImmutableList.of( + CqlIdentifier.fromInternal("SoftwareComplexity"))); + }); + })); + } +} diff --git a/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataIT.java b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataIT.java new file mode 100644 index 00000000000..51a2204800e --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/dse/driver/api/core/metadata/schema/TableGraphMetadataIT.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.core.metadata.schema; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.CqlSessionRuleBuilder; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8") +public class TableGraphMetadataIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = + new CqlSessionRuleBuilder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void createTables() { + CqlSession session = SESSION_RULE.session(); + + session.execute("CREATE TABLE person (name text PRIMARY KEY) WITH VERTEX LABEL"); + session.execute( + "CREATE TABLE software (company text, name text, version int, " + + "PRIMARY KEY ((company, name), version)) " + + "WITH VERTEX LABEL soft"); + session.execute( + "CREATE TABLE contributors (contributor text, company_name text, software_name text, " + + "software_version int, " + + "PRIMARY KEY(contributor, company_name, software_name, software_version)) " + + "WITH EDGE LABEL contrib " + + "FROM person(contributor) " + + "TO soft((company_name, software_name), software_version)"); + } + + @Test + public void should_expose_vertex_and_edge_metadata() { + CqlSession session = SESSION_RULE.session(); + Metadata metadata = session.getMetadata(); + assertThat(metadata.getKeyspace(SESSION_RULE.keyspace())) + .hasValueSatisfying( + keyspaceMetadata -> { + assertThat(keyspaceMetadata.getTable("person")) + .hasValueSatisfying( + person -> { + DseGraphTableMetadata dsePerson = (DseGraphTableMetadata) person; + assertThat(dsePerson.getVertex()) + .hasValueSatisfying( + vertex -> + assertThat(vertex.getLabelName()) + .isEqualTo(CqlIdentifier.fromInternal("person"))); + assertThat(dsePerson.getEdge()).isEmpty(); + }); + + assertThat(keyspaceMetadata.getTable("software")) + .hasValueSatisfying( + software -> { + DseGraphTableMetadata dseSoftware = (DseGraphTableMetadata) software; + assertThat(dseSoftware.getVertex()) + .hasValueSatisfying( + vertex -> + assertThat(vertex.getLabelName()) + .isEqualTo(CqlIdentifier.fromInternal("soft"))); + assertThat(dseSoftware.getEdge()).isEmpty(); + }); + + assertThat(keyspaceMetadata.getTable("contributors")) + .hasValueSatisfying( + contributors -> { + DseGraphTableMetadata dseContributors = + (DseGraphTableMetadata) contributors; + assertThat(dseContributors.getVertex()).isEmpty(); + assertThat(dseContributors.getEdge()) + .hasValueSatisfying( + edge -> { + assertThat(edge.getLabelName()) + .isEqualTo(CqlIdentifier.fromInternal("contrib")); + + assertThat(edge.getFromTable().asInternal()).isEqualTo("person"); + assertThat(edge.getFromLabel()) + 
.isEqualTo(CqlIdentifier.fromInternal("person")); + assertThat(edge.getFromPartitionKeyColumns()) + .containsExactly(CqlIdentifier.fromInternal("contributor")); + assertThat(edge.getFromClusteringColumns()).isEmpty(); + + assertThat(edge.getToTable().asInternal()).isEqualTo("software"); + assertThat(edge.getToLabel()) + .isEqualTo(CqlIdentifier.fromInternal("soft")); + assertThat(edge.getToPartitionKeyColumns()) + .containsExactly( + CqlIdentifier.fromInternal("company_name"), + CqlIdentifier.fromInternal("software_name")); + assertThat(edge.getToClusteringColumns()) + .containsExactly( + CqlIdentifier.fromInternal("software_version")); + }); + }); + }); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ProtocolVersionInitialNegotiationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ProtocolVersionInitialNegotiationIT.java deleted file mode 100644 index d1f228c803d..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ProtocolVersionInitialNegotiationIT.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** Covers protocol negotiation for the initial connection to the first contact point. 
*/ -@Category(ParallelizableTests.class) -public class ProtocolVersionInitialNegotiationIT { - - @Rule public CcmRule ccm = CcmRule.getInstance(); - - @CassandraRequirement( - min = "2.1", - max = "2.2", - description = "required to downgrade to an older version") - @Test - public void should_downgrade_to_v3() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); - session.execute("select * from system.local"); - } - } - - @CassandraRequirement( - min = "2.1", - max = "2.2", - description = "required to downgrade to an older version") - @Test - public void should_fail_if_provided_version_isnt_supported() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); - session.execute("select * from system.local"); - fail("Expected an AllNodesFailedException"); - } catch (AllNodesFailedException anfe) { - Throwable cause = anfe.getErrors().values().iterator().next(); - assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); - UnsupportedProtocolVersionException unsupportedException = - (UnsupportedProtocolVersionException) cause; - assertThat(unsupportedException.getAttemptedVersions()) - .containsOnly(DefaultProtocolVersion.V4); - } - } - - @CassandraRequirement(min = "2.2", description = "required to meet default protocol version") - @Test - public void should_not_downgrade_if_server_supports_latest_version() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(4); - session.execute("select * from system.local"); - } - } - - @CassandraRequirement(min = "2.2", description = "required to use an older protocol version") - @Test - public void should_use_explicitly_provided_protocol_version() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderIT.java deleted file mode 100644 index d25e0b275ff..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderIT.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.auth; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider; -import com.google.common.util.concurrent.Uninterruptibles; -import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; - -public class PlainTextAuthProviderIT { - - @ClassRule - public static CustomCcmRule ccm = - CustomCcmRule.builder() - .withCassandraConfiguration("authenticator", "PasswordAuthenticator") - .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0") - .build(); - - @BeforeClass - public static void sleepForAuth() { - if (ccm.getCassandraVersion().compareTo(Version.V2_2_0) < 0) { - // Sleep for 1 second to allow C* auth to do its work. This is only needed for 2.1 - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - } - } - - @Test - public void should_connect_with_credentials() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_with_invalid_credentials() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class) - .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "baduser") - .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "badpass") - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_without_credentials() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/CloudIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/CloudIT.java new file mode 100644 index 00000000000..f7990d707e4 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/CloudIT.java @@ -0,0 +1,500 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.cloud; + +import static com.datastax.oss.driver.internal.core.util.LoggerTest.setupTestLogger; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.spi.ILoggingEvent; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.auth.AuthenticationException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.IsolatedTests; +import com.datastax.oss.driver.internal.core.config.cloud.CloudConfigFactory; +import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; +import com.datastax.oss.driver.internal.core.util.LoggerTest; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.NoSuchAlgorithmException; +import java.util.Collections; +import java.util.List; +import javax.net.ssl.SSLContext; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(IsolatedTests.class) +@Ignore("Disabled because it is causing trouble in Jenkins CI") +public class CloudIT { + + private static final String BUNDLE_URL_PATH = "/certs/bundles/creds.zip"; + + @ClassRule public static SniProxyRule proxyRule = new SniProxyRule(); + + // Used only to host the secure connect bundle, for tests that require external URLs + @Rule + public WireMockRule wireMockRule = + new WireMockRule(wireMockConfig().dynamicPort().dynamicHttpsPort()); + + @Test + public void should_connect_to_proxy_using_path() { + ResultSet set; + Path bundle = proxyRule.getProxy().getDefaultBundlePath(); + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withCloudSecureConnectBundle(bundle) + .build()) { + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_and_log_info_that_config_json_with_username_password_was_provided() { + ResultSet set; + Path bundle = proxyRule.getProxy().getDefaultBundlePath(); + 
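+    // LoggerTest.setupTestLogger is a test helper; judging by the usage below, it is assumed
+    // to attach a Mockito-mocked Logback appender to the CloudConfigFactory logger, so that
+    // doAppend(...) invocations can be verified and the captured ILoggingEvents inspected.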
LoggerTest.LoggerSetup logger = setupTestLogger(CloudConfigFactory.class, Level.INFO); + + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withCloudSecureConnectBundle(bundle) + .build()) { + set = session.execute("select * from system.local"); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + .contains( + "The bundle contains config.json with username and/or password. Providing it in the bundle is deprecated and ignored."); + } + assertThat(set).isNotNull(); + } + + @Test + public void + should_fail_with_auth_error_when_connecting_using_bundle_with_username_password_in_config_json() { + Path bundle = proxyRule.getProxy().getDefaultBundlePath(); + + // fails with auth error because username/password from config.json is ignored + AllNodesFailedException exception = null; + try { + CqlSession.builder().withCloudSecureConnectBundle(bundle).build(); + } catch (AllNodesFailedException ex) { + exception = ex; + } + assertThat(exception).isNotNull(); + List errors = exception.getAllErrors().values().iterator().next(); + Throwable firstError = errors.get(0); + assertThat(firstError).isInstanceOf(AuthenticationException.class); + } + + @Test + public void should_connect_to_proxy_without_credentials() { + ResultSet set; + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + try (CqlSession session = + CqlSession.builder() + .withCloudSecureConnectBundle(bundle) + .withAuthCredentials("cassandra", "cassandra") + .build()) { + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_to_proxy_using_non_normalized_path() { + Path bundle = proxyRule.getProxy().getBundlesRootPath().resolve("../bundles/creds-v1.zip"); + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withCloudSecureConnectBundle(bundle) + .build()) { + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_to_proxy_using_input_stream() throws IOException { + InputStream bundle = Files.newInputStream(proxyRule.getProxy().getDefaultBundlePath()); + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withCloudSecureConnectBundle(bundle) + .build()) { + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_to_proxy_using_URL() throws IOException { + // given + byte[] bundle = Files.readAllBytes(proxyRule.getProxy().getDefaultBundlePath()); + stubFor( + any(urlEqualTo(BUNDLE_URL_PATH)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/octet-stream") + .withBody(bundle))); + URL bundleUrl = + new URL(String.format("http://localhost:%d%s", wireMockRule.port(), BUNDLE_URL_PATH)); + + // when + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withCloudSecureConnectBundle(bundleUrl) + .build()) { + + // then + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_to_proxy_using_absolute_path_provided_in_the_session_setting() { + // given + String bundle = 
proxyRule.getProxy().getDefaultBundlePath().toString(); + DriverConfigLoader loader = + DriverConfigLoader.programmaticBuilder() + .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundle) + .build(); + // when + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withConfigLoader(loader) + .build()) { + + // then + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_to_proxy_using_non_normalized_path_provided_in_the_session_setting() { + // given + String bundle = + proxyRule.getProxy().getBundlesRootPath().resolve("../bundles/creds-v1.zip").toString(); + DriverConfigLoader loader = + DriverConfigLoader.programmaticBuilder() + .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundle) + .build(); + // when + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withConfigLoader(loader) + .build()) { + + // then + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void + should_connect_to_proxy_using_url_with_file_protocol_provided_in_the_session_setting() { + // given + String bundle = proxyRule.getProxy().getDefaultBundlePath().toString(); + DriverConfigLoader loader = + DriverConfigLoader.programmaticBuilder() + .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundle) + .build(); + // when + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withConfigLoader(loader) + .build()) { + + // then + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void should_connect_to_proxy_using_url_with_http_protocol_provided_in_the_session_setting() + throws IOException { + // given + byte[] bundle = Files.readAllBytes(proxyRule.getProxy().getDefaultBundlePath()); + stubFor( + any(urlEqualTo(BUNDLE_URL_PATH)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader("Content-Type", "application/octet-stream") + .withBody(bundle))); + String bundleUrl = String.format("http://localhost:%d%s", wireMockRule.port(), BUNDLE_URL_PATH); + DriverConfigLoader loader = + DriverConfigLoader.programmaticBuilder() + .withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, bundleUrl) + .build(); + // when + ResultSet set; + try (CqlSession session = + CqlSession.builder() + .withAuthCredentials("cassandra", "cassandra") + .withConfigLoader(loader) + .build()) { + + // then + set = session.execute("select * from system.local"); + } + assertThat(set).isNotNull(); + } + + @Test + public void + should_connect_and_log_info_when_contact_points_and_secure_bundle_used_programmatic() { + // given + LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); + + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + + try (CqlSession session = + CqlSession.builder() + .withCloudSecureConnectBundle(bundle) + .addContactPoint(new InetSocketAddress("127.0.0.1", 9042)) + .withAuthCredentials("cassandra", "cassandra") + .build(); ) { + + // when + ResultSet set = session.execute("select * from system.local"); + // then + assertThat(set).isNotNull(); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + 
.contains( + "Both a secure connect bundle and contact points were provided. These are mutually exclusive. The contact points from the secure bundle will have priority."); + + } finally { + logger.close(); + } + } + + @Test + public void should_connect_and_log_info_when_contact_points_and_secure_bundle_used_config() { + // given + LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); + + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.CONTACT_POINTS, Collections.singletonList("localhost:9042")) + .build(); + + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + + try (CqlSession session = + CqlSession.builder() + .withConfigLoader(loader) + .withCloudSecureConnectBundle(bundle) + .withAuthCredentials("cassandra", "cassandra") + .build(); ) { + + // when + ResultSet set = session.execute("select * from system.local"); + // then + assertThat(set).isNotNull(); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + .contains( + "Both a secure connect bundle and contact points were provided. These are mutually exclusive. The contact points from the secure bundle will have priority."); + + } finally { + logger.close(); + } + } + + @Test + public void should_connect_and_log_info_when_ssl_context_and_secure_bundle_used_programmatic() + throws NoSuchAlgorithmException { + // given + LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); + + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + + try (CqlSession session = + CqlSession.builder() + .withCloudSecureConnectBundle(bundle) + .withAuthCredentials("cassandra", "cassandra") + .withSslContext(SSLContext.getInstance("SSL")) + .build()) { + // when + ResultSet set = session.execute("select * from system.local"); + // then + assertThat(set).isNotNull(); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + .contains( + "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. 
The SSL options from the secure bundle will have priority."); + } finally { + logger.close(); + } + } + + @Test + public void should_error_when_ssl_context_and_secure_bundle_used_config() + throws NoSuchAlgorithmException { + // given + LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); + + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withBoolean(DefaultDriverOption.RECONNECT_ON_INIT, true) + .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) + .build(); + + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + + try (CqlSession session = + CqlSession.builder() + .withConfigLoader(loader) + .withCloudSecureConnectBundle(bundle) + .withAuthCredentials("cassandra", "cassandra") + .build()) { + // when + ResultSet set = session.execute("select * from system.local"); + // then + assertThat(set).isNotNull(); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + .contains( + "Both a secure connect bundle and SSL options were provided. They are mutually exclusive. The SSL options from the secure bundle will have priority."); + } finally { + logger.close(); + } + } + + @Test + public void + should_connect_and_log_info_when_local_data_center_and_secure_bundle_used_programmatic() { + // given + LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); + + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc-ignore") + .build(); + + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + + try (CqlSession session = + CqlSession.builder() + .withCloudSecureConnectBundle(bundle) + .withConfigLoader(loader) + .withAuthCredentials("cassandra", "cassandra") + .build(); ) { + + // when + ResultSet set = session.execute("select * from system.local"); + // then + assertThat(set).isNotNull(); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + .contains( + "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. The local datacenter from the secure bundle will have priority."); + + } finally { + logger.close(); + } + } + + @Test + public void should_connect_and_log_info_when_local_data_center_and_secure_bundle_used_config() { + // given + LoggerTest.LoggerSetup logger = setupTestLogger(SessionBuilder.class, Level.INFO); + + Path bundle = proxyRule.getProxy().getBundleWithoutCredentialsPath(); + + try (CqlSession session = + CqlSession.builder() + .withCloudSecureConnectBundle(bundle) + .withLocalDatacenter("dc-ignored") + .withAuthCredentials("cassandra", "cassandra") + .build(); ) { + + // when + ResultSet set = session.execute("select * from system.local"); + // then + assertThat(set).isNotNull(); + verify(logger.appender, timeout(500).atLeast(1)) + .doAppend(logger.loggingEventCaptor.capture()); + assertThat( + logger.loggingEventCaptor.getAllValues().stream() + .map(ILoggingEvent::getFormattedMessage)) + .contains( + "Both a secure connect bundle and a local datacenter were provided. They are mutually exclusive. 
The local datacenter from the secure bundle will have priority."); + + } finally { + logger.close(); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyRule.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyRule.java new file mode 100644 index 00000000000..fa009de78ae --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyRule.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.core.cloud; + +import org.junit.rules.ExternalResource; + +public class SniProxyRule extends ExternalResource { + + private final SniProxyServer proxy; + + public SniProxyRule() { + proxy = new SniProxyServer(); + } + + @Override + protected void before() { + proxy.startProxy(); + } + + @Override + protected void after() { + proxy.stopProxy(); + } + + public SniProxyServer getProxy() { + return proxy; + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyServer.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyServer.java new file mode 100644 index 00000000000..809354a7daf --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cloud/SniProxyServer.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.core.cloud; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; +import org.apache.commons.exec.CommandLine; +import org.apache.commons.exec.DefaultExecutor; +import org.apache.commons.exec.ExecuteStreamHandler; +import org.apache.commons.exec.ExecuteWatchdog; +import org.apache.commons.exec.Executor; +import org.apache.commons.exec.LogOutputStream; +import org.apache.commons.exec.PumpStreamHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SniProxyServer { + + private static final Logger LOG = LoggerFactory.getLogger(SniProxyServer.class); + + private final Path proxyPath; + private final Path bundlesRootPath; + private final Path defaultBundlePath; + private final Path bundleWithoutCredentialsPath; + private final Path bundleWithoutClientCertificatesPath; + private final Path bundleWithInvalidCAPath; + private final Path bundleWithUnreachableMetadataServicePath; + + private volatile boolean running = false; + + public SniProxyServer() { + this(Paths.get(System.getProperty("proxy.path", "./"))); + } + + public SniProxyServer(Path proxyPath) { + this.proxyPath = proxyPath.normalize().toAbsolutePath(); + bundlesRootPath = proxyPath.resolve("certs/bundles/"); + defaultBundlePath = bundlesRootPath.resolve("creds-v1.zip"); + bundleWithoutCredentialsPath = bundlesRootPath.resolve("creds-v1-wo-creds.zip"); + bundleWithoutClientCertificatesPath = bundlesRootPath.resolve("creds-v1-wo-cert.zip"); + bundleWithInvalidCAPath = bundlesRootPath.resolve("creds-v1-invalid-ca.zip"); + bundleWithUnreachableMetadataServicePath = bundlesRootPath.resolve("creds-v1-unreachable.zip"); + } + + public void startProxy() { + CommandLine run = CommandLine.parse(proxyPath + "/run.sh"); + execute(run); + running = true; + } + + public void stopProxy() { + if (running) { + CommandLine findImageId = + CommandLine.parse("docker ps -a -q --filter ancestor=single_endpoint"); + String id = execute(findImageId); + CommandLine stop = CommandLine.parse("docker kill " + id); + execute(stop); + running = false; + } + } + + /** @return The root folder of the SNI proxy server docker image. */ + public Path getProxyPath() { + return proxyPath; + } + + /** + * @return The root folder where secure connect bundles exposed by this SNI proxy for testing + * purposes can be found. + */ + public Path getBundlesRootPath() { + return bundlesRootPath; + } + + /** + * @return The default secure connect bundle. It contains credentials and all certificates + * required to connect. + */ + public Path getDefaultBundlePath() { + return defaultBundlePath; + } + + /** @return A secure connect bundle without credentials in config.json. */ + public Path getBundleWithoutCredentialsPath() { + return bundleWithoutCredentialsPath; + } + + /** @return A secure connect bundle without client certificates (no identity.jks). */ + public Path getBundleWithoutClientCertificatesPath() { + return bundleWithoutClientCertificatesPath; + } + + /** @return A secure connect bundle with an invalid Certificate Authority. */ + public Path getBundleWithInvalidCAPath() { + return bundleWithInvalidCAPath; + } + + /** @return A secure connect bundle with an invalid address for the Proxy Metadata Service. 
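+   *     <p>For illustration, a hypothetical usage sketch (not exercised by any test in this
+   *     diff): building a session with {@code CqlSession.builder()
+   *     .withCloudSecureConnectBundle(getBundleWithUnreachableMetadataServicePath()).build()}
+   *     is expected to fail, since the driver cannot reach the metadata service.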
*/ + public Path getBundleWithUnreachableMetadataServicePath() { + return bundleWithUnreachableMetadataServicePath; + } + + private String execute(CommandLine cli) { + LOG.debug("Executing: " + cli); + ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); + ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + try (LogOutputStream errStream = + new LogOutputStream() { + @Override + protected void processLine(String line, int logLevel) { + LOG.error("sniendpointerr> {}", line); + } + }) { + Executor executor = new DefaultExecutor(); + ExecuteStreamHandler streamHandler = new PumpStreamHandler(outStream, errStream); + executor.setStreamHandler(streamHandler); + executor.setWatchdog(watchDog); + executor.setWorkingDirectory(proxyPath.toFile()); + int retValue = executor.execute(cli); + if (retValue != 0) { + LOG.error("Non-zero exit code ({}) returned from executing ccm command: {}", retValue, cli); + } + return outStream.toString(); + } catch (IOException ex) { + if (watchDog.killedProcess()) { + throw new RuntimeException("The command '" + cli + "' was killed after 10 minutes"); + } else { + throw new RuntimeException("The command '" + cli + "' failed to execute", ex); + } + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/QueryTraceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/QueryTraceIT.java deleted file mode 100644 index f01007ce3e1..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/QueryTraceIT.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.cql; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.net.InetSocketAddress; -import org.hamcrest.Description; -import org.hamcrest.TypeSafeMatcher; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class QueryTraceIT { - - private static CcmRule ccmRule = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Rule public ExpectedException thrown = ExpectedException.none(); - - @Test - public void should_not_have_tracing_id_when_tracing_disabled() { - ExecutionInfo executionInfo = - sessionRule - .session() - .execute("SELECT release_version FROM system.local") - .getExecutionInfo(); - - assertThat(executionInfo.getTracingId()).isNull(); - - // Should get a DriverExecutionException with an underlying IllegalStateException indicating - // Tracing was disabled. - thrown.expect(DriverExecutionException.class); - String expectedMessage = "Tracing was disabled for this request"; - thrown.expectCause( - new TypeSafeMatcher() { - @Override - public void describeTo(Description description) { - description.appendText( - "Expected IllegalStateException with message of '" + expectedMessage + "'"); - } - - @Override - protected boolean matchesSafely(Throwable item) { - return item instanceof IllegalStateException - && item.getMessage().equals(expectedMessage); - } - }); - executionInfo.getQueryTrace(); - } - - @Test - public void should_fetch_trace_when_tracing_enabled() { - ExecutionInfo executionInfo = - sessionRule - .session() - .execute( - SimpleStatement.builder("SELECT release_version FROM system.local") - .setTracing() - .build()) - .getExecutionInfo(); - - assertThat(executionInfo.getTracingId()).isNotNull(); - - QueryTrace queryTrace = executionInfo.getQueryTrace(); - assertThat(queryTrace.getTracingId()).isEqualTo(executionInfo.getTracingId()); - assertThat(queryTrace.getRequestType()).isEqualTo("Execute CQL3 query"); - assertThat(queryTrace.getDurationMicros()).isPositive(); - EndPoint contactPoint = ccmRule.getContactPoints().iterator().next(); - assertThat(queryTrace.getCoordinator()) - .isEqualTo(((InetSocketAddress) contactPoint.resolve()).getAddress()); - assertThat(queryTrace.getParameters()) - .containsEntry("consistency_level", "LOCAL_ONE") - .containsEntry("page_size", "5000") - .containsEntry("query", "SELECT release_version FROM system.local") - .containsEntry("serial_consistency_level", "SERIAL"); - assertThat(queryTrace.getStartedAt()).isPositive(); - // Don't want to get too deep into event testing because that could change across versions - assertThat(queryTrace.getEvents()).isNotEmpty(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/ByteOrderedTokenIT.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/ByteOrderedTokenIT.java deleted file mode 100644 index 357c1078f99..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/ByteOrderedTokenIT.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class ByteOrderedTokenIT extends TokenITBase { - - private static CustomCcmRule ccmRule = - CustomCcmRule.builder().withNodes(3).withCreateOption("-p ByteOrderedPartitioner").build(); - - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - public ByteOrderedTokenIT() { - super("org.apache.cassandra.dht.ByteOrderedPartitioner", ByteOrderedToken.class, false); - } - - @Override - protected CqlSession session() { - return sessionRule.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(sessionRule.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/DescribeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/DescribeIT.java deleted file mode 100644 index 94fb74ae1a8..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/DescribeIT.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.google.common.io.ByteStreams; -import com.google.common.io.Closer; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintStream; -import java.time.Duration; -import java.util.Optional; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Category(ParallelizableTests.class) -public class DescribeIT { - - private static final Logger logger = LoggerFactory.getLogger(DescribeIT.class); - - private static CcmRule ccmRule = CcmRule.getInstance(); - - // disable debouncer to speed up test. - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(0)) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - /** - * Creates a keyspace using a variety of features and ensures {@link - * com.datastax.oss.driver.api.core.metadata.schema.Describable#describe(boolean)} contains the - * expected data in the expected order. This is not exhaustive, but covers quite a bit of - * different scenarios (materialized views, aggregates, functions, nested UDTs, etc.). - * - *

- * <p>The test also verifies that the generated schema is the same whether the keyspace and its
- * schema was created during the lifecycle of the cluster or before connecting.
- *
- * <p>
        Note that this test might be fragile in the future if default option values change in - * cassandra. In order to deal with new features, we create a schema for each tested C* version, - * and if one is not present the test is failed. - */ - @Test - public void create_schema_and_ensure_exported_cql_is_as_expected() { - CqlIdentifier keyspace = SessionUtils.uniqueKeyspaceId(); - String keyspaceAsCql = keyspace.asCql(true); - String expectedCql = getExpectedCqlString(keyspaceAsCql); - - CqlSession session = sessionRule.session(); - - // create keyspace - session.execute( - String.format( - "CREATE KEYSPACE %s " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - keyspace)); - - // connect session to this keyspace. - session.execute(String.format("USE %s", keyspace.asCql(false))); - - Optional originalKsMeta = session.getMetadata().getKeyspace(keyspace); - - // Usertype 'ztype' with two columns. Given name to ensure that even though it has an - // alphabetically later name, it shows up before other user types ('ctype') that depend on it. - session.execute("CREATE TYPE ztype(c text, a int)"); - - // Usertype 'xtype' with two columns. At same level as 'ztype' since both are depended on by - // ctype, should show up before 'ztype' because it's alphabetically before, even though it was - // created after. - session.execute("CREATE TYPE xtype(d text)"); - - // Usertype 'ctype' which depends on both ztype and xtype, therefore ztype and xtype should show - // up earlier. - session.execute( - String.format( - "CREATE TYPE ctype(z frozen<%s.ztype>, x frozen<%s.xtype>)", - keyspaceAsCql, keyspaceAsCql)); - - // Usertype 'btype' which has no dependencies, should show up before 'xtype' and 'ztype' since - // it's alphabetically before. - session.execute("CREATE TYPE btype(a text)"); - - // Usertype 'atype' which depends on 'ctype', so should show up after 'ctype', 'xtype' and - // 'ztype'. - session.execute(String.format("CREATE TYPE atype(c frozen<%s.ctype>)", keyspaceAsCql)); - - // A simple table with a udt column and LCS compaction strategy. - session.execute( - String.format( - "CREATE TABLE ztable(zkey text, a frozen<%s.atype>, PRIMARY KEY(zkey)) " - + "WITH compaction = {'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 95}", - keyspaceAsCql)); - - // date type requries 2.2+ - if (ccmRule.getCassandraVersion().compareTo(Version.V2_2_0) >= 0) { - // A table that will have materialized views (copied from mv docs) - session.execute( - "CREATE TABLE cyclist_mv(cid uuid, name text, age int, birthday date, country text, " - + "PRIMARY KEY(cid))"); - - // index on table with view, index should be printed first. - session.execute("CREATE INDEX cyclist_by_country ON cyclist_mv(country)"); - - // materialized views require 3.0+ - if (ccmRule.getCassandraVersion().compareTo(Version.V3_0_0) >= 0) { - // A materialized view for cyclist_mv, reverse clustering. created first to ensure creation - // order does not matter, alphabetical does. 
-        session.execute(
-            "CREATE MATERIALIZED VIEW cyclist_by_r_age "
-                + "AS SELECT age, birthday, name, country "
-                + "FROM cyclist_mv "
-                + "WHERE age IS NOT NULL AND cid IS NOT NULL "
-                + "PRIMARY KEY (age, cid) "
-                + "WITH CLUSTERING ORDER BY (cid DESC)");
-
-        // A materialized view for cyclist_mv, select *
-        session.execute(
-            "CREATE MATERIALIZED VIEW cyclist_by_a_age "
-                + "AS SELECT * "
-                + "FROM cyclist_mv "
-                + "WHERE age IS NOT NULL AND cid IS NOT NULL "
-                + "PRIMARY KEY (age, cid)");
-
-        // A materialized view for cyclist_mv, select columns
-        session.execute(
-            "CREATE MATERIALIZED VIEW cyclist_by_age "
-                + "AS SELECT age, birthday, name, country "
-                + "FROM cyclist_mv "
-                + "WHERE age IS NOT NULL AND cid IS NOT NULL "
-                + "PRIMARY KEY (age, cid) WITH comment = 'simple view'");
-      }
-    }
-
-    // A table with a secondary index, taken from documentation on secondary index.
-    session.execute(
-        "CREATE TABLE rank_by_year_and_name(race_year int, race_name text, rank int, cyclist_name text, "
-            + "PRIMARY KEY((race_year, race_name), rank))");
-
-    session.execute("CREATE INDEX ryear ON rank_by_year_and_name(race_year)");
-
-    session.execute("CREATE INDEX rrank ON rank_by_year_and_name(rank)");
-
-    // udfs and udas require 2.2+
-    if (ccmRule.getCassandraVersion().compareTo(Version.V2_2_0) >= 0) {
-      // UDFs
-      session.execute(
-          "CREATE OR REPLACE FUNCTION avgState ( state tuple<int,bigint>, val int ) CALLED ON NULL INPUT RETURNS tuple<int,bigint> LANGUAGE java AS \n"
-              + " 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;';");
-      session.execute(
-          "CREATE OR REPLACE FUNCTION avgFinal ( state tuple<int,bigint> ) CALLED ON NULL INPUT RETURNS double LANGUAGE java AS \n"
-              + " 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);';");
-
-      // UDAs
-      session.execute(
-          "CREATE AGGREGATE IF NOT EXISTS mean ( int ) \n"
-              + "SFUNC avgState STYPE tuple<int,bigint> FINALFUNC avgFinal INITCOND (0,0);");
-      session.execute(
-          "CREATE AGGREGATE IF NOT EXISTS average ( int ) \n"
-              + "SFUNC avgState STYPE tuple<int,bigint> FINALFUNC avgFinal INITCOND (0,0);");
-    }
-
-    // Since metadata is immutable, do not expect anything in the original keyspace meta.
-    assertThat(originalKsMeta).isPresent();
-
-    assertThat(originalKsMeta.get().getTables()).isEmpty();
-    assertThat(originalKsMeta.get().getViews()).isEmpty();
-    assertThat(originalKsMeta.get().getFunctions()).isEmpty();
-    assertThat(originalKsMeta.get().getAggregates()).isEmpty();
-    assertThat(originalKsMeta.get().getUserDefinedTypes()).isEmpty();
-
-    // validate that the exported schema matches what was expected exactly.
-    Optional<KeyspaceMetadata> ks = sessionRule.session().getMetadata().getKeyspace(keyspace);
-    assertThat(ks.get().describeWithChildren(true).trim()).isEqualTo(expectedCql);
-
-    // Also validate that when you create a Session with schema already created that the exported
-    // string is the same.
-    try (CqlSession newSession = SessionUtils.newSession(ccmRule)) {
-      ks = newSession.getMetadata().getKeyspace(keyspace);
-      assertThat(ks.get().describeWithChildren(true).trim()).isEqualTo(expectedCql);
-    }
-  }
-
-  private String getExpectedCqlString(String keyspace) {
-    String majorMinor =
-        ccmRule.getCassandraVersion().getMajor() + "."
+ ccmRule.getCassandraVersion().getMinor(); - String resourceName = "/describe_it_test_" + majorMinor + ".cql"; - - Closer closer = Closer.create(); - try { - InputStream is = DescribeIT.class.getResourceAsStream(resourceName); - if (is == null) { - // If no schema file is defined for the tested Cassandra version, just try 3.11. - if (ccmRule.getCassandraVersion().compareTo(Version.V3_0_0) >= 0) { - logger.warn("Could not find schema file for {}, assuming C* 3.11.x", majorMinor); - is = DescribeIT.class.getResourceAsStream("/describe_it_test_3.11.cql"); - if (is == null) { - throw new IOException(); - } - } - } - - closer.register(is); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - PrintStream ps = new PrintStream(baos); - ByteStreams.copy(is, ps); - return baos.toString().replaceAll("ks_0", keyspace).trim(); - } catch (IOException e) { - logger.warn("Failed to read {}", resourceName, e); - fail("Unable to read " + resourceName + ", is it defined?", e); - } finally { - try { - closer.close(); - } catch (IOException e) { // no-op - logger.warn("Failed to close streams", e); - } - } - return ""; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/Murmur3TokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/Murmur3TokenVnodesIT.java deleted file mode 100644 index ed384765f9d..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/Murmur3TokenVnodesIT.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class Murmur3TokenVnodesIT extends TokenITBase { - - private static CustomCcmRule ccmRule = - CustomCcmRule.builder().withNodes(3).withCreateOption("--vnodes").build(); - - private static SessionRule<CqlSession> sessionRule = - SessionRule.builder(ccmRule) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - public Murmur3TokenVnodesIT() { - super("org.apache.cassandra.dht.Murmur3Partitioner", Murmur3Token.class, true); - } - - @Override - protected CqlSession session() { - return sessionRule.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(sessionRule.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/RandomTokenIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/RandomTokenIT.java deleted file mode 100644 index bdfad30aee0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/RandomTokenIT.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package com.datastax.oss.driver.api.core.metadata; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; -import java.time.Duration; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -public class RandomTokenIT extends TokenITBase { - - private static CustomCcmRule ccmRule = - CustomCcmRule.builder().withNodes(3).withCreateOption("-p RandomPartitioner").build(); - - private static SessionRule<CqlSession> sessionRule = - SessionRule.builder(ccmRule) - .withKeyspace(false) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .build()) - .build(); - - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - public RandomTokenIT() { - super("org.apache.cassandra.dht.RandomPartitioner", RandomToken.class, false); - } - - @Override - protected CqlSession session() { - return sessionRule.session(); - } - - @BeforeClass - public static void createSchema() { - TokenITBase.createSchema(sessionRule.session()); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaIT.java deleted file mode 100644 index 96f13aa8141..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaIT.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package com.datastax.oss.driver.api.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; -import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.protocol.internal.util.Bytes; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; -import org.junit.AssumptionViolatedException; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class SchemaIT { - - private static final Version DSE_MIN_VIRTUAL_TABLES = Version.parse("6.7.0"); - - private CcmRule ccmRule = CcmRule.getInstance(); - - private SessionRule<CqlSession> sessionRule = SessionRule.builder(ccmRule).build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - public void should_expose_system_and_test_keyspace() { - Map<CqlIdentifier, KeyspaceMetadata> keyspaces = - sessionRule.session().getMetadata().getKeyspaces(); - assertThat(keyspaces) - .containsKeys( - // Don't test exhaustively because system keyspaces depend on the Cassandra version, and - // keyspaces from other tests might also be present - CqlIdentifier.fromInternal("system"), - CqlIdentifier.fromInternal("system_traces"), - sessionRule.keyspace()); - assertThat(keyspaces.get(CqlIdentifier.fromInternal("system")).getTables()) - .containsKeys(CqlIdentifier.fromInternal("local"), CqlIdentifier.fromInternal("peers")); - } - - @Test - public void should_filter_by_keyspaces() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, - Collections.singletonList(sessionRule.keyspace().asInternal())) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.getMetadata().getKeyspaces()).containsOnlyKeys(sessionRule.keyspace()); - - CqlIdentifier otherKeyspace = SessionUtils.uniqueKeyspaceId(); - SessionUtils.createKeyspace(session, otherKeyspace); - - assertThat(session.getMetadata().getKeyspaces()).containsOnlyKeys(sessionRule.keyspace()); - } - } - - @Test - public void should_not_load_schema_if_disabled_in_config() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - -
assertThat(session.isSchemaMetadataEnabled()).isFalse(); - assertThat(session.getMetadata().getKeyspaces()).isEmpty(); - } - } - - @Test - public void should_enable_schema_programmatically_when_disabled_in_config() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - assertThat(session.getMetadata().getKeyspaces()).isEmpty(); - - session.setSchemaMetadataEnabled(true); - assertThat(session.isSchemaMetadataEnabled()).isTrue(); - - ConditionChecker.checkThat( - () -> assertThat(session.getMetadata().getKeyspaces()).isNotEmpty()) - .becomesTrue(); - assertThat(session.getMetadata().getKeyspaces()) - .containsKeys( - CqlIdentifier.fromInternal("system"), - CqlIdentifier.fromInternal("system_traces"), - sessionRule.keyspace()); - - session.setSchemaMetadataEnabled(null); - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - } - } - - @Test - public void should_disable_schema_programmatically_when_enabled_in_config() { - CqlSession session = sessionRule.session(); - session.setSchemaMetadataEnabled(false); - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - - // Create a table, metadata should not be updated - DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); - sessionRule - .session() - .execute( - SimpleStatement.builder("CREATE TABLE foo(k int primary key)") - .setExecutionProfile(slowProfile) - .build()); - assertThat(session.getMetadata().getKeyspace(sessionRule.keyspace()).get().getTables()) - .doesNotContainKey(CqlIdentifier.fromInternal("foo")); - - // Reset to config value (true), should refresh and load the new table - session.setSchemaMetadataEnabled(null); - assertThat(session.isSchemaMetadataEnabled()).isTrue(); - ConditionChecker.checkThat( - () -> - assertThat( - session.getMetadata().getKeyspace(sessionRule.keyspace()).get().getTables()) - .containsKey(CqlIdentifier.fromInternal("foo"))) - .becomesTrue(); - } - - @Test - public void should_refresh_schema_manually() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - - assertThat(session.isSchemaMetadataEnabled()).isFalse(); - assertThat(session.getMetadata().getKeyspaces()).isEmpty(); - - Metadata newMetadata = session.refreshSchema(); - assertThat(newMetadata.getKeyspaces()) - .containsKeys( - CqlIdentifier.fromInternal("system"), - CqlIdentifier.fromInternal("system_traces"), - sessionRule.keyspace()); - - assertThat(session.getMetadata()).isSameAs(newMetadata); - } - } - - @CassandraRequirement(min = "4.0", description = "virtual tables introduced in 4.0") - @Test - public void should_get_virtual_metadata() { - skipIfDse60(); - - Metadata md = sessionRule.session().getMetadata(); - KeyspaceMetadata kmd = md.getKeyspace("system_views").get(); - - // Keyspace name should be set, marked as virtual, and have at least sstable_tasks table. - // All other values should be defaulted since they are not defined in the virtual schema tables. 
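The enable/disable/refresh calls exercised by the tests above combine into a common application pattern; a minimal sketch using only API calls that appear in this test (a reachable node at the default contact point is assumed):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Metadata;

public class SchemaMetadataToggle {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // Suspend schema metadata refreshes, e.g. around a batch of DDL statements.
      session.setSchemaMetadataEnabled(false);

      // ... execute DDL here; session.getMetadata() keeps returning the old snapshot ...

      // Passing null reverts to the config value (advanced.metadata.schema.enabled);
      // if that re-enables metadata, a refresh is triggered asynchronously.
      session.setSchemaMetadataEnabled(null);

      // Alternatively, force a refresh and use the resulting immutable snapshot.
      Metadata metadata = session.refreshSchema();
      System.out.println(metadata.getKeyspaces().keySet());
    }
  }
}
```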
- assertThat(kmd.getTables().size()).isGreaterThanOrEqualTo(1); - assertThat(kmd.isVirtual()).isTrue(); - assertThat(kmd.isDurableWrites()).isFalse(); - assertThat(kmd.getName().asCql(true)).isEqualTo("system_views"); - - // Virtual tables lack User Types, Functions, Views and Aggregates - assertThat(kmd.getUserDefinedTypes().size()).isEqualTo(0); - assertThat(kmd.getFunctions().size()).isEqualTo(0); - assertThat(kmd.getViews().size()).isEqualTo(0); - assertThat(kmd.getAggregates().size()).isEqualTo(0); - - assertThat(kmd.describe(true)) - .isEqualTo( - "/* VIRTUAL KEYSPACE system_views WITH replication = { 'class' : 'null' } " - + "AND durable_writes = false; */"); - // Table name should be set, marked as virtual, and it should have columns set. - // indexes, views, clustering column, clustering order and id are not defined in the virtual - // schema tables. - TableMetadata tm = kmd.getTable("sstable_tasks").get(); - assertThat(tm).isNotNull(); - assertThat(tm.getName().toString()).isEqualTo("sstable_tasks"); - assertThat(tm.isVirtual()).isTrue(); - assertThat(tm.getColumns().size()).isEqualTo(7); - assertThat(tm.getIndexes().size()).isEqualTo(0); - assertThat(tm.getPartitionKey().size()).isEqualTo(1); - assertThat(tm.getPartitionKey().get(0).getName().toString()).isEqualTo("keyspace_name"); - assertThat(tm.getClusteringColumns().size()).isEqualTo(2); - assertThat(tm.getId().isPresent()).isFalse(); - assertThat(tm.getOptions().size()).isEqualTo(0); - assertThat(tm.getKeyspace()).isEqualTo(kmd.getName()); - assertThat(tm.describe(true)) - .isEqualTo( - "/* VIRTUAL TABLE system_views.sstable_tasks (\n" - + " keyspace_name text,\n" - + " table_name text,\n" - + " task_id uuid,\n" - + " kind text,\n" - + " progress bigint,\n" - + " total bigint,\n" - + " unit text,\n" - + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" - + "); */"); - // ColumnMetadata is as expected - ColumnMetadata cm = tm.getColumn("progress").get(); - assertThat(cm).isNotNull(); - assertThat(cm.getParent()).isEqualTo(tm.getName()); - assertThat(cm.getType()).isEqualTo(DataTypes.BIGINT); - assertThat(cm.getName().toString()).isEqualTo("progress"); - } - - @CassandraRequirement(min = "4.0", description = "virtual tables introduced in 4.0") - @Test - public void should_exclude_virtual_keyspaces_from_token_map() { - skipIfDse60(); - - Metadata metadata = sessionRule.session().getMetadata(); - Map<CqlIdentifier, KeyspaceMetadata> keyspaces = metadata.getKeyspaces(); - assertThat(keyspaces) - .containsKey(CqlIdentifier.fromCql("system_views")) - .containsKey(CqlIdentifier.fromCql("system_virtual_schema")); - - TokenMap tokenMap = metadata.getTokenMap().orElseThrow(AssertionError::new); - ByteBuffer partitionKey = Bytes.fromHexString("0x00"); // value does not matter - assertThat(tokenMap.getReplicas("system_views", partitionKey)).isEmpty(); - assertThat(tokenMap.getReplicas("system_virtual_schema", partitionKey)).isEmpty(); - // Check that a non-virtual keyspace is present - assertThat(tokenMap.getReplicas(sessionRule.keyspace(), partitionKey)).isNotEmpty(); - } - - private void skipIfDse60() { - // Special case: DSE 6.0 reports C* 4.0 but does not support virtual tables - if (ccmRule.getDseVersion().isPresent()) { - Version dseVersion = ccmRule.getDseVersion().get(); - if (dseVersion.compareTo(DSE_MIN_VIRTUAL_TABLES) < 0) { - throw new AssumptionViolatedException("DSE 6.0 does not support virtual tables"); - } - } - } -}
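For regular (non-virtual) keyspaces, the token map this test inspects is what drives request routing. A short sketch of looking up the replicas for a partition key, using the same getReplicas call as above (keyspace name and key value are illustrative; TypeCodecs.TEXT serializes the key the way the driver would):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.metadata.TokenMap;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import java.nio.ByteBuffer;
import java.util.Set;

public class ReplicaLookup {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      TokenMap tokenMap =
          session.getMetadata().getTokenMap().orElseThrow(IllegalStateException::new);
      // Serialize the partition key the same way the driver would.
      ByteBuffer routingKey =
          TypeCodecs.TEXT.encode("some-key", session.getContext().getProtocolVersion());
      Set<Node> replicas = tokenMap.getReplicas("my_keyspace", routingKey);
      replicas.forEach(node -> System.out.println(node.getEndPoint()));
    }
  }
}
```

diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/TableOptionsIT.java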
b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/TableOptionsIT.java deleted file mode 100644 index 13e4e314df3..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/TableOptionsIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.core.metadata; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.categories.ParallelizableTests; -import java.time.Duration; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; - -@Category(ParallelizableTests.class) -public class TableOptionsIT { - private static final CqlIdentifier READ_REPAIR_KEY = CqlIdentifier.fromCql("read_repair"); - private static final CqlIdentifier ADDITIONAL_WRITE_POLICY_KEY = - CqlIdentifier.fromCql("additional_write_policy"); - - private static CcmRule ccmRule = CcmRule.getInstance(); - // disable debouncer to speed up test. 
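The debouncer mentioned in that comment coalesces schema change events before triggering a refresh; setting the window to zero (as the rule below does) makes refreshes immediate in tests. An application can tune the same knobs through the config; a sketch using the programmatic loader (option names from DefaultDriverOption; METADATA_SCHEMA_MAX_EVENTS and the values shown are assumptions, not taken from this diff):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.time.Duration;

public class SchemaDebounceTuning {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            // Coalesce schema change events arriving within this window...
            .withDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(1))
            // ...but refresh at the latest after this many queued events.
            .withInt(DefaultDriverOption.METADATA_SCHEMA_MAX_EVENTS, 20)
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      System.out.println(session.getMetadata().getKeyspaces().size());
    }
  }
}
```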
- private static SessionRule<CqlSession> sessionRule = - SessionRule.builder(ccmRule) - .withConfigLoader( - SessionUtils.configLoaderBuilder() - .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(0)) - .build()) - .build(); - - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - - @Test - @CassandraRequirement(min = "4.0", description = "This test covers Cassandra 4+ features") - public void should_handle_cassandra4_table_options() { - CqlSession session = sessionRule.session(); - - // A simple table with read_repair and additional_write_policy options - session.execute( - "CREATE TABLE foo(k int, a text, PRIMARY KEY(k)) " - + "WITH read_repair='NONE' AND additional_write_policy='40p'"); - - TableMetadata fooMetadata = - session - .getMetadata() - .getKeyspace(sessionRule.keyspace()) - .orElseThrow(AssertionError::new) - .getTable("foo") - .orElseThrow(AssertionError::new); - - assertThat(fooMetadata.getOptions()) - .containsEntry(READ_REPAIR_KEY, "NONE") - .containsEntry(ADDITIONAL_WRITE_POLICY_KEY, "40p"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metrics/MetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metrics/MetricsIT.java deleted file mode 100644 index 5ac70b54b7b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metrics/MetricsIT.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package com.datastax.oss.driver.api.core.metrics; - -import static org.assertj.core.api.Assertions.assertThat; - -import com.codahale.metrics.Meter; -import com.codahale.metrics.Timer; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; -import com.datastax.oss.driver.categories.ParallelizableTests; -import com.google.common.collect.Lists; -import java.util.Collections; -import java.util.concurrent.TimeUnit; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category(ParallelizableTests.class) -public class MetricsIT { - - @ClassRule public static CcmRule ccmRule = CcmRule.getInstance(); - - @Test - public void should_expose_metrics() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - Collections.singletonList("cql-requests")) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - for (int i = 0; i < 10; i++) { - session.execute("SELECT release_version FROM system.local"); - } - - // Should have 10 requests, check within 5 seconds as metric increments after - // caller is notified. - ConditionChecker.checkThat( - () -> { - assertThat(session.getMetrics()) - .hasValueSatisfying( - metrics -> - assertThat( - metrics.getSessionMetric( - DefaultSessionMetric.CQL_REQUESTS)) - .hasValueSatisfying( - cqlRequests -> { - // No need to be very sophisticated, metrics are already - // covered individually in unit tests. - assertThat(cqlRequests.getCount()).isEqualTo(10); - })); - }) - .before(5, TimeUnit.SECONDS); - } - } - - @Test - public void should_expose_bytes_sent_and_received() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - Lists.newArrayList("bytes-sent", "bytes-received")) - .withStringList( - DefaultDriverOption.METRICS_NODE_ENABLED, - Lists.newArrayList("bytes-sent", "bytes-received")) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - for (int i = 0; i < 10; i++) { - session.execute("SELECT release_version FROM system.local"); - } - - assertThat(session.getMetrics()) - .hasValueSatisfying( - metrics -> { - assertThat(metrics.getSessionMetric(DefaultSessionMetric.BYTES_SENT)) - .hasValueSatisfying( - // Can't be precise here as payload can be dependent on protocol version. - bytesSent -> assertThat(bytesSent.getCount()).isGreaterThan(0)); - assertThat(metrics.getSessionMetric(DefaultSessionMetric.BYTES_RECEIVED)) - .hasValueSatisfying( - bytesReceived -> assertThat(bytesReceived.getCount()).isGreaterThan(0)); - - // get only node in cluster and evaluate its metrics. 
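Reading a metric back outside of a test assertion follows the same Optional-based API used above; a sketch (it assumes the cql-requests session metric is enabled and that the default Dropwizard registry is in use, where cql-requests is a Timer):

```java
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric;
import java.util.Collections;

public class MetricsReadBack {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withStringList(
                DefaultDriverOption.METRICS_SESSION_ENABLED,
                Collections.singletonList("cql-requests"))
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      session.execute("SELECT release_version FROM system.local");
      // getMetrics() is empty if all metrics are disabled, hence the Optional chain.
      session
          .getMetrics()
          .flatMap(metrics -> metrics.getSessionMetric(DefaultSessionMetric.CQL_REQUESTS))
          .ifPresent(metric -> System.out.println(((Timer) metric).getCount()));
    }
  }
}
```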
- Node node = session.getMetadata().getNodes().values().iterator().next(); - assertThat(metrics.getNodeMetric(node, DefaultNodeMetric.BYTES_SENT)) - .hasValueSatisfying( - bytesSent -> assertThat(bytesSent.getCount()).isGreaterThan(0)); - assertThat(metrics.getNodeMetric(node, DefaultNodeMetric.BYTES_RECEIVED)) - .hasValueSatisfying( - bytesReceived -> assertThat(bytesReceived.getCount()).isGreaterThan(0)); - }); - } - } - - @Test - public void should_not_expose_metrics_if_disabled() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withStringList(DefaultDriverOption.METRICS_SESSION_ENABLED, Collections.emptyList()) - .withStringList(DefaultDriverOption.METRICS_NODE_ENABLED, Collections.emptyList()) - .build(); - try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - for (int i = 0; i < 10; i++) { - session.execute("SELECT release_version FROM system.local"); - } - - assertThat(session.getMetrics()).isEmpty(); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryIT.java deleted file mode 100644 index cdc972f2dce..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryIT.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.api.core.ssl; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; -import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import org.junit.ClassRule; -import org.junit.Test; - -public class DefaultSslEngineFactoryIT { - - @ClassRule public static CustomCcmRule ccm = CustomCcmRule.builder().withSsl().build(); - - @Test - public void should_connect_with_ssl() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_if_hostname_validation_enabled_and_hostname_does_not_match() { - // should not succeed as the certificate does not have a CN that would match the hostname - // (unless the hostname is node1). - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PATH, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) - .withString( - DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, - CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_if_truststore_not_provided() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) - .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - session.execute("select * from system.local"); - } - } - - @Test(expected = AllNodesFailedException.class) - public void should_not_connect_if_not_using_ssl() { - try (CqlSession session = SessionUtils.newSession(ccm)) { - session.execute("select * from system.local"); - } - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/querybuilder/JacksonJsonCodec.java b/integration-tests/src/test/java/com/datastax/oss/driver/api/querybuilder/JacksonJsonCodec.java deleted file mode 100644 index 37fb471774b..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/querybuilder/JacksonJsonCodec.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.api.querybuilder; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.internal.core.util.Strings; -import com.datastax.oss.protocol.internal.util.Bytes; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JavaType; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.type.TypeFactory; -import edu.umd.cs.findbugs.annotations.NonNull; -import edu.umd.cs.findbugs.annotations.Nullable; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class JacksonJsonCodec<T> implements TypeCodec<T> { - - private final ObjectMapper objectMapper; - private final GenericType<T> javaType; - - JacksonJsonCodec(Class<T> javaClass) { - this(javaClass, new ObjectMapper()); - } - - private JacksonJsonCodec(Class<T> javaClass, ObjectMapper objectMapper) { - this.javaType = GenericType.of(javaClass); - this.objectMapper = objectMapper; - } - - @NonNull - @Override - public GenericType<T> getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.TEXT; - } - - @Nullable - @Override - public ByteBuffer encode(@Nullable T value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } - try { - return ByteBuffer.wrap(objectMapper.writeValueAsBytes(value)); - } catch (JsonProcessingException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - @Nullable - @Override - public T decode(@Nullable ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - if (bytes == null) { - return null; - } - try { - return objectMapper.readValue(Bytes.getArray(bytes), toJacksonJavaType()); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - @NonNull - @Override - public String format(T value) { - if (value == null) { - return "NULL"; - } - String json; - try { - json = objectMapper.writeValueAsString(value); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - return Strings.quote(json); - } - - @Nullable - @Override - @SuppressWarnings("unchecked") - public T parse(String value) { - if (value == null || value.isEmpty() || value.equalsIgnoreCase("NULL")) { - return null; - } - if (!Strings.isQuoted(value)) { - throw new IllegalArgumentException("JSON strings must be enclosed by single quotes"); - } - String json = Strings.unquote(value); - try { - return (T) objectMapper.readValue(json, toJacksonJavaType()); - } catch (IOException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - private JavaType toJacksonJavaType() { - return TypeFactory.defaultInstance().constructType(getJavaType().getType()); - } -}
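A codec like this is registered when the session is built and then applies wherever the driver needs to map between text columns and the declared Java type. A hypothetical usage sketch (the User POJO and the column/table names are invented; note the constructor above is package-private, so assume it is made public or the code lives in the same package):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;

public class JsonCodecUsage {
  // Invented POJO; Jackson needs a no-arg constructor and accessible properties.
  public static class User {
    public String name;
    public int age;
  }

  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder().addTypeCodecs(new JacksonJsonCodec<>(User.class)).build()) {
      // 'ks.users.profile' is an invented text column holding a JSON document.
      Row row = session.execute("SELECT profile FROM ks.users LIMIT 1").one();
      if (row != null) {
        User user = row.get("profile", User.class); // decoded through the codec
        System.out.println(user.name + " / " + user.age);
      }
    }
  }
}
```

diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/AllNodesFailedIT.java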
b/integration-tests/src/test/java/com/datastax/oss/driver/core/AllNodesFailedIT.java new file mode 100644 index 00000000000..ed453681a65 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/AllNodesFailedIT.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core; + +import static com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readTimeout; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class AllNodesFailedIT { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(2)); + + @Test + public void should_report_multiple_errors_per_node() { + SIMULACRON_RULE.cluster().prime(when("SELECT foo").then(readTimeout(ONE, 0, 0, false))); + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, MultipleRetryPolicy.class) + .build(); + + try (CqlSession session = + (CqlSession) + SessionUtils.baseBuilder() + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) + .withConfigLoader(loader) + .build()) { + // when executing a query. 
+ session.execute("SELECT foo"); + fail("AllNodesFailedException expected"); + } catch (AllNodesFailedException ex) { + assertThat(ex.getAllErrors()).hasSize(2); + Iterator<Entry<Node, List<Throwable>>> iterator = ex.getAllErrors().entrySet().iterator(); + // first node should have been tried twice + Entry<Node, List<Throwable>> node1Errors = iterator.next(); + assertThat(node1Errors.getValue()).hasSize(2); + // second node should have been tried twice + Entry<Node, List<Throwable>> node2Errors = iterator.next(); + assertThat(node2Errors.getValue()).hasSize(2); + } + } + + public static class MultipleRetryPolicy extends DefaultRetryPolicy { + + public MultipleRetryPolicy(DriverContext context, String profileName) { + super(context, profileName); + } + + @Override + @Deprecated + public RetryDecision onReadTimeout( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int blockFor, + int received, + boolean dataPresent, + int retryCount) { + // retry each node twice + if (retryCount % 2 == 0) { + return RetryDecision.RETRY_SAME; + } else { + return RetryDecision.RETRY_NEXT; + } + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ConnectIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectIT.java similarity index 63% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ConnectIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectIT.java index 6d1b525f23a..67585bc691d 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ConnectIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,13 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ -package com.datastax.oss.driver.api.core; +package com.datastax.oss.driver.core; -import static com.datastax.oss.driver.api.testinfra.utils.ConditionChecker.checkThat; -import static java.util.concurrent.TimeUnit.SECONDS; -import static junit.framework.TestCase.fail; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.awaitility.Awaitility.await; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.context.DriverContext; @@ -29,7 +36,6 @@ import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; @@ -44,44 +50,47 @@ import java.util.concurrent.TimeUnit; import org.junit.Before; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; @Category(ParallelizableTests.class) public class ConnectIT { @ClassRule - public static SimulacronRule simulacronRule = + public static final SimulacronRule SIMULACRON_RULE = new SimulacronRule(ClusterSpec.builder().withNodes(2)); - @Rule public ExpectedException thrown = ExpectedException.none(); - @Before public void setup() { - simulacronRule.cluster().acceptConnections(); + SIMULACRON_RULE.cluster().acceptConnections(); + SIMULACRON_RULE + .cluster() + .prime( + // Absolute minimum for a working schema metadata (we just want to check that it gets + // loaded at startup). 
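The reconnection behavior these ConnectIT tests exercise is config-driven; a sketch of a loader that makes the initial connection keep retrying instead of failing fast (option names from DefaultDriverOption; the 500 ms delay mirrors the test, and RECONNECT_ON_INIT is an assumption since the corresponding hunk line is not visible in this diff):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy;
import java.time.Duration;
import java.util.concurrent.CompletionStage;

public class ReconnectOnInit {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            // Keep retrying the contact points instead of failing the session future.
            .withBoolean(DefaultDriverOption.RECONNECT_ON_INIT, true)
            .withClass(
                DefaultDriverOption.RECONNECTION_POLICY_CLASS, ConstantReconnectionPolicy.class)
            .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofMillis(500))
            .build();
    // The future completes only once a contact point becomes reachable.
    CompletionStage<CqlSession> sessionFuture =
        CqlSession.builder().withConfigLoader(loader).buildAsync();
    sessionFuture.thenAccept(session -> System.out.println("Connected as " + session.getName()));
  }
}
```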
+ when("SELECT * FROM system_schema.keyspaces") + .then(rows().row("keyspace_name", "system").row("keyspace_name", "test"))); } @Test public void should_fail_fast_if_contact_points_unreachable_and_reconnection_disabled() { // Given - simulacronRule.cluster().rejectConnections(0, RejectScope.STOP); - - thrown.expect(AllNodesFailedException.class); - thrown.expectMessage( - "Could not reach any contact point, make sure you've provided valid addresses"); + SIMULACRON_RULE.cluster().rejectConnections(0, RejectScope.STOP); // When - SessionUtils.newSession(simulacronRule); + Throwable t = catchThrowable(() -> SessionUtils.newSession(SIMULACRON_RULE)); - // Then the exception is thrown + // Then + assertThat(t) + .isInstanceOf(AllNodesFailedException.class) + .hasMessageContaining( + "Could not reach any contact point, make sure you've provided valid addresses"); } @Test public void should_wait_for_contact_points_if_reconnection_enabled() throws Exception { // Given - simulacronRule.cluster().rejectConnections(0, RejectScope.STOP); + SIMULACRON_RULE.cluster().rejectConnections(0, RejectScope.STOP); // When DriverConfigLoader loader = @@ -93,7 +102,7 @@ public void should_wait_for_contact_points_if_reconnection_enabled() throws Exce .withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofMillis(500)) .build(); CompletableFuture<? extends Session> sessionFuture = - newSessionAsync(simulacronRule, loader).toCompletableFuture(); + newSessionAsync(loader).toCompletableFuture(); // wait a bit to ensure we have a couple of reconnections, otherwise we might race and allow // reconnections before the initial attempt TimeUnit.SECONDS.sleep(2); @@ -102,12 +111,12 @@ public void should_wait_for_contact_points_if_reconnection_enabled() throws Exce assertThat(sessionFuture).isNotCompleted(); // When - simulacronRule.cluster().acceptConnections(); + SIMULACRON_RULE.cluster().acceptConnections(); // Then this doesn't throw - Session session = sessionFuture.get(2, TimeUnit.SECONDS); - - session.close(); + try (Session session = sessionFuture.get(30, TimeUnit.SECONDS)) { + assertThat(session.getMetadata().getKeyspaces()).containsKey(CqlIdentifier.fromCql("test")); + } } /** @@ -116,22 +125,23 @@ public void should_wait_for_contact_points_if_reconnection_enabled() throws Exce */ @Test public void should_cleanup_on_lbp_init_failure() { - try { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .without(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER) - .build(); - CqlSession.builder() - .addContactEndPoints(simulacronRule.getContactPoints()) - .withConfigLoader(loader) - .build(); - fail("Should have thrown a DriverException for no DC with explicit contact point"); - } catch (DriverException ignored) { - } + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .without(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER) + .build(); + assertThatThrownBy( + () -> + CqlSession.builder() + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) + .withConfigLoader(loader) + .build()) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Since you provided explicit contact points, the local DC must be explicitly set"); // One second should be plenty of time for connections to close server side - checkThat(() -> simulacronRule.cluster().getConnections().getConnections().isEmpty()) - .before(1, SECONDS) - .becomesTrue(); + await() + .atMost(1, TimeUnit.SECONDS) + .until(() -> SIMULACRON_RULE.cluster().getConnections().getConnections().isEmpty()); } /** @@ -141,13 +151,16
@@ public void should_cleanup_on_lbp_init_failure() { @Test public void should_mark_unreachable_contact_points_as_local_and_schedule_reconnections() { // Reject connections only on one node - BoundCluster boundCluster = simulacronRule.cluster(); + BoundCluster boundCluster = SIMULACRON_RULE.cluster(); boundCluster.node(0).rejectConnections(0, RejectScope.STOP); - try (CqlSession session = SessionUtils.newSession(simulacronRule)) { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { Map<UUID, Node> nodes = session.getMetadata().getNodes(); // Node states are updated asynchronously, so guard against race conditions - ConditionChecker.checkThat( + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( () -> { // Before JAVA-2177, this would fail every other time because if the node was tried // first for the initial connection, it was marked down and not passed to @@ -163,16 +176,14 @@ public void should_mark_unreachable_contact_points_as_local_and_schedule_reconne assertThat(node1.getDistance()).isEqualTo(NodeDistance.LOCAL); assertThat(node1.getOpenConnections()).isEqualTo(2); // control + regular assertThat(node1.isReconnecting()).isFalse(); - }) - .becomesTrue(); + }); } } @SuppressWarnings("unchecked") - private CompletionStage<? extends Session> newSessionAsync( - SimulacronRule serverRule, DriverConfigLoader loader) { + private CompletionStage<? extends Session> newSessionAsync(DriverConfigLoader loader) { return SessionUtils.baseBuilder() - .addContactEndPoints(serverRule.getContactPoints()) + .addContactEndPoints(ConnectIT.SIMULACRON_RULE.getContactPoints()) .withConfigLoader(loader) .buildAsync(); } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ConnectKeyspaceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectKeyspaceIT.java similarity index 64% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ConnectKeyspaceIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectKeyspaceIT.java index 32347d57e0b..af943b00184 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ConnectKeyspaceIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ConnectKeyspaceIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,10 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ -package com.datastax.oss.driver.api.core; +package com.datastax.oss.driver.core; import static org.assertj.core.api.Assertions.assertThat; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.InvalidKeyspaceException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.session.Session; @@ -32,23 +37,24 @@ @Category(ParallelizableTests.class) public class ConnectKeyspaceIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @Test public void should_connect_to_existing_keyspace() { - CqlIdentifier keyspace = sessionRule.keyspace(); - try (Session session = SessionUtils.newSession(ccm, keyspace)) { + CqlIdentifier keyspace = SESSION_RULE.keyspace(); + try (Session session = SessionUtils.newSession(CCM_RULE, keyspace)) { assertThat(session.getKeyspace()).hasValue(keyspace); } } @Test public void should_connect_with_no_keyspace() { - try (Session session = SessionUtils.newSession(ccm)) { + try (Session session = SessionUtils.newSession(CCM_RULE)) { assertThat(session.getKeyspace()).isEmpty(); } } @@ -69,6 +75,6 @@ public void should_fail_to_connect_to_non_existent_keyspace_when_reconnecting_on private void should_fail_to_connect_to_non_existent_keyspace(DriverConfigLoader loader) { CqlIdentifier keyspace = CqlIdentifier.fromInternal("does not exist"); - SessionUtils.newSession(ccm, keyspace, loader); + SessionUtils.newSession(CCM_RULE, keyspace, loader); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/PeersV2NodeRefreshIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/PeersV2NodeRefreshIT.java new file mode 100644 index 00000000000..47f3e3957af --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/PeersV2NodeRefreshIT.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.protocol.internal.request.Query; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.cluster.QueryLog; +import com.datastax.oss.simulacron.server.BoundCluster; +import com.datastax.oss.simulacron.server.Server; +import java.util.concurrent.ExecutionException; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** Test for JAVA-2654. */ +public class PeersV2NodeRefreshIT { + + private static Server peersV2Server; + private static BoundCluster cluster; + + @BeforeClass + public static void setup() { + peersV2Server = Server.builder().withMultipleNodesPerIp(true).build(); + cluster = peersV2Server.register(ClusterSpec.builder().withNodes(2)); + } + + @AfterClass + public static void tearDown() { + if (cluster != null) { + cluster.stop(); + } + if (peersV2Server != null) { + peersV2Server.close(); + } + } + + @Test + public void should_successfully_send_peers_v2_node_refresh_query() + throws InterruptedException, ExecutionException { + CqlSession session = + CqlSession.builder().addContactPoint(cluster.node(1).inetSocketAddress()).build(); + Node node = findNonControlNode(session); + ((InternalDriverContext) session.getContext()) + .getMetadataManager() + .refreshNode(node) + .toCompletableFuture() + .get(); + assertThat(hasNodeRefreshQuery()) + .describedAs("Expecting peers_v2 node refresh query to be present but it wasn't") + .isTrue(); + } + + private Node findNonControlNode(CqlSession session) { + EndPoint controlNode = + ((InternalDriverContext) session.getContext()) + .getControlConnection() + .channel() + .getEndPoint(); + return session.getMetadata().getNodes().values().stream() + .filter(node -> !node.getEndPoint().equals(controlNode)) + .findAny() + .orElseThrow(() -> new IllegalStateException("Expecting at least one non-control node")); + } + + private boolean hasNodeRefreshQuery() { + for (QueryLog log : cluster.getLogs().getQueryLogs()) { + if (log.getFrame().message instanceof Query) { + if (((Query) log.getFrame().message) + .query.contains( + "SELECT * FROM system.peers_v2 WHERE peer = :address and peer_port = :port")) { + return true; + } + } + } + return false; + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/PoolBalancingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/PoolBalancingIT.java new file mode 100644 index 00000000000..c927976520b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/PoolBalancingIT.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.NoNodeAvailableException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class PoolBalancingIT { + + private static final int POOL_SIZE = 2; + private static final int REQUESTS_PER_CONNECTION = 20; + + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE) + .withConfigLoader( + DriverConfigLoader.programmaticBuilder() + .withInt(DefaultDriverOption.CONNECTION_MAX_REQUESTS, REQUESTS_PER_CONNECTION) + .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, POOL_SIZE) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); + + private CountDownLatch done; + private AtomicReference<Throwable> unexpectedErrorRef; + + @Before + public void setup() { + done = new CountDownLatch(1); + unexpectedErrorRef = new AtomicReference<>(); + } + + @Test + public void should_balance_requests_across_connections() throws InterruptedException { + // Generate just the right load to completely fill the pool. All requests should succeed. + int simultaneousRequests = POOL_SIZE * REQUESTS_PER_CONNECTION; + + for (int i = 0; i < simultaneousRequests; i++) { + reschedule(null, null); + } + SECONDS.sleep(1); + done.countDown(); + + Throwable unexpectedError = unexpectedErrorRef.get(); + if (unexpectedError != null) { + fail("At least one request failed unexpectedly", unexpectedError); + } + } + + private void reschedule(AsyncResultSet asyncResultSet, Throwable throwable) { + if (done.getCount() == 1) { + if (throwable != null + // Actually there is a tiny race condition where pool acquisition may still fail: channel + // sizes can change as the client is iterating through them, so it can look like they're + // all full even if there's always a free slot somewhere at every point in time. This will + // result in NoNodeAvailableException, ignore it.
+ && !(throwable instanceof NoNodeAvailableException)) { + unexpectedErrorRef.compareAndSet(null, throwable); + // Even a single error is a failure, no need to continue + done.countDown(); + } + SESSION_RULE + .session() + .executeAsync("SELECT release_version FROM system.local") + .whenComplete(this::reschedule); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionInitialNegotiationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionInitialNegotiationIT.java new file mode 100644 index 00000000000..326c05eb15b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionInitialNegotiationIT.java @@ -0,0 +1,320 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.dse.driver.api.core.DseProtocolVersion; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** Covers protocol negotiation for the initial connection to the first contact point. 
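+ *
+ * <p>The driver starts the handshake with its highest supported non-beta version; if the server
+ * replies with a protocol error, it retries with the next lower version. When {@code
+ * advanced.protocol.version} is set explicitly, negotiation is disabled and a mismatch surfaces
+ * as {@link UnsupportedProtocolVersionException}, as the tests below verify.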
*/ +@Category(ParallelizableTests.class) +public class ProtocolVersionInitialNegotiationIT { + + @Rule public CcmRule ccm = CcmRule.getInstance(); + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.1", + maxExclusive = "2.2", + description = "Only C* in [2.1,2.2[ has V3 as its highest version") + @BackendRequirement( + type = BackendType.DSE, + maxExclusive = "5.0", + description = "Only DSE in [*,5.0[ has V3 as its highest version") + @Test + public void should_downgrade_to_v3() { + try (CqlSession session = SessionUtils.newSession(ccm)) { + assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.2", + maxExclusive = "4.0-rc1", + description = "Only C* in [2.2,4.0-rc1[ has V4 as its highest version") + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + maxExclusive = "5.1", + description = "Only DSE in [5.0,5.1[ has V4 as its highest version") + @Test + public void should_downgrade_to_v4() { + try (CqlSession session = SessionUtils.newSession(ccm)) { + assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(4); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "4.0-rc1", + description = "Only C* in [4.0-rc1,*[ has V5 as its highest version") + @Test + public void should_downgrade_to_v5_oss() { + try (CqlSession session = SessionUtils.newSession(ccm)) { + assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(5); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1", + maxExclusive = "6.0", + description = "Only DSE in [5.1,6.0[ has DSE_V1 as its highest version") + @Test + public void should_downgrade_to_dse_v1() { + try (CqlSession session = SessionUtils.newSession(ccm)) { + assertThat(session.getContext().getProtocolVersion()).isEqualTo(DseProtocolVersion.DSE_V1); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + maxExclusive = "2.2", + description = "Only C* in [*,2.2[ has V4 unsupported") + @BackendRequirement( + type = BackendType.DSE, + maxExclusive = "5.0", + description = "Only DSE in [*,5.0[ has V4 unsupported") + @Test + public void should_fail_if_provided_v4_is_not_supported() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") + .build(); + try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException anfe) { + Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); + assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); + UnsupportedProtocolVersionException unsupportedException = + (UnsupportedProtocolVersionException) cause; + assertThat(unsupportedException.getAttemptedVersions()) + .containsOnly(DefaultProtocolVersion.V4); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.1", + maxExclusive = "4.0-rc1", + description = "Only C* in [2.1,4.0-rc1[ has V5 unsupported or supported as beta") + @BackendRequirement( + type = BackendType.DSE, + maxExclusive = "7.0", + description = "Only DSE in [*,7.0[ has V5 unsupported or supported as beta") + @Test + public void 
should_fail_if_provided_v5_is_not_supported() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "V5") + .build(); + try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException anfe) { + Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); + assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); + UnsupportedProtocolVersionException unsupportedException = + (UnsupportedProtocolVersionException) cause; + assertThat(unsupportedException.getAttemptedVersions()) + .containsOnly(DefaultProtocolVersion.V5); + } + } + + @BackendRequirement( + type = BackendType.DSE, + maxExclusive = "5.1", + description = "Only DSE in [*,5.1[ has DSE_V1 unsupported") + @Test + public void should_fail_if_provided_dse_v1_is_not_supported() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V1") + .build(); + try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException anfe) { + Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); + assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); + UnsupportedProtocolVersionException unsupportedException = + (UnsupportedProtocolVersionException) cause; + assertThat(unsupportedException.getAttemptedVersions()) + .containsOnly(DseProtocolVersion.DSE_V1); + } + } + + @BackendRequirement( + type = BackendType.DSE, + maxExclusive = "6.0", + description = "Only DSE in [*,6.0[ has DSE_V2 unsupported") + @Test + public void should_fail_if_provided_dse_v2_is_not_supported() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V2") + .build(); + try (CqlSession ignored = SessionUtils.newSession(ccm, loader)) { + fail("Expected an AllNodesFailedException"); + } catch (AllNodesFailedException anfe) { + Throwable cause = anfe.getAllErrors().values().iterator().next().get(0); + assertThat(cause).isInstanceOf(UnsupportedProtocolVersionException.class); + UnsupportedProtocolVersionException unsupportedException = + (UnsupportedProtocolVersionException) cause; + assertThat(unsupportedException.getAttemptedVersions()) + .containsOnly(DseProtocolVersion.DSE_V2); + } + } + + /** Note that this test will need to be updated as new protocol versions are introduced. */ + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "4.0", + description = "Only C* in [4.0,*[ has V5 supported") + @Test + public void should_not_downgrade_if_server_supports_latest_version() { + try (CqlSession session = SessionUtils.newSession(ccm)) { + assertThat(session.getContext().getProtocolVersion()).isEqualTo(ProtocolVersion.V5); + session.execute("select * from system.local"); + } + } + + /** Note that this test will need to be updated as new protocol versions are introduced. 
*/ + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.0", + description = "Only DSE in [6.0,*[ has DSE_V2 supported") + @Test + public void should_not_downgrade_if_server_supports_latest_version_dse() { + try (CqlSession session = SessionUtils.newSession(ccm)) { + assertThat(session.getContext().getProtocolVersion()).isEqualTo(ProtocolVersion.DSE_V2); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.1", + description = "Only C* in [2.1,*[ has V3 supported") + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "4.8", + description = "Only DSE in [4.8,*[ has V3 supported") + @Test + public void should_use_explicitly_provided_v3() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") + .build(); + try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(3); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.2", + description = "Only C* in [2.2,*[ has V4 supported") + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "Only DSE in [5.0,*[ has V4 supported") + @Test + public void should_use_explicitly_provided_v4() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") + .build(); + try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(4); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "4.0", + description = "Only C* in [4.0,*[ has V5 supported") + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "7.0", + description = "Only DSE in [7.0,*[ has V5 supported") + @Test + public void should_use_explicitly_provided_v5() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "V5") + .build(); + try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + assertThat(session.getContext().getProtocolVersion().getCode()).isEqualTo(5); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1", + description = "Only DSE in [5.1,*[ has DSE_V1 supported") + @Test + public void should_use_explicitly_provided_dse_v1() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V1") + .build(); + try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + assertThat(session.getContext().getProtocolVersion()).isEqualTo(DseProtocolVersion.DSE_V1); + session.execute("select * from system.local"); + } + } + + @BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.0", + description = "Only DSE in [6.0,*[ has DSE_V2 supported") + @Test + public void should_use_explicitly_provided_dse_v2() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "DSE_V2") + .build(); + try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + assertThat(session.getContext().getProtocolVersion()).isEqualTo(DseProtocolVersion.DSE_V2); + session.execute("select * from system.local"); + } + } +} 
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ProtocolVersionMixedClusterIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionMixedClusterIT.java similarity index 75% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ProtocolVersionMixedClusterIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionMixedClusterIT.java index 2d8c08ddeb2..fae7477063c 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ProtocolVersionMixedClusterIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ProtocolVersionMixedClusterIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,11 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core; +package com.datastax.oss.driver.core; import static com.datastax.oss.driver.assertions.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import static org.assertj.core.api.Assertions.fail; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultProtocolVersion; +import com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; @@ -31,12 +37,9 @@ import com.datastax.oss.simulacron.server.BoundCluster; import com.datastax.oss.simulacron.server.BoundNode; import com.datastax.oss.simulacron.server.BoundTopic; -import java.net.InetSocketAddress; import java.util.stream.Stream; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; /** * Covers protocol re-negotiation with a mixed cluster: if, after the initial connection and the @@ -45,7 +48,6 @@ */ @Category(ParallelizableTests.class) public class ProtocolVersionMixedClusterIT { - @Rule public ExpectedException thrown = ExpectedException.none(); @Test public void should_downgrade_if_peer_does_not_support_negotiated_version() { @@ -63,19 +65,13 @@ public void should_downgrade_if_peer_does_not_support_negotiated_version() { .build()) { InternalDriverContext context = (InternalDriverContext) session.getContext(); + // General version should have been downgraded to V3 assertThat(context.getProtocolVersion()).isEqualTo(DefaultProtocolVersion.V3); + // But control connection should still be using protocol V4 since node0 supports V4 + 
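+      // (a channel's protocol version is fixed at handshake time, so the downgrade only affects
+      // connections opened afterwards)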
assertThat(context.getControlConnection().channel().protocolVersion())
+          .isEqualTo(DefaultProtocolVersion.V4);
 
-      // Find out which node became the control node after the reconnection (not necessarily node 0)
-      InetSocketAddress controlAddress =
-          (InetSocketAddress) context.getControlConnection().channel().getEndPoint().resolve();
-      BoundNode currentControlNode = null;
-      for (BoundNode node : simulacron.getNodes()) {
-        if (node.inetSocketAddress().equals(controlAddress)) {
-          currentControlNode = node;
-        }
-      }
-      assertThat(currentControlNode).isNotNull();
-      assertThat(queries(simulacron)).hasSize(8);
+      assertThat(queries(simulacron)).hasSize(4);
 
       assertThat(protocolQueries(contactPoint, 4))
           .containsExactly(
@@ -84,13 +80,6 @@ public void should_downgrade_if_peer_does_not_support_negotiated_version() {
               "SELECT * FROM system.local",
               "SELECT * FROM system.peers_v2",
               "SELECT * FROM system.peers");
-      assertThat(protocolQueries(currentControlNode, 3))
-          .containsExactly(
-              // Reconnection with protocol v3
-              "SELECT cluster_name FROM system.local",
-              "SELECT * FROM system.local",
-              "SELECT * FROM system.peers_v2",
-              "SELECT * FROM system.peers");
     }
   }
 
@@ -124,19 +113,25 @@ public void should_keep_current_if_supported_by_all_peers() {
 
   @Test
   public void should_fail_if_peer_does_not_support_v3() {
-    thrown.expect(UnsupportedProtocolVersionException.class);
-    thrown.expectMessage(
-        "reports Cassandra version 2.0.9, but the driver only supports 2.1.0 and above");
-    try (BoundCluster simulacron = mixedVersions("3.0.0", "2.0.9", "3.11");
-        BoundNode contactPoint = simulacron.node(0);
-        CqlSession ignored =
-            (CqlSession)
-                SessionUtils.baseBuilder()
-                    .addContactPoint(contactPoint.inetSocketAddress())
-                    .build()) {
-      fail("Cluster init should have failed");
-    }
+    Throwable t =
+        catchThrowable(
+            () -> {
+              try (BoundCluster simulacron = mixedVersions("3.0.0", "2.0.9", "3.11");
+                  BoundNode contactPoint = simulacron.node(0);
+                  CqlSession ignored =
+                      (CqlSession)
+                          SessionUtils.baseBuilder()
+                              .addContactPoint(contactPoint.inetSocketAddress())
+                              .build()) {
+                fail("Cluster init should have failed");
+              }
+            });
+
+    assertThat(t)
+        .isInstanceOf(UnsupportedProtocolVersionException.class)
+        .hasMessageContaining(
+            "reports Cassandra version 2.0.9, but the driver only supports 2.1.0 and above");
   }
 
   @Test
@@ -181,12 +176,12 @@ private BoundCluster mixedVersions(String... versions) {
     return SimulacronRule.server.register(clusterSpec);
   }
 
-  private Stream<QueryLog> queries(BoundTopic topic) {
+  private Stream<QueryLog> queries(BoundTopic<?, ?> topic) {
     return topic.getLogs().getQueryLogs().stream()
         .filter(q -> q.getFrame().message instanceof Query);
   }
 
-  private Stream<String> protocolQueries(BoundTopic topic, int protocolVersion) {
+  private Stream<String> protocolQueries(BoundTopic<?, ?> topic, int protocolVersion) {
     return queries(topic)
         .filter(q -> q.getFrame().protocolVersion == protocolVersion)
         .map(QueryLog::getQuery);
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/SerializationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/SerializationIT.java
new file mode 100644
index 00000000000..b33e5421838
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/SerializationIT.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.core;
+
+import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows;
+import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError;
+import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
+import com.datastax.oss.driver.api.core.cql.Row;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.servererrors.ServerError;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule;
+import com.datastax.oss.driver.categories.ParallelizableTests;
+import com.datastax.oss.driver.internal.SerializationHelper;
+import com.datastax.oss.simulacron.common.cluster.ClusterSpec;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+@Category(ParallelizableTests.class)
+public class SerializationIT {
+  private static final SimulacronRule SIMULACRON_RULE =
+      new SimulacronRule(ClusterSpec.builder().withNodes(1));
+
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      SessionRule.builder(SIMULACRON_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE);
+
+  @Before
+  public void clear() {
+    SIMULACRON_RULE.cluster().clearPrimes(true);
+  }
+
+  @Test
+  public void should_serialize_node() {
+    // Given
+    Node node = SESSION_RULE.session().getMetadata().getNodes().values().iterator().next();
+
+    // When
+    Node deserializedNode = SerializationHelper.serializeAndDeserialize(node);
+
+    // Then
+    // verify a few fields, no need to be exhaustive
+    assertThat(deserializedNode.getHostId()).isEqualTo(node.getHostId());
+    assertThat(deserializedNode.getEndPoint()).isEqualTo(node.getEndPoint());
+    assertThat(deserializedNode.getCassandraVersion()).isEqualTo(node.getCassandraVersion());
+  }
+
+  @Test
+  public void should_serialize_driver_exception() {
+    // Given
+    SIMULACRON_RULE.cluster().prime(when("mock query").then(serverError("mock server error")));
+    try {
+      SESSION_RULE.session().execute("mock query");
+      fail("Expected a ServerError");
+    } catch (ServerError error) {
+      assertThat(error.getExecutionInfo()).isNotNull();
+
+      // When
+      ServerError deserializedError = SerializationHelper.serializeAndDeserialize(error);
+
+      // Then
+      assertThat(deserializedError.getMessage()).isEqualTo("mock server error");
+      assertThat(deserializedError.getCoordinator().getEndPoint())
+          .isEqualTo(error.getCoordinator().getEndPoint());
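+      // getExecutionInfo() references live driver objects (session, nodes), so it is declared
+      // transient and is expected to be lost in the round trip: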
assertThat(deserializedError.getExecutionInfo()).isNull(); // transient + } + } + + @Test + public void should_serialize_row() { + // Given + SIMULACRON_RULE + .cluster() + .prime(when("mock query").then(rows().row("t", "mock data").columnTypes("t", "varchar"))); + Row row = SESSION_RULE.session().execute("mock query").one(); + + // When + row = SerializationHelper.serializeAndDeserialize(row); + + // Then + ColumnDefinition columnDefinition = row.getColumnDefinitions().get("t"); + assertThat(columnDefinition.getType()).isEqualTo(DataTypes.TEXT); + assertThat(row.getString("t")).isEqualTo("mock data"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/SessionLeakIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/SessionLeakIT.java new file mode 100644 index 00000000000..c0cf0b78e7f --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/SessionLeakIT.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.InvalidKeyspaceException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.IsolatedTests; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; +import java.util.HashSet; +import java.util.Set; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.slf4j.LoggerFactory; + +@Category(IsolatedTests.class) +@RunWith(MockitoJUnitRunner.class) +public class SessionLeakIT { + + @ClassRule + public static final 
SimulacronRule SIMULACRON_RULE =
+      new SimulacronRule(ClusterSpec.builder().withNodes(1));
+
+  @Mock private Appender<ILoggingEvent> appender;
+  @Captor private ArgumentCaptor<ILoggingEvent> loggingEventCaptor;
+
+  @Before
+  public void setupLogger() {
+    Logger logger = (Logger) LoggerFactory.getLogger(DefaultSession.class);
+    logger.setLevel(Level.WARN);
+    logger.addAppender(appender);
+    // no need to clean up after since this is an isolated test
+  }
+
+  @Test
+  public void should_warn_when_session_count_exceeds_threshold() {
+    int threshold = 4;
+    // Set the config option explicitly, in case it gets overridden in the test application.conf:
+    DriverConfigLoader configLoader =
+        DriverConfigLoader.programmaticBuilder()
+            .withInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD, threshold)
+            .build();
+
+    Set<CqlSession> sessions = new HashSet<>();
+
+    // Stay under the threshold, no warnings expected
+    for (int i = 0; i < threshold; i++) {
+      sessions.add(SessionUtils.newSession(SIMULACRON_RULE, configLoader));
+    }
+    verify(appender, never()).doAppend(any());
+
+    // Go over the threshold, 1 warning for every new session
+    sessions.add(SessionUtils.newSession(SIMULACRON_RULE, configLoader));
+    verify(appender).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .contains("You have too many session instances: 5 active, expected less than 4");
+
+    reset(appender);
+    sessions.add(SessionUtils.newSession(SIMULACRON_RULE, configLoader));
+    verify(appender).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .contains("You have too many session instances: 6 active, expected less than 4");
+
+    // Go back under the threshold, no warnings expected
+    sessions.forEach(Session::close);
+    sessions.clear();
+    reset(appender);
+    CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, configLoader);
+    verify(appender, never()).doAppend(any());
+    session.close();
+  }
+
+  @Test
+  public void should_never_warn_when_session_init_fails() {
+    SIMULACRON_RULE
+        .cluster()
+        .prime(PrimeDsl.when("USE \"non_existent_keyspace\"").then(PrimeDsl.invalid("irrelevant")));
+    int threshold = 4;
+    // Set the config option explicitly, in case it gets overridden in the test application.conf:
+    DriverConfigLoader configLoader =
+        DriverConfigLoader.programmaticBuilder()
+            .withInt(DefaultDriverOption.SESSION_LEAK_THRESHOLD, threshold)
+            .build();
+    // Go over the threshold, no warnings expected
+    for (int i = 0; i < threshold + 1; i++) {
+      try (Session session =
+          SessionUtils.newSession(
+              SIMULACRON_RULE, CqlIdentifier.fromCql("non_existent_keyspace"), configLoader)) {
+        fail("Session %s should have failed to initialize", session.getName());
+      } catch (InvalidKeyspaceException e) {
+        assertThat(e.getMessage()).isEqualTo("Invalid keyspace non_existent_keyspace");
+      }
+    }
+    verify(appender, never()).doAppend(any());
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/auth/PlainTextAuthProviderIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/auth/PlainTextAuthProviderIT.java
new file mode 100644
index 00000000000..86dd6cda2fd
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/auth/PlainTextAuthProviderIT.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.core.auth;
+
+import com.datastax.oss.driver.api.core.AllNodesFailedException;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.Version;
+import com.datastax.oss.driver.api.core.auth.AuthProvider;
+import com.datastax.oss.driver.api.core.auth.ProgrammaticPlainTextAuthProvider;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
+import com.datastax.oss.driver.api.core.session.SessionBuilder;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
+import com.datastax.oss.driver.internal.core.auth.PlainTextAuthProvider;
+import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.TimeUnit;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+
+public class PlainTextAuthProviderIT {
+
+  @ClassRule
+  public static final CustomCcmRule CCM_RULE =
+      CustomCcmRule.builder()
+          .withCassandraConfiguration("authenticator", "PasswordAuthenticator")
+          .withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0")
+          .build();
+
+  @BeforeClass
+  public static void sleepForAuth() {
+    if (CCM_RULE.getCassandraVersion().compareTo(Version.V2_2_0) < 0) {
+      // Sleep for 1 second to allow C* auth to do its work. This is only needed for 2.1
+      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+    }
+  }
+
+  @Test
+  public void should_connect_with_credentials() {
+    DriverConfigLoader loader =
+        SessionUtils.configLoaderBuilder()
+            .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class)
+            .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "cassandra")
+            .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "cassandra")
+            .build();
+    try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) {
+      session.execute("select * from system.local");
+    }
+  }
+
+  @Test
+  public void should_connect_with_programmatic_credentials() {
+
+    SessionBuilder<?, ?> builder =
+        SessionUtils.baseBuilder()
+            .addContactEndPoints(CCM_RULE.getContactPoints())
+            .withAuthCredentials("cassandra", "cassandra");
+
+    try (CqlSession session = (CqlSession) builder.build()) {
+      session.execute("select * from system.local");
+    }
+  }
+
+  @Test
+  public void should_connect_with_programmatic_provider() {
+
+    AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("cassandra", "cassandra");
+    SessionBuilder<?, ?> builder =
+        SessionUtils.baseBuilder()
+            .addContactEndPoints(CCM_RULE.getContactPoints())
+            // Open more than one connection in order to validate that the provider is creating
+            // valid Credentials for every invocation of PlainTextAuthProviderBase.getCredentials.
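+            // (the provider is expected to return a fresh Credentials object for each handshake,
+            // since the driver wipes the credential buffers after use)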
+            .withConfigLoader(
+                SessionUtils.configLoaderBuilder()
+                    .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 4)
+                    .build())
+            .withAuthProvider(authProvider);
+
+    try (CqlSession session = (CqlSession) builder.build()) {
+      session.execute("select * from system.local");
+    }
+  }
+
+  @Test(expected = AllNodesFailedException.class)
+  public void should_not_connect_with_invalid_credentials() {
+    DriverConfigLoader loader =
+        SessionUtils.configLoaderBuilder()
+            .withClass(DefaultDriverOption.AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class)
+            .withString(DefaultDriverOption.AUTH_PROVIDER_USER_NAME, "baduser")
+            .withString(DefaultDriverOption.AUTH_PROVIDER_PASSWORD, "badpass")
+            .build();
+    try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) {
+      session.execute("select * from system.local");
+    }
+  }
+
+  @Test(expected = AllNodesFailedException.class)
+  public void should_not_connect_with_invalid_programmatic_credentials() {
+    SessionBuilder<?, ?> builder =
+        SessionUtils.baseBuilder()
+            .addContactEndPoints(CCM_RULE.getContactPoints())
+            .withAuthCredentials("baduser", "badpass");
+
+    try (CqlSession session = (CqlSession) builder.build()) {
+      session.execute("select * from system.local");
+    }
+  }
+
+  @Test(expected = AllNodesFailedException.class)
+  public void should_not_connect_with_invalid_programmatic_provider() {
+
+    AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("baduser", "badpass");
+    SessionBuilder<?, ?> builder =
+        SessionUtils.baseBuilder()
+            .addContactEndPoints(CCM_RULE.getContactPoints())
+            .withAuthProvider(authProvider);
+
+    try (CqlSession session = (CqlSession) builder.build()) {
+      session.execute("select * from system.local");
+    }
+  }
+
+  @Test(expected = AllNodesFailedException.class)
+  public void should_not_connect_without_credentials() {
+    try (CqlSession session = SessionUtils.newSession(CCM_RULE)) {
+      session.execute("select * from system.local");
+    }
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/compression/DirectCompressionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/DirectCompressionIT.java
similarity index 70%
rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/compression/DirectCompressionIT.java
rename to integration-tests/src/test/java/com/datastax/oss/driver/core/compression/DirectCompressionIT.java
index e788e5352f8..3dad08f4de6 100644
--- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/compression/DirectCompressionIT.java
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/DirectCompressionIT.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,12 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package com.datastax.oss.driver.api.core.compression;
+package com.datastax.oss.driver.core.compression;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.offset;
 
 import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.Version;
 import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
 import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
 import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
@@ -26,10 +29,12 @@
 import com.datastax.oss.driver.api.core.cql.Row;
 import com.datastax.oss.driver.api.core.cql.SimpleStatement;
 import com.datastax.oss.driver.api.testinfra.ccm.CcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
 import com.datastax.oss.driver.api.testinfra.session.SessionRule;
 import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
 import com.datastax.oss.driver.categories.ParallelizableTests;
 import java.time.Duration;
+import org.junit.Assume;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -40,21 +45,22 @@
 @Category(ParallelizableTests.class)
 public class DirectCompressionIT {
 
-  private static CcmRule ccmRule = CcmRule.getInstance();
+  private static final CcmRule CCM_RULE = CcmRule.getInstance();
 
-  private static SessionRule<CqlSession> schemaSessionRule =
-      SessionRule.builder(ccmRule)
+  private static final SessionRule<CqlSession> SCHEMA_SESSION_RULE =
+      SessionRule.builder(CCM_RULE)
           .withConfigLoader(
               SessionUtils.configLoaderBuilder()
                   .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
                   .build())
          .build();
 
-  @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(schemaSessionRule);
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SCHEMA_SESSION_RULE);
 
   @BeforeClass
   public static void setup() {
-    schemaSessionRule
+    SCHEMA_SESSION_RULE
         .session()
         .execute("CREATE TABLE test (k text PRIMARY KEY, t text, i int, f float)");
   }
@@ -68,6 +74,11 @@ public static void setup() {
    */
   @Test
   public void should_execute_queries_with_snappy_compression() throws Exception {
+    Assume.assumeTrue(
+        "Snappy is not supported in OSS C* 4.0+ with protocol v5",
+        !CCM_RULE.isDistributionOf(BackendType.HCD)
+            && (CCM_RULE.isDistributionOf(BackendType.DSE)
+                || CCM_RULE.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) < 0));
     createAndCheckCluster("snappy");
   }
 
@@ -89,7 +100,7 @@ private void createAndCheckCluster(String compressorOption) {
             .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compressorOption)
             .build();
     try (CqlSession session =
-        SessionUtils.newSession(ccmRule, schemaSessionRule.keyspace(), loader)) {
+        SessionUtils.newSession(CCM_RULE, SCHEMA_SESSION_RULE.keyspace(), loader)) {
       // Run a couple of simple test queries
       ResultSet rs =
          session.execute(
@@ -111,7 +122,12 @@ private void createAndCheckCluster(String compressorOption) {
       // We are testing with small responses, so the compressed payload is not even guaranteed to be
       // smaller.
assertThat(executionInfo.getResponseSizeInBytes()).isGreaterThan(0); - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isGreaterThan(0); + if (session.getContext().getProtocolVersion().getCode() == 5) { + // in protocol v5, compression is done at segment level + assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1); + } else { + assertThat(executionInfo.getCompressedResponseSizeInBytes()).isGreaterThan(0); + } } } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/compression/HeapCompressionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/HeapCompressionIT.java similarity index 70% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/compression/HeapCompressionIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/compression/HeapCompressionIT.java index 809b0083ac3..a14c3b29b21 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/compression/HeapCompressionIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/compression/HeapCompressionIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,12 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */
-package com.datastax.oss.driver.api.core.compression;
+package com.datastax.oss.driver.core.compression;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.offset;
 
 import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.Version;
 import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
 import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
 import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
@@ -26,10 +29,12 @@
 import com.datastax.oss.driver.api.core.cql.Row;
 import com.datastax.oss.driver.api.core.cql.SimpleStatement;
 import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
 import com.datastax.oss.driver.api.testinfra.session.SessionRule;
 import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
 import com.datastax.oss.driver.categories.IsolatedTests;
 import java.time.Duration;
+import org.junit.Assume;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -45,21 +50,22 @@ public class HeapCompressionIT {
     System.setProperty("io.netty.noUnsafe", "true");
   }
 
-  private static CustomCcmRule ccmRule = CustomCcmRule.builder().build();
+  private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build();
 
-  private static SessionRule<CqlSession> schemaSessionRule =
-      SessionRule.builder(ccmRule)
+  private static final SessionRule<CqlSession> SCHEMA_SESSION_RULE =
+      SessionRule.builder(CCM_RULE)
          .withConfigLoader(
              SessionUtils.configLoaderBuilder()
                  .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
                  .build())
          .build();
 
-  @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(schemaSessionRule);
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SCHEMA_SESSION_RULE);
 
   @BeforeClass
   public static void setup() {
-    schemaSessionRule
+    SCHEMA_SESSION_RULE
        .session()
        .execute("CREATE TABLE test (k text PRIMARY KEY, t text, i int, f float)");
   }
@@ -72,6 +78,10 @@ public static void setup() {
    */
   @Test
   public void should_execute_queries_with_snappy_compression() throws Exception {
+    Assume.assumeTrue(
+        "Snappy is not supported in OSS C* 4.0+ with protocol v5",
+        CCM_RULE.isDistributionOf(BackendType.DSE)
+            || CCM_RULE.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) < 0);
    createAndCheckCluster("snappy");
  }
 
@@ -92,7 +102,7 @@ private void createAndCheckCluster(String compressorOption) {
             .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compressorOption)
             .build();
     try (CqlSession session =
-        SessionUtils.newSession(ccmRule, schemaSessionRule.keyspace(), loader)) {
+        SessionUtils.newSession(CCM_RULE, SCHEMA_SESSION_RULE.keyspace(), loader)) {
       // Run a couple of simple test queries
       ResultSet rs =
          session.execute(
@@ -114,7 +124,12 @@ private void createAndCheckCluster(String compressorOption) {
       // We are testing with small responses, so the compressed payload is not even guaranteed to be
       // smaller.
assertThat(executionInfo.getResponseSizeInBytes()).isGreaterThan(0); - assertThat(executionInfo.getCompressedResponseSizeInBytes()).isGreaterThan(0); + if (session.getContext().getProtocolVersion().getCode() == 5) { + // in protocol v5, compression is done at segment level + assertThat(executionInfo.getCompressedResponseSizeInBytes()).isEqualTo(-1); + } else { + assertThat(executionInfo.getCompressedResponseSizeInBytes()).isGreaterThan(0); + } } } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverConfigValidationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverConfigValidationIT.java similarity index 50% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverConfigValidationIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverConfigValidationIT.java index 60fb91fe6b9..e5056e05495 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverConfigValidationIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverConfigValidationIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,26 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.config; +package com.datastax.oss.driver.core.config; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import com.datastax.oss.driver.api.core.DriverExecutionException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import org.junit.Rule; +import java.util.Collections; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; @Category(ParallelizableTests.class) public class DriverConfigValidationIT { - @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); - @Rule public ExpectedException thrown = ExpectedException.none(); + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); @Test public void should_fail_to_init_with_invalid_policy() { @@ -43,28 +47,43 @@ public void should_fail_to_init_with_invalid_policy() { should_fail_to_init_with_invalid_policy(DefaultDriverOption.AUTH_PROVIDER_CLASS); should_fail_to_init_with_invalid_policy(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS); should_fail_to_init_with_invalid_policy(DefaultDriverOption.TIMESTAMP_GENERATOR_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.REQUEST_TRACKER_CLASS); should_fail_to_init_with_invalid_policy(DefaultDriverOption.REQUEST_THROTTLER_CLASS); - should_fail_to_init_with_invalid_policy(DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASS); - should_fail_to_init_with_invalid_policy( - DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASS); should_fail_to_init_with_invalid_policy(DefaultDriverOption.ADDRESS_TRANSLATOR_CLASS); } + @Test + public void should_fail_to_init_with_invalid_components() { + should_fail_to_init_with_invalid_components(DefaultDriverOption.REQUEST_TRACKER_CLASSES); + should_fail_to_init_with_invalid_components( + DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES); + should_fail_to_init_with_invalid_components( + DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES); + } + private void should_fail_to_init_with_invalid_policy(DefaultDriverOption option) { DriverConfigLoader loader = SessionUtils.configLoaderBuilder().withString(option, "AClassThatDoesNotExist").build(); - assertThatThrownBy(() -> SessionUtils.newSession(simulacron, loader)) + assertConfigError(option, loader); + } + + private void should_fail_to_init_with_invalid_components(DefaultDriverOption option) { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withStringList(option, Collections.singletonList("AClassThatDoesNotExist")) + .build(); + assertConfigError(option, loader); + } + + private void assertConfigError(DefaultDriverOption option, DriverConfigLoader loader) { + assertThatThrownBy(() -> SessionUtils.newSession(SIMULACRON_RULE, loader)) .satisfies( - error -> { - assertThat(error).isInstanceOf(DriverExecutionException.class); - assertThat(error.getCause()) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining( - "Can't find class AClassThatDoesNotExist " - + "(specified by " - + 
option.getPath() - + ")"); - }); + error -> + assertThat(error) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + "Can't find class AClassThatDoesNotExist " + + "(specified by " + + option.getPath() + + ")")); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileCcmIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileCcmIT.java new file mode 100644 index 00000000000..1eee9c304b6 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileCcmIT.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.config; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import java.util.concurrent.CompletionStage; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class DriverExecutionProfileCcmIT { + + @ClassRule public static final CcmRule CCM_RULE = CcmRule.getInstance(); + + @Test + public void should_use_profile_page_size() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 100) + .startProfile("smallpages") + .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 10) + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { + + CqlIdentifier keyspace = SessionUtils.uniqueKeyspaceId(); + DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); + SessionUtils.createKeyspace(session, keyspace, slowProfile); + + session.execute(String.format("USE %s", keyspace.asCql(false))); + + // load 500 rows (value beyond page size). 
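+    // (500 rows at the global page size of 100 is 5 full pages; at the profile's page size of
+    // 10 it is 50 pages. AsyncResultSet.remaining() counts only the rows of the current page.)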
+    session.execute(
+        SimpleStatement.builder(
+                "CREATE TABLE IF NOT EXISTS test (k int, v int, PRIMARY KEY (k,v))")
+            .setExecutionProfile(slowProfile)
+            .build());
+    PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (0, ?)");
+    BatchStatementBuilder bs =
+        BatchStatement.builder(DefaultBatchType.UNLOGGED).setExecutionProfile(slowProfile);
+    for (int i = 0; i < 500; i++) {
+      bs.addStatement(prepared.bind(i));
+    }
+    session.execute(bs.build());
+
+    String query = "SELECT * FROM test where k=0";
+    // Execute query without profile, should use global page size (100)
+    CompletionStage<AsyncResultSet> future = session.executeAsync(query);
+    AsyncResultSet result = CompletableFutures.getUninterruptibly(future);
+    assertThat(result.remaining()).isEqualTo(100);
+    result = CompletableFutures.getUninterruptibly(result.fetchNextPage());
+    // next fetch should also be 100 rows.
+    assertThat(result.remaining()).isEqualTo(100);
+
+    // Execute query with profile, should use profile page size
+    future =
+        session.executeAsync(
+            SimpleStatement.builder(query).setExecutionProfileName("smallpages").build());
+    result = CompletableFutures.getUninterruptibly(future);
+    assertThat(result.remaining()).isEqualTo(10);
+    // next fetch should also be 10 rows.
+    result = CompletableFutures.getUninterruptibly(result.fetchNextPage());
+    assertThat(result.remaining()).isEqualTo(10);
+
+    SessionUtils.dropKeyspace(session, keyspace, slowProfile);
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfileReloadIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileReloadIT.java
similarity index 78%
rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfileReloadIT.java
rename to integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileReloadIT.java
index 8f03e8aad94..02bea70405e 100644
--- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfileReloadIT.java
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileReloadIT.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,12 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ -package com.datastax.oss.driver.api.core.config; +package com.datastax.oss.driver.core.config; import static com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader.DEFAULT_CONFIG_SUPPLIER; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import static org.junit.Assert.fail; import com.datastax.oss.driver.api.core.CqlSession; @@ -34,18 +37,24 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import org.junit.Rule; +import org.junit.Before; +import org.junit.ClassRule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class DriverExecutionProfileReloadIT { - @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(3)); + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); - @Rule public ExpectedException thrown = ExpectedException.none(); + @Before + public void clearPrimes() { + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + } @Test - public void should_periodically_reload_configuration() throws Exception { + public void should_periodically_reload_configuration() { String query = "mockquery"; // Define a loader which configures a reload interval of 2s and current value of configSource. AtomicReference configSource = new AtomicReference<>(""); @@ -61,9 +70,9 @@ public void should_periodically_reload_configuration() throws Exception { (CqlSession) SessionUtils.baseBuilder() .withConfigLoader(loader) - .addContactEndPoints(simulacron.getContactPoints()) + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) .build()) { - simulacron.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); + SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); // Expect timeout since default session timeout is 2s try { @@ -83,7 +92,7 @@ public void should_periodically_reload_configuration() throws Exception { } @Test - public void should_reload_configuration_when_event_fired() throws Exception { + public void should_reload_configuration_when_event_fired() { String query = "mockquery"; // Define a loader which configures no automatic reloads and current value of configSource. AtomicReference configSource = new AtomicReference<>(""); @@ -99,9 +108,9 @@ public void should_reload_configuration_when_event_fired() throws Exception { (CqlSession) SessionUtils.baseBuilder() .withConfigLoader(loader) - .addContactEndPoints(simulacron.getContactPoints()) + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) .build()) { - simulacron.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); + SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); // Expect timeout since default session timeout is 2s try { @@ -122,7 +131,7 @@ public void should_reload_configuration_when_event_fired() throws Exception { } @Test - public void should_not_allow_dynamically_adding_profile() throws Exception { + public void should_not_allow_dynamically_adding_profile() { String query = "mockquery"; // Define a loader which configures a reload interval of 2s and current value of configSource. 
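The recurring pattern in this refactoring, replacing the deprecated `ExpectedException` rule with AssertJ's `catchThrowable`, keeps the assertion next to the call that throws. A self-contained sketch of the pattern (the throwing method is a stand-in for a driver call):

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.catchThrowable;

import org.junit.Test;

public class CatchThrowableSketch {
  @Test
  public void should_fail_with_unknown_profile() {
    // Capture the exception instead of declaring it up front with a rule:
    Throwable t = catchThrowable(() -> doSomethingThatThrows());
    assertThat(t)
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining("Unknown profile");
  }

  private void doSomethingThatThrows() {
    throw new IllegalArgumentException("Unknown profile 'IDONTEXIST'. Check your configuration.");
  }
}
```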
AtomicReference configSource = new AtomicReference<>(""); @@ -136,9 +145,9 @@ public void should_not_allow_dynamically_adding_profile() throws Exception { (CqlSession) SessionUtils.baseBuilder() .withConfigLoader(loader) - .addContactEndPoints(simulacron.getContactPoints()) + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) .build()) { - simulacron.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); + SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); // Expect failure because profile doesn't exist. try { @@ -154,13 +163,18 @@ public void should_not_allow_dynamically_adding_profile() throws Exception { // Execute again, should expect to fail again because doesn't allow to dynamically define // profile. - thrown.expect(IllegalArgumentException.class); - session.execute(SimpleStatement.builder(query).setExecutionProfileName("slow").build()); + Throwable t = + catchThrowable( + () -> + session.execute( + SimpleStatement.builder(query).setExecutionProfileName("slow").build())); + + assertThat(t).isInstanceOf(IllegalArgumentException.class); } } @Test - public void should_reload_profile_config_when_reloading_config() throws Exception { + public void should_reload_profile_config_when_reloading_config() { String query = "mockquery"; // Define a loader which configures a reload interval of 2s and current value of configSource. // Define initial profile settings so it initially exists. @@ -178,9 +192,9 @@ public void should_reload_profile_config_when_reloading_config() throws Exceptio (CqlSession) SessionUtils.baseBuilder() .withConfigLoader(loader) - .addContactEndPoints(simulacron.getContactPoints()) + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) .build()) { - simulacron.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); + SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); // Expect failure because profile doesn't exist. try { diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfileIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileSimulacronIT.java similarity index 51% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfileIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileSimulacronIT.java index f261ac0d3c4..f5131a2bfa3 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/config/DriverExecutionProfileIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/DriverExecutionProfileSimulacronIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
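When a test (or application) needs new configuration to take effect immediately rather than waiting for the reload interval, the loader can also be reloaded on demand. A minimal sketch, assuming the session's loader supports reloading (`forceReload` is a hypothetical helper, not part of the driver):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

public class ForceReloadSketch {
  /** Triggers an immediate config reload instead of waiting for the periodic interval. */
  static void forceReload(CqlSession session) {
    DriverConfigLoader loader = session.getContext().getConfigLoader();
    if (loader.supportsReloading()) {
      // The returned stage completes with true if the reloaded config differs from the old one.
      boolean changed = loader.reload().toCompletableFuture().join();
      System.out.println("Configuration changed: " + changed);
    }
  }
}
```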
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,63 +15,61 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.config; +package com.datastax.oss.driver.core.config; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import static org.junit.Assert.fail; import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.DriverTimeoutException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BatchStatement; -import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; -import com.datastax.oss.driver.api.core.cql.DefaultBatchType; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.servererrors.ServerError; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; import com.datastax.oss.driver.categories.ParallelizableTests; -import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; import com.datastax.oss.simulacron.common.cluster.QueryLog; import java.time.Duration; import java.util.Optional; -import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; -import org.junit.Rule; +import org.junit.Before; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; @Category(ParallelizableTests.class) -public class DriverExecutionProfileIT { - - @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(3)); +public class DriverExecutionProfileSimulacronIT { - @Rule public CcmRule ccm = CcmRule.getInstance(); + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); - @Rule public ExpectedException thrown = ExpectedException.none(); - - // TODO: Test with reprepare on all nodes profile configuration + @Before + public void clearPrimes() { + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + } @Test public void should_fail_if_config_profile_specified_doesnt_exist() { - try (CqlSession session = SessionUtils.newSession(simulacron)) { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { SimpleStatement statement = SimpleStatement.builder("select * from system.local") .setExecutionProfileName("IDONTEXIST") .build(); - 
thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Unknown profile 'IDONTEXIST'. Check your configuration"); - session.execute(statement); + Throwable t = catchThrowable(() -> session.execute(statement)); + + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Unknown profile 'IDONTEXIST'. Check your configuration."); } } @@ -81,10 +81,10 @@ public void should_use_profile_request_timeout() { .startProfile("olap") .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)) .build(); - try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, loader)) { String query = "mockquery"; // configure query with delay of 4 seconds. - simulacron.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); + SIMULACRON_RULE.cluster().prime(when(query).then(noRows()).delay(4, TimeUnit.SECONDS)); // Execute query without profile, should timeout with default session timeout (2s). try { @@ -106,10 +106,10 @@ public void should_use_profile_default_idempotence() { .startProfile("idem") .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) .build(); - try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, loader)) { String query = "mockquery"; // configure query with server error which should invoke onRequestError in retry policy. - simulacron.cluster().prime(when(query).then(serverError("fail"))); + SIMULACRON_RULE.cluster().prime(when(query).then(serverError("fail"))); // Execute query without profile, should fail because couldn't be retried. try { @@ -120,8 +120,13 @@ public void should_use_profile_default_idempotence() { } // Execute query with profile, should retry on all hosts since query is idempotent. - thrown.expect(AllNodesFailedException.class); - session.execute(SimpleStatement.builder(query).setExecutionProfileName("idem").build()); + Throwable t = + catchThrowable( + () -> + session.execute( + SimpleStatement.builder(query).setExecutionProfileName("idem").build())); + + assertThat(t).isInstanceOf(AllNodesFailedException.class); } } @@ -133,14 +138,14 @@ public void should_use_profile_consistency() { .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "LOCAL_QUORUM") .withString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY, "LOCAL_SERIAL") .build(); - try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, loader)) { String query = "mockquery"; // Execute query without profile, should use default CLs (LOCAL_ONE, SERIAL). 
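A quick way to confirm which consistency levels a profile will actually use is to inspect the resolved config; options not overridden in a profile fall back to the default profile's values. A minimal sketch (`printConsistency` is a hypothetical helper, and the `cl` profile is assumed to exist in the session's configuration):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;

public class EffectiveConsistencySketch {
  static void printConsistency(CqlSession session) {
    DriverExecutionProfile def = session.getContext().getConfig().getDefaultProfile();
    DriverExecutionProfile cl = session.getContext().getConfig().getProfile("cl");
    // Options not set in "cl" are inherited from the default profile.
    System.out.println(def.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); // e.g. LOCAL_ONE
    System.out.println(cl.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); // e.g. LOCAL_QUORUM
  }
}
```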
session.execute(query); Optional log = - simulacron.cluster().getLogs().getQueryLogs().stream() + SIMULACRON_RULE.cluster().getLogs().getQueryLogs().stream() .filter(q -> q.getQuery().equals(query)) .findFirst(); @@ -152,13 +157,13 @@ public void should_use_profile_consistency() { assertThat(l.getSerialConsistency().toString()).isEqualTo("SERIAL"); }); - simulacron.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearLogs(); // Execute query with profile, should use profile CLs session.execute(SimpleStatement.builder(query).setExecutionProfileName("cl").build()); log = - simulacron.cluster().getLogs().getQueryLogs().stream() + SIMULACRON_RULE.cluster().getLogs().getQueryLogs().stream() .filter(q -> q.getQuery().equals(query)) .findFirst(); @@ -171,57 +176,4 @@ public void should_use_profile_consistency() { }); } } - - @Test - public void should_use_profile_page_size() { - DriverConfigLoader loader = - SessionUtils.configLoaderBuilder() - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 100) - .startProfile("smallpages") - .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 10) - .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { - - CqlIdentifier keyspace = SessionUtils.uniqueKeyspaceId(); - DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); - SessionUtils.createKeyspace(session, keyspace, slowProfile); - - session.execute(String.format("USE %s", keyspace.asCql(false))); - - // load 500 rows (value beyond page size). - session.execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k int, v int, PRIMARY KEY (k,v))") - .setExecutionProfile(slowProfile) - .build()); - PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (0, ?)"); - BatchStatementBuilder bs = - BatchStatement.builder(DefaultBatchType.UNLOGGED).setExecutionProfile(slowProfile); - for (int i = 0; i < 500; i++) { - bs.addStatement(prepared.bind(i)); - } - session.execute(bs.build()); - - String query = "SELECT * FROM test where k=0"; - // Execute query without profile, should use global page size (100) - CompletionStage future = session.executeAsync(query); - AsyncResultSet result = CompletableFutures.getUninterruptibly(future); - assertThat(result.remaining()).isEqualTo(100); - result = CompletableFutures.getUninterruptibly(result.fetchNextPage()); - // next fetch should also be 100 pages. - assertThat(result.remaining()).isEqualTo(100); - - // Execute query with profile, should use profile page size - future = - session.executeAsync( - SimpleStatement.builder(query).setExecutionProfileName("smallpages").build()); - result = CompletableFutures.getUninterruptibly(future); - assertThat(result.remaining()).isEqualTo(10); - // next fetch should also be 10 pages. - result = CompletableFutures.getUninterruptibly(result.fetchNextPage()); - assertThat(result.remaining()).isEqualTo(10); - - SessionUtils.dropKeyspace(session, keyspace, slowProfile); - } - } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/config/MapBasedConfigLoaderIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/MapBasedConfigLoaderIT.java new file mode 100644 index 00000000000..b8a6accce69 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/config/MapBasedConfigLoaderIT.java @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.config; + +import static com.datastax.oss.simulacron.common.codec.ConsistencyLevel.QUORUM; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.api.core.config.TypedDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; +import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.api.core.servererrors.WriteType; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.config.ConfigChangeEvent; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class MapBasedConfigLoaderIT { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + @Before + public void setup() { + SIMULACRON_RULE.cluster().clearPrimes(true); + } + + /** + * Checks that runtime changes to the pool size are reflected in the driver. This is a special + * case because unlike other options, the driver does not re-read the option at regular intervals; + * instead, it relies on the {@link ConfigChangeEvent} being fired. 
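In application code, the runtime resizing this test verifies looks like the following: build the session from an `OptionsMap`, then mutate the map later; the map-based loader propagates the change to the running session. A minimal sketch (the datacenter name is a placeholder, and contact points default to localhost):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.config.OptionsMap;
import com.datastax.oss.driver.api.core.config.TypedDriverOption;

public class RuntimePoolResizeSketch {
  public static void main(String[] args) {
    OptionsMap options = OptionsMap.driverDefaults();
    try (CqlSession session =
        CqlSession.builder()
            .withLocalDatacenter("dc1") // placeholder
            .withConfigLoader(DriverConfigLoader.fromMap(options))
            .build()) {
      // Growing the pool takes effect without restarting the session:
      options.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2);
    }
  }
}
```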
+ */ + @Test + public void should_resize_pool_when_config_changes() { + OptionsMap optionsMap = OptionsMap.driverDefaults(); + + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) + .withLocalDatacenter("dc1") + .withConfigLoader(DriverConfigLoader.fromMap(optionsMap)) + .build()) { + + Node node = session.getMetadata().getNodes().values().iterator().next(); + assertThat(node.getOpenConnections()).isEqualTo(2); // control connection + pool (default 1) + + optionsMap.put(TypedDriverOption.CONNECTION_POOL_LOCAL_SIZE, 2); + + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> node.getOpenConnections() == 3); + } + } + + /** Checks that profiles that have specific policy options will get their own policy instance. */ + @Test + public void should_create_policies_per_profile() { + // Given + // a query that throws UNAVAILABLE + String mockQuery = "mock query"; + SIMULACRON_RULE.cluster().prime(when(mockQuery).then(unavailable(QUORUM, 3, 2))); + + // a default profile that uses the default retry policy, and an alternate profile that uses a + // policy that ignores all errors + OptionsMap optionsMap = OptionsMap.driverDefaults(); + String alternateProfile = "profile1"; + optionsMap.put( + alternateProfile, TypedDriverOption.RETRY_POLICY_CLASS, IgnoreAllPolicy.class.getName()); + + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) + .withLocalDatacenter("dc1") + .withConfigLoader(DriverConfigLoader.fromMap(optionsMap)) + .build()) { + + // When + // executing the query for the default profile + SimpleStatement defaultProfileStatement = SimpleStatement.newInstance(mockQuery); + assertThatThrownBy(() -> session.execute(defaultProfileStatement)) + .satisfies( + t -> { + // Then + // the UNAVAILABLE error is surfaced + assertThat(t).isInstanceOf(AllNodesFailedException.class); + AllNodesFailedException anfe = (AllNodesFailedException) t; + assertThat(anfe.getAllErrors()).hasSize(1); + List nodeErrors = anfe.getAllErrors().values().iterator().next(); + assertThat(nodeErrors).hasSize(1); + assertThat(nodeErrors.get(0)).isInstanceOf(UnavailableException.class); + }); + + // When + // executing the query for the alternate profile + SimpleStatement alternateProfileStatement = + SimpleStatement.newInstance(mockQuery).setExecutionProfileName(alternateProfile); + ResultSet rs = session.execute(alternateProfileStatement); + + // Then + // the error is ignored + assertThat(rs.one()).isNull(); + } + } + + public static class IgnoreAllPolicy implements RetryPolicy { + + public IgnoreAllPolicy( + @SuppressWarnings("unused") DriverContext context, + @SuppressWarnings("unused") String profile) { + // nothing to do + } + + @Override + @Deprecated + public RetryDecision onReadTimeout( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int blockFor, + int received, + boolean dataPresent, + int retryCount) { + return RetryDecision.IGNORE; + } + + @Override + @Deprecated + public RetryDecision onWriteTimeout( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + @NonNull WriteType writeType, + int blockFor, + int received, + int retryCount) { + return RetryDecision.IGNORE; + } + + @Override + @Deprecated + public RetryDecision onUnavailable( + @NonNull Request request, + @NonNull ConsistencyLevel cl, + int required, + int alive, + int retryCount) { + return RetryDecision.IGNORE; + } + + @Override + @Deprecated + public RetryDecision 
onRequestAborted( + @NonNull Request request, @NonNull Throwable error, int retryCount) { + return RetryDecision.IGNORE; + } + + @Override + @Deprecated + public RetryDecision onErrorResponse( + @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { + return RetryDecision.IGNORE; + } + + @Override + public void close() { + // nothing to do + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/connection/ChannelSocketOptionsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/ChannelSocketOptionsIT.java similarity index 82% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/connection/ChannelSocketOptionsIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/connection/ChannelSocketOptionsIT.java index 72ae2fdad61..177a0cd0a24 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/connection/ChannelSocketOptionsIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/ChannelSocketOptionsIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.connection; +package com.datastax.oss.driver.core.connection; import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_KEEP_ALIVE; import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SOCKET_LINGER_INTERVAL; @@ -49,7 +51,8 @@ @Category(ParallelizableTests.class) public class ChannelSocketOptionsIT { - private static SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); private static DriverConfigLoader loader = SessionUtils.configLoaderBuilder() @@ -61,14 +64,15 @@ public class ChannelSocketOptionsIT { .withInt(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE, 123456) .build(); - private static SessionRule sessionRule = - SessionRule.builder(simulacron).withConfigLoader(loader).build(); + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE).withConfigLoader(loader).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); @Test public void should_report_socket_options() { - Session session = sessionRule.session(); + Session session = SESSION_RULE.session(); DriverExecutionProfile config = session.getContext().getConfig().getDefaultProfile(); assertThat(config.getBoolean(SOCKET_TCP_NODELAY)).isTrue(); assertThat(config.getBoolean(SOCKET_KEEP_ALIVE)).isFalse(); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/connection/FrameLengthIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/FrameLengthIT.java similarity index 76% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/connection/FrameLengthIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/connection/FrameLengthIT.java index 5366d76e31b..887a578f7c4 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/connection/FrameLengthIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/FrameLengthIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
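For reference, the socket options asserted in this test correspond one-to-one to `DefaultDriverOption` keys and can be set programmatically. A minimal sketch (the values shown are illustrative, not recommendations):

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

public class SocketOptionsSketch {
  static DriverConfigLoader socketOptionsLoader() {
    return DriverConfigLoader.programmaticBuilder()
        .withBoolean(DefaultDriverOption.SOCKET_TCP_NODELAY, true)
        .withBoolean(DefaultDriverOption.SOCKET_KEEP_ALIVE, false)
        .withInt(DefaultDriverOption.SOCKET_LINGER_INTERVAL, 10)
        .withInt(DefaultDriverOption.SOCKET_RECEIVE_BUFFER_SIZE, 123456)
        .withInt(DefaultDriverOption.SOCKET_SEND_BUFFER_SIZE, 123456)
        .build();
  }
}
```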
*/ -package com.datastax.oss.driver.api.core.connection; +package com.datastax.oss.driver.core.connection; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; @@ -25,10 +27,11 @@ import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.connection.FrameTooLongException; import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; import com.datastax.oss.driver.api.testinfra.session.SessionRule; @@ -42,7 +45,7 @@ import java.nio.ByteBuffer; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -51,7 +54,8 @@ @Category(ParallelizableTests.class) public class FrameLengthIT { - private static SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); private static DriverConfigLoader loader = SessionUtils.configLoaderBuilder() @@ -61,10 +65,11 @@ public class FrameLengthIT { .withBytes(DefaultDriverOption.PROTOCOL_MAX_FRAME_LENGTH, 100 * 1024) .build(); - private static SessionRule sessionRule = - SessionRule.builder(simulacron).withConfigLoader(loader).build(); + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE).withConfigLoader(loader).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); private static final SimpleStatement LARGE_QUERY = SimpleStatement.newInstance("select * from foo").setIdempotent(true); @@ -73,21 +78,21 @@ public class FrameLengthIT { private static final Buffer ONE_HUNDRED_KB = ByteBuffer.allocate(100 * 1024).limit(100 * 1024); - @Before - public void primeQueries() { - simulacron + @BeforeClass + public static void primeQueries() { + SIMULACRON_RULE .cluster() .prime( when(LARGE_QUERY.getQuery()) .then(rows().row("result", ONE_HUNDRED_KB).columnTypes("result", "blob").build())); - simulacron + SIMULACRON_RULE .cluster() .prime(when(SLOW_QUERY.getQuery()).then(noRows()).delay(60, TimeUnit.SECONDS)); } @Test(expected = FrameTooLongException.class) public void should_fail_if_request_exceeds_max_frame_length() { - sessionRule + SESSION_RULE .session() .execute(SimpleStatement.newInstance("insert into foo (k) values (?)", ONE_HUNDRED_KB)); } @@ -95,9 +100,9 @@ public void should_fail_if_request_exceeds_max_frame_length() { @Test public void should_fail_if_response_exceeds_max_frame_length() { CompletionStage slowResultFuture = - sessionRule.session().executeAsync(SLOW_QUERY); + SESSION_RULE.session().executeAsync(SLOW_QUERY); try { - sessionRule.session().execute(LARGE_QUERY); + 
SESSION_RULE.session().execute(LARGE_QUERY); fail("Expected a " + FrameTooLongException.class.getSimpleName()); } catch (FrameTooLongException e) { // expected @@ -119,9 +124,9 @@ public AlwaysRetryAbortedPolicy(DriverContext context, String profileName) { } @Override - public RetryDecision onRequestAborted( + public RetryVerdict onRequestAbortedVerdict( @NonNull Request request, @NonNull Throwable error, int retryCount) { - return RetryDecision.RETRY_NEXT; + return RetryVerdict.RETRY_NEXT; } } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/NettyResourceLeakDetectionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/NettyResourceLeakDetectionIT.java new file mode 100644 index 00000000000..c605db151df --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/connection/NettyResourceLeakDetectionIT.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.connection; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.IsolatedTests; +import com.datastax.oss.driver.shaded.guava.common.base.Strings; +import com.datastax.oss.protocol.internal.Segment; +import com.datastax.oss.protocol.internal.util.Bytes; +import io.netty.util.ResourceLeakDetector; +import io.netty.util.ResourceLeakDetector.Level; +import java.nio.ByteBuffer; +import java.util.List; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import 
org.junit.rules.TestRule; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.slf4j.LoggerFactory; + +@Category(IsolatedTests.class) +@RunWith(MockitoJUnitRunner.class) +public class NettyResourceLeakDetectionIT { + + static { + ResourceLeakDetector.setLevel(Level.PARANOID); + } + + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + // Separately use BackendRequirementRule with @Rule so backend requirements are evaluated for each + // test method. + @Rule public final BackendRequirementRule backendRequirementRule = new BackendRequirementRule(); + + private static final ByteBuffer LARGE_PAYLOAD = + Bytes.fromHexString("0x" + Strings.repeat("ab", Segment.MAX_PAYLOAD_LENGTH + 100)); + + @Mock private Appender appender; + + @BeforeClass + public static void createTables() { + CqlSession session = SESSION_RULE.session(); + DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); + session.execute( + SimpleStatement.newInstance( + "CREATE TABLE IF NOT EXISTS leak_test_small (key int PRIMARY KEY, value int)") + .setExecutionProfile(slowProfile)); + session.execute( + SimpleStatement.newInstance( + "CREATE TABLE IF NOT EXISTS leak_test_large (key int PRIMARY KEY, value blob)") + .setExecutionProfile(slowProfile)); + } + + @Before + public void setupLogger() { + Logger logger = (Logger) LoggerFactory.getLogger(ResourceLeakDetector.class); + logger.setLevel(ch.qos.logback.classic.Level.ERROR); + logger.addAppender(appender); + } + + @After + public void resetLogger() { + Logger logger = (Logger) LoggerFactory.getLogger(ResourceLeakDetector.class); + logger.detachAppender(appender); + } + + @Test + public void should_not_leak_uncompressed() { + doLeakDetectionTest(SESSION_RULE.session()); + } + + @Test + public void should_not_leak_compressed_lz4() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, "lz4") + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace(), loader)) { + doLeakDetectionTest(session); + } + } + + @BackendRequirement( + type = BackendType.DSE, + description = "Snappy is not supported in OSS C* 4.0+ with protocol v5") + @BackendRequirement( + type = BackendType.CASSANDRA, + maxExclusive = "4.0.0", + description = "Snappy is not supported in OSS C* 4.0+ with protocol v5") + @Test + public void should_not_leak_compressed_snappy() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, "snappy") + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace(), loader)) { + doLeakDetectionTest(session); + } + } + + private void doLeakDetectionTest(CqlSession session) { + for (int i = 0; i < 10; i++) { + testSmallMessages(session); + verify(appender, never()).doAppend(any()); + System.gc(); + testLargeMessages(session); + verify(appender, never()).doAppend(any()); + System.gc(); + } + } + + private void testSmallMessages(CqlSession session) { + // trigger some activity using small requests and responses; in v5, these messages should fit in + // one single, self-contained segment + for (int i = 0; i < 1000; i++) { + session.execute("INSERT INTO leak_test_small (key, 
value) VALUES (?,?)", i, i); + } + List rows = session.execute("SELECT value FROM leak_test_small").all(); + assertThat(rows).hasSize(1000); + for (Row row : rows) { + assertThat(row).isNotNull(); + int actual = row.getInt(0); + assertThat(actual).isGreaterThanOrEqualTo(0).isLessThan(1000); + } + } + + private void testLargeMessages(CqlSession session) { + // trigger some activity using large requests and responses; in v5, these messages are likely to + // be split in multiple segments + for (int i = 0; i < 100; i++) { + session.execute( + "INSERT INTO leak_test_large (key, value) VALUES (?,?)", i, LARGE_PAYLOAD.duplicate()); + } + List rows = session.execute("SELECT value FROM leak_test_large").all(); + assertThat(rows).hasSize(100); + for (Row row : rows) { + assertThat(row).isNotNull(); + ByteBuffer actual = row.getByteBuffer(0); + assertThat(actual).isEqualTo(LARGE_PAYLOAD.duplicate()); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/context/LifecycleListenerIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/context/LifecycleListenerIT.java similarity index 77% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/context/LifecycleListenerIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/context/LifecycleListenerIT.java index 58600623734..3bd4add3003 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/context/LifecycleListenerIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/context/LifecycleListenerIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
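The leak-detection technique above is reusable in other integration tests: raise Netty's leak detector to `PARANOID`, capture its error logs with a mocked logback appender, and assert that nothing was appended after the workload. A condensed sketch of the moving parts (the actual driver workload is elided):

```java
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.Appender;
import io.netty.util.ResourceLeakDetector;
import org.slf4j.LoggerFactory;

public class LeakCheckSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // PARANOID tracks every buffer, so leaks surface deterministically in tests.
    ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);
    Appender<ILoggingEvent> appender = (Appender<ILoggingEvent>) mock(Appender.class);
    Logger leakLogger = (Logger) LoggerFactory.getLogger(ResourceLeakDetector.class);
    leakLogger.setLevel(Level.ERROR);
    leakLogger.addAppender(appender);

    // ... run the driver workload here, then give the detector a chance to report:
    System.gc();
    verify(appender, never()).doAppend(any());
  }
}
```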
*/ -package com.datastax.oss.driver.api.core.context; +package com.datastax.oss.driver.core.context; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; +import static org.awaitility.Awaitility.await; import com.datastax.oss.driver.api.core.AllNodesFailedException; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; @@ -36,6 +37,7 @@ import edu.umd.cs.findbugs.annotations.NonNull; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -44,7 +46,7 @@ public class LifecycleListenerIT { @ClassRule - public static SimulacronRule simulacronRule = + public static final SimulacronRule SIMULACRON_RULE = new SimulacronRule(ClusterSpec.builder().withNodes(1)); @Test @@ -54,11 +56,11 @@ public void should_notify_listener_of_init_and_shutdown() { assertThat(listener.closed).isFalse(); try (CqlSession session = newSession(listener)) { - ConditionChecker.checkThat(() -> listener.ready).before(1, SECONDS).becomesTrue(); + await().atMost(1, TimeUnit.SECONDS).until(() -> listener.ready); assertThat(listener.closed).isFalse(); } assertThat(listener.ready).isTrue(); - ConditionChecker.checkThat(() -> listener.closed).before(1, SECONDS).becomesTrue(); + await().atMost(1, TimeUnit.SECONDS).until(() -> listener.closed); } @Test @@ -67,21 +69,21 @@ public void should_not_notify_listener_when_init_fails() { assertThat(listener.ready).isFalse(); assertThat(listener.closed).isFalse(); - simulacronRule.cluster().rejectConnections(0, RejectScope.STOP); + SIMULACRON_RULE.cluster().rejectConnections(0, RejectScope.STOP); try (CqlSession session = newSession(listener)) { fail("Expected AllNodesFailedException"); } catch (AllNodesFailedException ignored) { } finally { - simulacronRule.cluster().acceptConnections(); + SIMULACRON_RULE.cluster().acceptConnections(); } assertThat(listener.ready).isFalse(); - ConditionChecker.checkThat(() -> listener.closed).before(1, SECONDS).becomesTrue(); + await().atMost(1, TimeUnit.SECONDS).until(() -> listener.closed); } private CqlSession newSession(TestLifecycleListener listener) { TestContext context = new TestContext(new DefaultDriverConfigLoader(), listener); return CompletableFutures.getUninterruptibly( - DefaultSession.init(context, simulacronRule.getContactPoints(), null)); + DefaultSession.init(context, SIMULACRON_RULE.getContactPoints(), null)); } public static class TestLifecycleListener implements LifecycleListener { diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/AsyncResultSetIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/AsyncResultSetIT.java similarity index 73% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/AsyncResultSetIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/AsyncResultSetIT.java index 83749d5259e..e109c28525e 100644 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/AsyncResultSetIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/AsyncResultSetIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,13 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; import static org.assertj.core.api.Assertions.assertThat; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.categories.ParallelizableTests; @@ -42,31 +52,35 @@ public class AsyncResultSetIT { private static final String PARTITION_KEY1 = "part"; private static final String PARTITION_KEY2 = "part2"; - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = - SessionRule.builder(ccm) + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, PAGE_SIZE) .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @BeforeClass public static void setupSchema() { // create table and load data across two partitions so we can test paging across tokens. 
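The paging assertions in this test work by chaining `fetchNextPage()` futures. For reference, a standalone version of that recursion, which is the usual way to drain all pages asynchronously (names are illustrative):

```java
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

public class AsyncPagingSketch {
  /** Recursively consumes every page, completing with the total row count. */
  static CompletionStage<Integer> countRows(AsyncResultSet rs, int countSoFar) {
    for (Row row : rs.currentPage()) {
      countSoFar++;
    }
    if (rs.hasMorePages()) {
      int count = countSoFar;
      return rs.fetchNextPage().thenCompose(next -> countRows(next, count));
    }
    return CompletableFuture.completedFuture(countSoFar);
  }
}
```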
- sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k0 text, k1 int, v int, PRIMARY KEY(k0, k1))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); + SchemaChangeSynchronizer.withLock( + () -> { + SESSION_RULE + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test (k0 text, k1 int, v int, PRIMARY KEY(k0, k1))") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + }); PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO test (k0, k1, v) VALUES (?, ?, ?)"); + SESSION_RULE.session().prepare("INSERT INTO test (k0, k1, v) VALUES (?, ?, ?)"); BatchStatementBuilder batchPart1 = BatchStatement.builder(DefaultBatchType.UNLOGGED); BatchStatementBuilder batchPart2 = BatchStatement.builder(DefaultBatchType.UNLOGGED); @@ -76,12 +90,12 @@ public static void setupSchema() { prepared.bind(PARTITION_KEY2, i + ROWS_PER_PARTITION, i + ROWS_PER_PARTITION)); } - sessionRule + SESSION_RULE .session() - .execute(batchPart1.setExecutionProfile(sessionRule.slowProfile()).build()); - sessionRule + .execute(batchPart1.setExecutionProfile(SESSION_RULE.slowProfile()).build()); + SESSION_RULE .session() - .execute(batchPart2.setExecutionProfile(sessionRule.slowProfile()).build()); + .execute(batchPart2.setExecutionProfile(SESSION_RULE.slowProfile()).build()); } @Test @@ -89,7 +103,7 @@ public void should_only_iterate_over_rows_in_current_page() throws Exception { // very basic test that just ensures that iterating over an AsyncResultSet only visits the first // page. CompletionStage result = - sessionRule + SESSION_RULE .session() .executeAsync( SimpleStatement.builder("SELECT * FROM test where k0 = ?") @@ -115,7 +129,7 @@ public void should_only_iterate_over_rows_in_current_page() throws Exception { public void should_iterate_over_all_pages_asynchronously_single_partition() throws Exception { // Validates async paging behavior over single partition. CompletionStage result = - sessionRule + SESSION_RULE .session() .executeAsync( SimpleStatement.builder("SELECT * FROM test where k0 = ?") @@ -126,14 +140,14 @@ public void should_iterate_over_all_pages_asynchronously_single_partition() thro PageStatistics stats = result.toCompletableFuture().get(); assertThat(stats.rows).isEqualTo(ROWS_PER_PARTITION); - assertThat(stats.pages).isEqualTo((int) (Math.ceil(ROWS_PER_PARTITION / (double) PAGE_SIZE))); + assertThat(stats.pages).isEqualTo((int) Math.ceil(ROWS_PER_PARTITION / (double) PAGE_SIZE)); } @Test public void should_iterate_over_all_pages_asynchronously_cross_partition() throws Exception { // Validates async paging behavior over a range query. 
CompletionStage result = - sessionRule + SESSION_RULE .session() .executeAsync("SELECT * FROM test") .thenCompose(new AsyncResultSetConsumingFunction()); @@ -141,8 +155,7 @@ public void should_iterate_over_all_pages_asynchronously_cross_partition() throw PageStatistics stats = result.toCompletableFuture().get(); assertThat(stats.rows).isEqualTo(ROWS_PER_PARTITION * 2); - assertThat(stats.pages) - .isEqualTo((int) (Math.ceil(ROWS_PER_PARTITION * 2 / (double) PAGE_SIZE))); + assertThat(stats.pages).isEqualTo((int) Math.ceil(ROWS_PER_PARTITION * 2 / (double) PAGE_SIZE)); } private static class PageStatistics { diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/BatchStatementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BatchStatementIT.java similarity index 83% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/BatchStatementIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BatchStatementIT.java index 85f21a7a79c..8b652638729 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/BatchStatementIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BatchStatementIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,16 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.categories.ParallelizableTests; @@ -39,11 +53,11 @@ @Category(ParallelizableTests.class) public class BatchStatementIT { - private CcmRule ccm = CcmRule.getInstance(); + private CcmRule ccmRule = CcmRule.getInstance(); - private SessionRule sessionRule = SessionRule.builder(ccm).build(); + private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); - @Rule public TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); @Rule public TestName name = new TestName(); @@ -59,13 +73,16 @@ public void createTable() { "CREATE TABLE counter3 (k0 text PRIMARY KEY, c counter)", }; - for (String schemaStatement : schemaStatements) { - sessionRule - .session() - .execute( - SimpleStatement.newInstance(schemaStatement) - .setExecutionProfile(sessionRule.slowProfile())); - } + SchemaChangeSynchronizer.withLock( + () -> { + for (String schemaStatement : schemaStatements) { + sessionRule + .session() + .execute( + SimpleStatement.newInstance(schemaStatement) + .setExecutionProfile(sessionRule.slowProfile())); + } + }); } @Test @@ -111,7 +128,7 @@ public void should_execute_batch_of_bound_statements_with_variables() { } @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_execute_batch_of_bound_statements_with_unset_values() { // Build a batch of batchCount statements with bound statements, each with their own positional // variables. 
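For context, the batches these tests assemble follow the standard builder pattern: prepare once, bind per row, add each bound statement to the builder, then execute the built batch. A minimal sketch (`ks.test` is hypothetical):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BatchStatement;
import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder;
import com.datastax.oss.driver.api.core.cql.DefaultBatchType;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;

public class BatchSketch {
  static void insertBatch(CqlSession session, int batchCount) {
    PreparedStatement ps = session.prepare("INSERT INTO ks.test (k0, k1, v) VALUES (?, ?, ?)");
    // UNLOGGED skips the batch log; appropriate when atomicity across rows is not required.
    BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED);
    for (int i = 0; i < batchCount; i++) {
      builder.addStatement(ps.bind("key", i, i));
    }
    session.execute(builder.build());
  }
}
```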
@@ -329,16 +346,20 @@ public void should_fail_counter_batch_with_non_counter_increment() { sessionRule.session().execute(batchStatement); } - @Test(expected = IllegalStateException.class) + @Test public void should_not_allow_unset_value_when_protocol_less_than_v4() { // CREATE TABLE test (k0 text, k1 int, v int, PRIMARY KEY (k0, k1)) DriverConfigLoader loader = SessionUtils.configLoaderBuilder() .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") .build(); - try (CqlSession v3Session = SessionUtils.newSession(ccm, sessionRule.keyspace(), loader)) { + try (CqlSession v3Session = SessionUtils.newSession(ccmRule, loader)) { + // Intentionally use fully qualified table here to avoid warnings as these are not supported + // by v3 protocol version, see JAVA-3068 PreparedStatement prepared = - v3Session.prepare("INSERT INTO test (k0, k1, v) values (?, ?, ?)"); + v3Session.prepare( + String.format( + "INSERT INTO %s.test (k0, k1, v) values (?, ?, ?)", sessionRule.keyspace())); BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.LOGGED); builder.addStatements( @@ -352,7 +373,9 @@ public void should_not_allow_unset_value_when_protocol_less_than_v4() { .unset(2) .build()); - v3Session.execute(builder.build()); + assertThatThrownBy(() -> v3Session.execute(builder.build())) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("Unset value at index"); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/BoundStatementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementCcmIT.java similarity index 58% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/BoundStatementIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementCcmIT.java index 7ea0b4c154a..9e4b62cd230 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/BoundStatementIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementCcmIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,84 +15,79 @@ * See the License for the specific language governing permissions and * limitations under the License. 
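The restriction exercised by `should_not_allow_unset_value_when_protocol_less_than_v4` is a protocol-level rule: unset variables are only representable in native protocol v4 and above, so a session forced to v3 must fail fast on the client side. A minimal sketch of the v4+ behavior (`ks.test` is hypothetical):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;

public class UnsetValueSketch {
  static void insertWithUnsetValue(CqlSession session) {
    PreparedStatement ps = session.prepare("INSERT INTO ks.test (k0, k1, v) VALUES (?, ?, ?)");
    // Leave the third variable unset: with protocol v4+ the server keeps the
    // existing value for that column instead of writing a tombstone.
    BoundStatement bound = ps.boundStatementBuilder("key", 0).unset(2).build();
    // With protocol V3 forced in the config, execute() throws IllegalStateException instead.
    session.execute(bound);
  }
}
```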
*/ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.query; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assumptions.assumeThat; import com.datastax.oss.driver.api.core.ConsistencyLevel; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverTimeoutException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.core.type.codec.CqlIntToStringCodec; import com.datastax.oss.driver.internal.core.DefaultProtocolFeature; import com.datastax.oss.driver.internal.core.ProtocolVersionRegistry; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; -import com.datastax.oss.driver.internal.core.type.codec.CqlIntToStringCodec; import com.datastax.oss.driver.internal.core.util.RoutingKey; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Execute; import com.datastax.oss.protocol.internal.util.Bytes; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import java.nio.ByteBuffer; import java.time.Duration; import java.util.List; import java.util.Map; import java.util.concurrent.CompletionStage; -import java.util.concurrent.TimeUnit; import java.util.function.Function; 
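`SchemaChangeSynchronizer` is imported here and wrapped around the DDL in the setup methods below, but its implementation is outside this diff. A plausible minimal shape, assuming it is nothing more than a JVM-wide lock that serializes schema changes across parallelizable tests, would be:

```java
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical sketch, not the actual test-infra class: serialize schema
// changes so parallel test classes never run concurrent DDL.
public class SchemaChangeSynchronizer {
  private static final ReentrantLock LOCK = new ReentrantLock();

  public static void withLock(Runnable body) {
    LOCK.lock();
    try {
      body.run();
    } finally {
      LOCK.unlock();
    }
  }
}
```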
import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestName; import org.junit.rules.TestRule; @Category(ParallelizableTests.class) -public class BoundStatementIT { - - @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); +public class BoundStatementCcmIT { - private CcmRule ccm = CcmRule.getInstance(); + private CcmRule ccmRule = CcmRule.getInstance(); - private final boolean atLeastV4 = ccm.getHighestProtocolVersion().getCode() >= 4; + private final boolean atLeastV4 = ccmRule.getHighestProtocolVersion().getCode() >= 4; private SessionRule sessionRule = - SessionRule.builder(ccm) + SessionRule.builder(ccmRule) .withConfigLoader( SessionUtils.configLoaderBuilder() .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 20) .build()) .build(); - @Rule public TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); @Rule public TestName name = new TestName(); - @Rule public ExpectedException thrown = ExpectedException.none(); - private static final String KEY = "test"; private static final int VALUE = 7; @@ -98,68 +95,72 @@ public class BoundStatementIT { @Before public void setupSchema() { // table where every column forms the primary key. - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test (k text, v int, PRIMARY KEY(k, v))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - for (int i = 0; i < 100; i++) { - sessionRule - .session() - .execute( - SimpleStatement.builder("INSERT INTO test (k, v) VALUES (?, ?)") - .addPositionalValues(KEY, i) - .build()); - } - - // table with simple primary key, single cell. - sessionRule - .session() - .execute( - SimpleStatement.builder("CREATE TABLE IF NOT EXISTS test2 (k text primary key, v0 int)") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - - // table with composite partition key - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test3 " - + "(pk1 int, pk2 int, v int, " - + "PRIMARY KEY ((pk1, pk2)))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); + SchemaChangeSynchronizer.withLock( + () -> { + sessionRule + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test (k text, v int, PRIMARY KEY(k, v))") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + for (int i = 0; i < 100; i++) { + sessionRule + .session() + .execute( + SimpleStatement.builder("INSERT INTO test (k, v) VALUES (?, ?)") + .addPositionalValues(KEY, i) + .build()); + } + + // table with simple primary key, single cell. 
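The unset-value tests in this class hinge on a distinction worth spelling out. A sketch, assuming a String `key` and the `test2` table from `setupSchema` (protocol v4+ only):

```java
// Values that are never bound stay "unset": the server skips the column and
// the existing cell survives. Binding an explicit null writes a tombstone.
PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)");
session.execute(prepared.bind(key, 42));    // v0 = 42
session.execute(prepared.bind(key));        // v0 implicitly unset: still 42, no tombstone
session.execute(prepared.bind(key, null));  // v0 = null: tombstone written
```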
+ sessionRule + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test2 (k text primary key, v0 int)") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + + // table with composite partition key + sessionRule + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test3 " + + "(pk1 int, pk2 int, v int, " + + "PRIMARY KEY ((pk1, pk2)))") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + }); } - @Before - public void clearPrimes() { - simulacron.cluster().clearLogs(); - simulacron.cluster().clearPrimes(true); - } - - @Test(expected = IllegalStateException.class) + @Test public void should_not_allow_unset_value_when_protocol_less_than_v4() { DriverConfigLoader loader = SessionUtils.configLoaderBuilder() .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") .build(); - try (CqlSession v3Session = SessionUtils.newSession(ccm, sessionRule.keyspace(), loader)) { - PreparedStatement prepared = v3Session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); + try (CqlSession v3Session = SessionUtils.newSession(ccmRule, loader)) { + // Intentionally use fully qualified table here to avoid warnings as these are not supported + // by v3 protocol version, see JAVA-3068 + PreparedStatement prepared = + v3Session.prepare( + String.format("INSERT INTO %s.test2 (k, v0) values (?, ?)", sessionRule.keyspace())); BoundStatement boundStatement = prepared.boundStatementBuilder().setString(0, name.getMethodName()).unset(1).build(); - v3Session.execute(boundStatement); + assertThatThrownBy(() -> v3Session.execute(boundStatement)) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("Unset value at index"); } } @Test public void should_not_write_tombstone_if_value_is_implicitly_unset() { assumeThat(atLeastV4).as("unset values require protocol V4+").isTrue(); - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); session.execute(prepared.bind(name.getMethodName(), VALUE)); @@ -174,7 +175,7 @@ public void should_not_write_tombstone_if_value_is_implicitly_unset() { @Test public void should_write_tombstone_if_value_is_explicitly_unset() { assumeThat(atLeastV4).as("unset values require protocol V4+").isTrue(); - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); session.execute(prepared.bind(name.getMethodName(), VALUE)); @@ -193,7 +194,7 @@ public void should_write_tombstone_if_value_is_explicitly_unset() { @Test public void should_write_tombstone_if_value_is_explicitly_unset_on_builder() { assumeThat(atLeastV4).as("unset values require protocol V4+").isTrue(); - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); session.execute(prepared.bind(name.getMethodName(), VALUE)); @@ -212,7 +213,7 @@ public void should_write_tombstone_if_value_is_explicitly_unset_on_builder() { @Test public void should_have_empty_result_definitions_for_update_query() { - try (CqlSession session = SessionUtils.newSession(ccm, 
sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); assertThat(prepared.getResultSetDefinitions()).hasSize(0); @@ -224,7 +225,7 @@ public void should_have_empty_result_definitions_for_update_query() { @Test public void should_bind_null_value_when_setting_values_in_bulk() { - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { PreparedStatement prepared = session.prepare("INSERT INTO test2 (k, v0) values (?, ?)"); BoundStatement boundStatement = prepared.bind(name.getMethodName(), null); assertThat(boundStatement.get(1, TypeCodecs.INT)).isNull(); @@ -254,7 +255,7 @@ public void should_allow_custom_codecs_when_setting_values_in_bulk() { @Test public void should_use_page_size_from_simple_statement() { - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { SimpleStatement st = SimpleStatement.builder("SELECT v FROM test").setPageSize(10).build(); PreparedStatement prepared = session.prepare(st); CompletionStage future = session.executeAsync(prepared.bind()); @@ -267,7 +268,7 @@ public void should_use_page_size_from_simple_statement() { @Test public void should_use_page_size() { - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { // set page size on simple statement, but will be unused since // overridden by bound statement. SimpleStatement st = SimpleStatement.builder("SELECT v FROM test").setPageSize(10).build(); @@ -281,132 +282,6 @@ public void should_use_page_size() { } } - @Test - public void should_use_consistencies_from_simple_statement() { - try (CqlSession session = SessionUtils.newSession(simulacron)) { - SimpleStatement st = - SimpleStatement.builder("SELECT * FROM test where k = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO) - .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - PreparedStatement prepared = session.prepare(st); - simulacron.cluster().clearLogs(); - // since query is unprimed, we use a text value for bind parameter as this is - // what simulacron expects for unprimed statements. - session.execute(prepared.bind("0")); - - List logs = simulacron.cluster().getLogs().getQueryLogs(); - assertThat(logs).hasSize(1); - - QueryLog log = logs.get(0); - - Message message = log.getFrame().message; - assertThat(message).isInstanceOf(Execute.class); - Execute execute = (Execute) message; - assertThat(execute.options.consistency) - .isEqualTo(DefaultConsistencyLevel.TWO.getProtocolCode()); - assertThat(execute.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL.getProtocolCode()); - } - } - - @Test - public void should_use_consistencies() { - try (CqlSession session = SessionUtils.newSession(simulacron)) { - // set consistencies on simple statement, but they will be unused since - // overridden by bound statement. 
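The precedence rule stated in that comment, condensed into a sketch (the `prepared` statement is assumed to carry TWO/LOCAL_SERIAL from the simple statement it was prepared from):

```java
// Settings on the bound statement override those inherited from the
// prepared statement: THREE/SERIAL are what actually go on the wire.
BoundStatement bound =
    prepared
        .boundStatementBuilder("0")
        .setConsistencyLevel(DefaultConsistencyLevel.THREE)
        .setSerialConsistencyLevel(DefaultConsistencyLevel.SERIAL)
        .build();
session.execute(bound);
```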
- SimpleStatement st = - SimpleStatement.builder("SELECT * FROM test where k = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO) - .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - PreparedStatement prepared = session.prepare(st); - simulacron.cluster().clearLogs(); - // since query is unprimed, we use a text value for bind parameter as this is - // what simulacron expects for unprimed statements. - session.execute( - prepared - .boundStatementBuilder("0") - .setConsistencyLevel(DefaultConsistencyLevel.THREE) - .setSerialConsistencyLevel(DefaultConsistencyLevel.SERIAL) - .build()); - - List logs = simulacron.cluster().getLogs().getQueryLogs(); - assertThat(logs).hasSize(1); - - QueryLog log = logs.get(0); - - Message message = log.getFrame().message; - assertThat(message).isInstanceOf(Execute.class); - Execute execute = (Execute) message; - assertThat(execute.options.consistency) - .isEqualTo(DefaultConsistencyLevel.THREE.getProtocolCode()); - assertThat(execute.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.SERIAL.getProtocolCode()); - } - } - - @Test - public void should_use_timeout_from_simple_statement() { - try (CqlSession session = SessionUtils.newSession(simulacron)) { - Map params = ImmutableMap.of("k", 0); - Map paramTypes = ImmutableMap.of("k", "int"); - simulacron - .cluster() - .prime( - when(query( - "mock query", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE), - params, - paramTypes)) - .then(noRows()) - .delay(1500, TimeUnit.MILLISECONDS)); - SimpleStatement st = - SimpleStatement.builder("mock query") - .setTimeout(Duration.ofSeconds(1)) - .setConsistencyLevel(DefaultConsistencyLevel.ONE) - .build(); - PreparedStatement prepared = session.prepare(st); - - thrown.expect(DriverTimeoutException.class); - thrown.expectMessage("Query timed out after PT1S"); - - session.execute(prepared.bind(0)); - } - } - - @Test - public void should_use_timeout() { - try (CqlSession session = SessionUtils.newSession(simulacron)) { - Map params = ImmutableMap.of("k", 0); - Map paramTypes = ImmutableMap.of("k", "int"); - // set timeout on simple statement, but will be unused since overridden by bound statement. - simulacron - .cluster() - .prime( - when(query( - "mock query", - Lists.newArrayList( - com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE), - params, - paramTypes)) - .then(noRows()) - .delay(1500, TimeUnit.MILLISECONDS)); - SimpleStatement st = - SimpleStatement.builder("mock query") - .setTimeout(Duration.ofSeconds(1)) - .setConsistencyLevel(DefaultConsistencyLevel.ONE) - .build(); - PreparedStatement prepared = session.prepare(st); - - thrown.expect(DriverTimeoutException.class); - thrown.expectMessage("Query timed out after PT0.15S"); - - session.execute(prepared.bind(0).setTimeout(Duration.ofMillis(150))); - } - } - @Test public void should_propagate_attributes_when_preparing_a_simple_statement() { CqlSession session = sessionRule.session(); @@ -418,7 +293,6 @@ public void should_propagate_attributes_when_preparing_a_simple_statement() { .getDefaultProfile() // Value doesn't matter, we just want a distinct profile .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10)); - String mockConfigProfileName = "mockConfigProfileName"; ByteBuffer mockPagingState = Bytes.fromHexString("0xaaaa"); CqlIdentifier mockKeyspace = supportsPerRequestKeyspace(session) ? 
CqlIdentifier.fromCql("system") : null; @@ -435,7 +309,6 @@ public void should_propagate_attributes_when_preparing_a_simple_statement() { SimpleStatementBuilder simpleStatementBuilder = SimpleStatement.builder("SELECT release_version FROM system.local") .setExecutionProfile(mockProfile) - .setExecutionProfileName(mockConfigProfileName) .setPagingState(mockPagingState) .setKeyspace(mockKeyspace) .setRoutingKeyspace(mockRoutingKeyspace) @@ -464,7 +337,6 @@ public void should_propagate_attributes_when_preparing_a_simple_statement() { BoundStatement boundStatement = createMethod.apply(preparedStatement); assertThat(boundStatement.getExecutionProfile()).isEqualTo(mockProfile); - assertThat(boundStatement.getExecutionProfileName()).isEqualTo(mockConfigProfileName); assertThat(boundStatement.getPagingState()).isEqualTo(mockPagingState); assertThat(boundStatement.getRoutingKeyspace()) .isEqualTo(mockKeyspace != null ? mockKeyspace : mockRoutingKeyspace); @@ -483,15 +355,15 @@ public void should_propagate_attributes_when_preparing_a_simple_statement() { // Bound statements do not support per-query keyspaces, so this is not set assertThat(boundStatement.getKeyspace()).isNull(); // Should not be propagated - assertThat(boundStatement.getQueryTimestamp()).isEqualTo(Long.MIN_VALUE); + assertThat(boundStatement.getQueryTimestamp()).isEqualTo(Statement.NO_DEFAULT_TIMESTAMP); } } // Test for JAVA-2066 @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_compute_routing_key_when_indices_randomly_distributed() { - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace())) { + try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { PreparedStatement ps = session.prepare("INSERT INTO test3 (v, pk2, pk1) VALUES (?,?,?)"); @@ -506,6 +378,44 @@ public void should_compute_routing_key_when_indices_randomly_distributed() { } } + @Test + public void should_set_all_occurrences_of_variable() { + CqlSession session = sessionRule.session(); + PreparedStatement ps = session.prepare("INSERT INTO test3 (pk1, pk2, v) VALUES (:i, :i, :i)"); + + CqlIdentifier id = CqlIdentifier.fromCql("i"); + ColumnDefinitions variableDefinitions = ps.getVariableDefinitions(); + assertThat(variableDefinitions.allIndicesOf(id)).containsExactly(0, 1, 2); + + should_set_all_occurrences_of_variable(ps.bind().setInt(id, 12)); + should_set_all_occurrences_of_variable(ps.boundStatementBuilder().setInt(id, 12).build()); + } + + private void should_set_all_occurrences_of_variable(BoundStatement bs) { + assertThat(bs.getInt(0)).isEqualTo(12); + assertThat(bs.getInt(1)).isEqualTo(12); + assertThat(bs.getInt(2)).isEqualTo(12); + + // Nothing should be shared internally (this would be a bug if the client later retrieves a + // buffer with getBytesUnsafe and modifies it) + ByteBuffer bytes0 = bs.getBytesUnsafe(0); + ByteBuffer bytes1 = bs.getBytesUnsafe(1); + assertThat(bytes0).isNotNull(); + assertThat(bytes1).isNotNull(); + // Not the same instance + assertThat(bytes0).isNotSameAs(bytes1); + // Contents are not shared + bytes0.putInt(0, 11); + assertThat(bytes1.getInt(0)).isEqualTo(12); + bytes0.putInt(0, 12); + + CqlSession session = sessionRule.session(); + session.execute(bs); + Row row = session.execute("SELECT * FROM test3 WHERE pk1 = 12 AND pk2 = 12").one(); + assertThat(row).isNotNull(); + assertThat(row.getInt("v")).isEqualTo(12); + } + private static void verifyUnset( CqlSession session, BoundStatement 
boundStatement, String valueName) { session.execute(boundStatement.unset(1)); @@ -526,7 +436,7 @@ private static void verifyUnset( private CqlSession sessionWithCustomCodec(CqlIntToStringCodec codec) { return (CqlSession) SessionUtils.baseBuilder() - .addContactEndPoints(ccm.getContactPoints()) + .addContactEndPoints(ccmRule.getContactPoints()) .withKeyspace(sessionRule.keyspace()) .addTypeCodecs(codec) .build(); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementSimulacronIT.java new file mode 100644 index 00000000000..cb81874d47a --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/BoundStatementSimulacronIT.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.cql; + +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.query; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.request.Execute; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.cluster.QueryLog; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import java.time.Duration; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class BoundStatementSimulacronIT { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + @Before + public void clearPrimes() { + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + } + + @Test + public void should_use_consistencies_from_simple_statement() { + try 
(CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) {
+      SimpleStatement st =
+          SimpleStatement.builder("SELECT * FROM test where k = ?")
+              .setConsistencyLevel(DefaultConsistencyLevel.TWO)
+              .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL)
+              .build();
+      PreparedStatement prepared = session.prepare(st);
+      SIMULACRON_RULE.cluster().clearLogs();
+      // since query is unprimed, we use a text value for bind parameter as this is
+      // what simulacron expects for unprimed statements.
+      session.execute(prepared.bind("0"));
+
+      List<QueryLog> logs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs();
+      assertThat(logs).hasSize(1);
+
+      QueryLog log = logs.get(0);
+
+      Message message = log.getFrame().message;
+      assertThat(message).isInstanceOf(Execute.class);
+      Execute execute = (Execute) message;
+      assertThat(execute.options.consistency)
+          .isEqualTo(DefaultConsistencyLevel.TWO.getProtocolCode());
+      assertThat(execute.options.serialConsistency)
+          .isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL.getProtocolCode());
+    }
+  }
+
+  @Test
+  public void should_use_consistencies() {
+    try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) {
+      // set consistencies on simple statement, but they will be unused since
+      // overridden by bound statement.
+      SimpleStatement st =
+          SimpleStatement.builder("SELECT * FROM test where k = ?")
+              .setConsistencyLevel(DefaultConsistencyLevel.TWO)
+              .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL)
+              .build();
+      PreparedStatement prepared = session.prepare(st);
+      SIMULACRON_RULE.cluster().clearLogs();
+      // since query is unprimed, we use a text value for bind parameter as this is
+      // what simulacron expects for unprimed statements.
+      session.execute(
+          prepared
+              .boundStatementBuilder("0")
+              .setConsistencyLevel(DefaultConsistencyLevel.THREE)
+              .setSerialConsistencyLevel(DefaultConsistencyLevel.SERIAL)
+              .build());
+
+      List<QueryLog> logs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs();
+      assertThat(logs).hasSize(1);
+
+      QueryLog log = logs.get(0);
+
+      Message message = log.getFrame().message;
+      assertThat(message).isInstanceOf(Execute.class);
+      Execute execute = (Execute) message;
+      assertThat(execute.options.consistency)
+          .isEqualTo(DefaultConsistencyLevel.THREE.getProtocolCode());
+      assertThat(execute.options.serialConsistency)
+          .isEqualTo(DefaultConsistencyLevel.SERIAL.getProtocolCode());
+    }
+  }
+
+  @Test
+  public void should_use_timeout_from_simple_statement() {
+    try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) {
+      LinkedHashMap<String, Object> params = new LinkedHashMap<>(ImmutableMap.of("k", 0));
+      LinkedHashMap<String, String> paramTypes = new LinkedHashMap<>(ImmutableMap.of("k", "int"));
+      SIMULACRON_RULE
+          .cluster()
+          .prime(
+              when(query(
+                      "mock query",
+                      Lists.newArrayList(
+                          com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE),
+                      params,
+                      paramTypes))
+                  .then(noRows())
+                  .delay(1500, TimeUnit.MILLISECONDS));
+      SimpleStatement st =
+          SimpleStatement.builder("mock query")
+              .setTimeout(Duration.ofSeconds(1))
+              .setConsistencyLevel(DefaultConsistencyLevel.ONE)
+              .build();
+      PreparedStatement prepared = session.prepare(st);
+
+      Throwable t = catchThrowable(() -> session.execute(prepared.bind(0)));
+
+      assertThat(t)
+          .isInstanceOf(DriverTimeoutException.class)
+          .hasMessage("Query timed out after PT1S");
+    }
+  }
+
+  @Test
+  public void should_use_timeout() {
+    try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) {
+      LinkedHashMap<String, Object> params = new LinkedHashMap<>(ImmutableMap.of("k", 0));
+      LinkedHashMap<String, String>
paramTypes = new LinkedHashMap<>(ImmutableMap.of("k", "int")); + // set timeout on simple statement, but will be unused since overridden by bound statement. + SIMULACRON_RULE + .cluster() + .prime( + when(query( + "mock query", + Lists.newArrayList( + com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE), + params, + paramTypes)) + .then(noRows()) + .delay(1500, TimeUnit.MILLISECONDS)); + SimpleStatement st = + SimpleStatement.builder("mock query") + .setTimeout(Duration.ofSeconds(1)) + .setConsistencyLevel(DefaultConsistencyLevel.ONE) + .build(); + PreparedStatement prepared = session.prepare(st); + + Throwable t = + catchThrowable( + () -> session.execute(prepared.bind(0).setTimeout(Duration.ofMillis(150)))); + + assertThat(t) + .isInstanceOf(DriverTimeoutException.class) + .hasMessage("Query timed out after PT0.15S"); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/ExecutionInfoWarningsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/ExecutionInfoWarningsIT.java similarity index 78% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/ExecutionInfoWarningsIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/ExecutionInfoWarningsIT.java index 67d473b5c3e..edee9723a38 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/ExecutionInfoWarningsIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/ExecutionInfoWarningsIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +16,7 @@ * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.timeout; @@ -26,8 +28,14 @@ import ch.qos.logback.core.Appender; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.internal.core.cql.CqlRequestHandler; @@ -53,13 +61,13 @@ public class ExecutionInfoWarningsIT { private static final String KEY = "test"; - private final CustomCcmRule ccm = + private CustomCcmRule ccmRule = new CustomCcmRule.Builder() // set the warn threshold to 5Kb (default is 64Kb in newer versions) .withCassandraConfiguration("batch_size_warn_threshold_in_kb", "5") .build(); - private final SessionRule sessionRule = - SessionRule.builder(ccm) + private SessionRule sessionRule = + SessionRule.builder(ccmRule) .withConfigLoader( SessionUtils.configLoaderBuilder() .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 20) @@ -71,7 +79,7 @@ public class ExecutionInfoWarningsIT { .build()) .build(); - @Rule public final TestRule ccmRule = RuleChain.outerRule(ccm).around(sessionRule); + @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); @Mock private Appender appender; @Captor private ArgumentCaptor loggingEventCaptor; @@ -79,14 +87,18 @@ public class ExecutionInfoWarningsIT { private Level originalLoggerLevel; @Before - public void setupLogger() { + public void createSchema() { // table with simple primary key, single cell. 
- sessionRule - .session() - .execute( - SimpleStatement.builder("CREATE TABLE IF NOT EXISTS test (k int primary key, v text)") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); + SchemaChangeSynchronizer.withLock( + () -> { + sessionRule + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test (k int primary key, v text)") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + }); for (int i = 0; i < 100; i++) { sessionRule .session() @@ -95,7 +107,10 @@ public void setupLogger() { .addPositionalValues(KEY, i) .build()); } - // setup the log appender + } + + @Before + public void setupLogger() { logger = (Logger) LoggerFactory.getLogger(CqlRequestHandler.class); originalLoggerLevel = logger.getLevel(); logger.setLevel(Level.WARN); @@ -109,7 +124,7 @@ public void cleanupLogger() { } @Test - @CassandraRequirement(min = "3.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "3.0") public void should_execute_query_and_log_server_side_warnings() { final String query = "SELECT count(*) FROM test;"; Statement st = SimpleStatement.builder(query).build(); @@ -133,7 +148,7 @@ public void should_execute_query_and_log_server_side_warnings() { } @Test - @CassandraRequirement(min = "3.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "3.0") public void should_execute_query_and_not_log_server_side_warnings() { final String query = "SELECT count(*) FROM test;"; Statement st = @@ -151,7 +166,7 @@ public void should_execute_query_and_not_log_server_side_warnings() { } @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_expose_warnings_on_execution_info() { // the default batch size warn threshold is 5 * 1024 bytes, but after CASSANDRA-10876 there must // be multiple mutations in a batch to trigger this warning so the batch includes 2 different @@ -189,6 +204,6 @@ public void should_expose_warnings_on_execution_info() { .contains("for") .contains(String.format("%s.test", sessionRule.keyspace().asCql(true))) .contains("is of size") - .contains("exceeding specified threshold")); + .containsPattern("exceeding specified .*threshold")); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/NowInSecondsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/NowInSecondsIT.java new file mode 100644 index 00000000000..191dc040ffd --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/NowInSecondsIT.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
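For reference, the server-side warnings that ExecutionInfoWarningsIT asserts on are exposed per execution; a minimal sketch of reading them directly (the session, table, and query are illustrative):

```java
ResultSet rs = session.execute("SELECT count(*) FROM test");
// Warnings such as "Aggregation query used without partition key" ride back
// on the response and sit alongside the other execution metadata:
List<String> warnings = rs.getExecutionInfo().getWarnings();
```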
+ */
+package com.datastax.oss.driver.core.cql;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.BatchStatement;
+import com.datastax.oss.driver.api.core.cql.BatchType;
+import com.datastax.oss.driver.api.core.cql.PreparedStatement;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import com.datastax.oss.driver.api.core.cql.SimpleStatement;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.testinfra.ccm.CcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.datastax.oss.driver.categories.ParallelizableTests;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList;
+import java.util.function.Function;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+@Category(ParallelizableTests.class)
+@BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0")
+// The DSE version below is deliberately a future release, as a reminder to
+// re-check whether the feature has landed once that version ships.
+@BackendRequirement(
+    type = BackendType.DSE,
+    minInclusive = "7.0",
+    description = "Feature not available in DSE yet")
+public class NowInSecondsIT {
+
+  private static final CcmRule CCM_RULE = CcmRule.getInstance();
+
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      SessionRule.builder(CCM_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+
+  @Before
+  public void setup() {
+    for (String statement :
+        ImmutableList.of(
+            "DROP TABLE IF EXISTS test", "CREATE TABLE test(k int PRIMARY KEY, v int)")) {
+      SESSION_RULE
+          .session()
+          .execute(
+              SimpleStatement.newInstance(statement)
+                  .setExecutionProfile(SESSION_RULE.slowProfile()));
+    }
+  }
+
+  @Test
+  public void should_use_now_in_seconds_with_simple_statement() {
+    should_use_now_in_seconds(SimpleStatement::newInstance);
+  }
+
+  @Test
+  public void should_use_now_in_seconds_with_bound_statement() {
+    should_use_now_in_seconds(
+        queryString -> {
+          PreparedStatement preparedStatement = SESSION_RULE.session().prepare(queryString);
+          return preparedStatement.bind();
+        });
+  }
+
+  @Test
+  public void should_use_now_in_seconds_with_batch_statement() {
+    should_use_now_in_seconds(
+        queryString ->
+            BatchStatement.newInstance(BatchType.LOGGED, SimpleStatement.newInstance(queryString)));
+  }
+
+  private <StatementT extends Statement<StatementT>> void should_use_now_in_seconds(
+      Function<String, StatementT> buildWriteStatement) {
+    CqlSession session = SESSION_RULE.session();
+
+    // Given
+    StatementT writeStatement =
+        buildWriteStatement.apply("INSERT INTO test (k,v) VALUES (1,1) USING TTL 20");
+    SimpleStatement readStatement =
+        SimpleStatement.newInstance("SELECT TTL(v) FROM test WHERE k = 1");
+
+    // When
+    // insert at t = 0 with TTL 20
+    session.execute(writeStatement.setNowInSeconds(0));
+    // read TTL at t = 10
+    ResultSet rs = session.execute(readStatement.setNowInSeconds(10));
+    int remainingTtl = rs.one().getInt(0);
+
+    // Then
+    assertThat(remainingTtl).isEqualTo(10);
+  }
+}
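The TTL arithmetic in `should_use_now_in_seconds` above, condensed: `setNowInSeconds` (a protocol v5 feature, hence the Cassandra 4.0 requirement) pins the server's notion of "now" for one request, which makes TTL reads deterministic. A sketch, reusing the test's table:

```java
// Write with TTL 20 at server-time t = 0:
session.execute(
    SimpleStatement.newInstance("INSERT INTO test (k,v) VALUES (1,1) USING TTL 20")
        .setNowInSeconds(0));
// Read as if 10 seconds had elapsed:
ResultSet rs =
    session.execute(
        SimpleStatement.newInstance("SELECT TTL(v) FROM test WHERE k = 1")
            .setNowInSeconds(10));
assertThat(rs.one().getInt(0)).isEqualTo(10); // 20 - 10 seconds remain
```

diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PagingIterableSpliteratorIT.java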
b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingIterableSpliteratorIT.java similarity index 75% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PagingIterableSpliteratorIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingIterableSpliteratorIT.java index 476510097b1..02078b683db 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PagingIterableSpliteratorIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingIterableSpliteratorIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,13 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; import static org.assertj.core.api.Assertions.assertThat; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; @@ -43,37 +52,38 @@ @Category(ParallelizableTests.class) public class PagingIterableSpliteratorIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @BeforeClass public static void setupSchema() { - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder( "CREATE TABLE IF NOT EXISTS test (k0 int, k1 int, v int, PRIMARY KEY(k0, k1))") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO test (k0, k1, v) 
VALUES (?, ?, ?)"); + SESSION_RULE.session().prepare("INSERT INTO test (k0, k1, v) VALUES (?, ?, ?)"); for (int i = 0; i < 20_000; i += 1_000) { BatchStatementBuilder batch = BatchStatement.builder(DefaultBatchType.UNLOGGED); for (int j = 0; j < 1_000; j++) { int n = i + j; batch.addStatement(prepared.bind(0, n, n)); } - sessionRule.session().execute(batch.setExecutionProfile(sessionRule.slowProfile()).build()); + SESSION_RULE.session().execute(batch.setExecutionProfile(SESSION_RULE.slowProfile()).build()); } } @Test @UseDataProvider("pageSizes") public void should_consume_spliterator(int pageSize, boolean parallel) throws Exception { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); DriverExecutionProfile profile = session .getContext() diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingStateIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingStateIT.java new file mode 100644 index 00000000000..6d33f35238a --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PagingStateIT.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.core.cql; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PagingState; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.type.codec.IntCodec; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.function.UnaryOperator; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class PagingStateIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule public static TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @Before + public void setupSchema() { + CqlSession session = SESSION_RULE.session(); + SchemaChangeSynchronizer.withLock( + () -> { + session.execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS foo (k int, cc int, v int, PRIMARY KEY(k, cc))") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + }); + for (int i = 0; i < 20; i++) { + session.execute( + SimpleStatement.newInstance("INSERT INTO foo (k, cc, v) VALUES (1, ?, ?)", i, i)); + } + } + + @Test + public void should_extract_and_reuse() { + should_extract_and_reuse(UnaryOperator.identity()); + } + + @Test + public void should_convert_to_bytes() { + should_extract_and_reuse(pagingState -> PagingState.fromBytes(pagingState.toBytes())); + } + + @Test + public void should_convert_to_string() { + should_extract_and_reuse(pagingState -> PagingState.fromString(pagingState.toString())); + } + + private void should_extract_and_reuse(UnaryOperator transformation) { + CqlSession session = SESSION_RULE.session(); + + BoundStatement boundStatement = + session + .prepare(SimpleStatement.newInstance("SELECT * FROM foo WHERE k = ?").setPageSize(15)) + .bind(1); + + ResultSet resultSet = session.execute(boundStatement); + assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(15); + assertThat(resultSet.isFullyFetched()).isFalse(); + + PagingState pagingState = + transformation.apply(resultSet.getExecutionInfo().getSafePagingState()); + + assertThat(pagingState.matches(boundStatement)).isTrue(); + resultSet = session.execute(boundStatement.setPagingState(pagingState)); + assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(5); + assertThat(resultSet.isFullyFetched()).isTrue(); + } + + @Test + public void should_inject_in_simple_statement_with_custom_codecs() { + try (CqlSession session = + 
(CqlSession) + SessionUtils.baseBuilder() + .addTypeCodecs(new IntWrapperCodec()) + .addContactEndPoints(CCM_RULE.getContactPoints()) + .withKeyspace(SESSION_RULE.keyspace()) + .build()) { + + SimpleStatement statement = + SimpleStatement.newInstance("SELECT * FROM foo WHERE k = ?", new IntWrapper(1)) + .setPageSize(15); + + ResultSet resultSet = session.execute(statement); + assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(15); + assertThat(resultSet.isFullyFetched()).isFalse(); + + PagingState pagingState = resultSet.getExecutionInfo().getSafePagingState(); + + // This is the case where we need the session: simple statements are not attached, so + // setPagingState() cannot find the custom codec. + try { + @SuppressWarnings("unused") + SimpleStatement ignored = statement.setPagingState(pagingState); + fail("Expected a CodecNotFoundException"); + } catch (CodecNotFoundException e) { + // expected + } + + resultSet = session.execute(statement.setPagingState(pagingState, session)); + assertThat(resultSet.getAvailableWithoutFetching()).isEqualTo(5); + assertThat(resultSet.isFullyFetched()).isTrue(); + } + } + + @Test + public void should_fail_if_query_does_not_match() { + should_fail("SELECT * FROM foo WHERE k = ?", 1, "SELECT v FROM FOO WHERE k = ?", 1); + } + + @Test + public void should_fail_if_values_do_not_match() { + should_fail("SELECT * FROM foo WHERE k = ?", 1, "SELECT * FROM foo WHERE k = ?", 2); + } + + private void should_fail(String query1, int value1, String query2, int value2) { + CqlSession session = SESSION_RULE.session(); + + BoundStatement boundStatement1 = + session.prepare(SimpleStatement.newInstance(query1).setPageSize(15)).bind(value1); + + ResultSet resultSet = session.execute(boundStatement1); + PagingState pagingState = resultSet.getExecutionInfo().getSafePagingState(); + + @SuppressWarnings("ResultOfMethodCallIgnored") + Throwable t = + catchThrowable( + () -> + session + .prepare(SimpleStatement.newInstance(query2).setPageSize(15)) + .bind(value2) + .setPagingState(pagingState)); + + assertThat(t).isInstanceOf(IllegalArgumentException.class); + } + + static class IntWrapper { + final int value; + + public IntWrapper(int value) { + this.value = value; + } + } + + static class IntWrapperCodec extends MappingCodec { + + protected IntWrapperCodec() { + super(new IntCodec(), GenericType.of(IntWrapper.class)); + } + + @Nullable + @Override + protected IntWrapper innerToOuter(@Nullable Integer value) { + return value == null ? null : new IntWrapper(value); + } + + @Nullable + @Override + protected Integer outerToInner(@Nullable IntWrapper wrapper) { + return wrapper == null ? null : wrapper.value; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PerRequestKeyspaceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PerRequestKeyspaceIT.java similarity index 61% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PerRequestKeyspaceIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PerRequestKeyspaceIT.java index 1cd46307efe..9eb883144db 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PerRequestKeyspaceIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PerRequestKeyspaceIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,23 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.categories.ParallelizableTests; +import java.nio.ByteBuffer; +import java.time.Duration; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestName; import org.junit.rules.TestRule; @@ -51,29 +64,31 @@ public class PerRequestKeyspaceIT { @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - @Rule public ExpectedException thrown = ExpectedException.none(); @Rule public TestName nameRule = new TestName(); @Before public void setupSchema() { - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS foo (k text, cc int, v int, PRIMARY KEY(k, cc))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); + SchemaChangeSynchronizer.withLock( + () -> { + sessionRule + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS foo (k text, cc int, v int, PRIMARY KEY(k, cc))") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + }); } @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_reject_simple_statement_with_keyspace_in_protocol_v4() { should_reject_statement_with_keyspace_in_protocol_v4( 
SimpleStatement.newInstance("SELECT * FROM foo").setKeyspace(sessionRule.keyspace())); } @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_reject_batch_statement_with_explicit_keyspace_in_protocol_v4() { SimpleStatement statementWithoutKeyspace = SimpleStatement.newInstance( @@ -86,7 +101,7 @@ public void should_reject_batch_statement_with_explicit_keyspace_in_protocol_v4( } @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_reject_batch_statement_with_inferred_keyspace_in_protocol_v4() { SimpleStatement statementWithKeyspace = SimpleStatement.newInstance( @@ -104,14 +119,15 @@ private void should_reject_statement_with_keyspace_in_protocol_v4(Statement stat .withString(DefaultDriverOption.PROTOCOL_VERSION, "V4") .build(); try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Can't use per-request keyspace with protocol V4"); - session.execute(statement); + Throwable t = catchThrowable(() -> session.execute(statement)); + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("Can't use per-request keyspace with protocol V4"); } } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_execute_simple_statement_with_keyspace() { CqlSession session = sessionRule.session(); session.execute( @@ -129,7 +145,7 @@ public void should_execute_simple_statement_with_keyspace() { } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_execute_batch_with_explicit_keyspace() { CqlSession session = sessionRule.session(); session.execute( @@ -153,7 +169,7 @@ public void should_execute_batch_with_explicit_keyspace() { } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_execute_batch_with_inferred_keyspace() { CqlSession session = sessionRule.session(); session.execute( @@ -185,7 +201,7 @@ public void should_execute_batch_with_inferred_keyspace() { } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_prepare_statement_with_keyspace() { CqlSession session = sessionRule.session(); PreparedStatement prepared = @@ -203,4 +219,47 @@ public void should_prepare_statement_with_keyspace() { .one(); assertThat(row.getInt(0)).isEqualTo(1); } + + @Test + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") + public void should_reprepare_statement_with_keyspace_on_the_fly() { + // Create a separate session because we don't want it to have a default keyspace + SchemaChangeSynchronizer.withLock( + () -> { + try (CqlSession session = SessionUtils.newSession(ccmRule)) { + executeDdl( + session, + String.format( + "CREATE TABLE IF NOT EXISTS %s.bar (k int primary key)", + sessionRule.keyspace())); + PreparedStatement pst = + session.prepare( + SimpleStatement.newInstance("SELECT * FROM bar WHERE k=?") + .setKeyspace(sessionRule.keyspace())); + + // Drop and re-create the table to invalidate the prepared statement server side + executeDdl(session, String.format("DROP TABLE %s.bar", sessionRule.keyspace())); + executeDdl( + session, + String.format("CREATE TABLE %s.bar (k int primary key)", 
sessionRule.keyspace())); + assertThat(preparedStatementExistsOnServer(session, pst.getId())).isFalse(); + + // This will re-prepare on the fly + session.execute(pst.bind(0)); + assertThat(preparedStatementExistsOnServer(session, pst.getId())).isTrue(); + } + }); + } + + private void executeDdl(CqlSession session, String query) { + session.execute(SimpleStatement.builder(query).setTimeout(Duration.ofSeconds(30)).build()); + } + + private boolean preparedStatementExistsOnServer(CqlSession session, ByteBuffer id) { + ResultSet resultSet = + session.execute( + SimpleStatement.newInstance( + "SELECT * FROM system.prepared_statements WHERE prepared_id = ?", id)); + return resultSet.iterator().hasNext(); + } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCachingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCachingIT.java new file mode 100644 index 00000000000..617d489fb95 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCachingIT.java @@ -0,0 +1,429 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.core.cql;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.codahale.metrics.Gauge;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.api.core.cql.PreparedStatement;
+import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric;
+import com.datastax.oss.driver.api.core.session.ProgrammaticArguments;
+import com.datastax.oss.driver.api.core.session.SessionBuilder;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
+import com.datastax.oss.driver.categories.IsolatedTests;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor;
+import com.datastax.oss.driver.internal.core.cql.CqlPrepareSyncProcessor;
+import com.datastax.oss.driver.internal.core.metadata.schema.events.TypeChangeEvent;
+import com.datastax.oss.driver.internal.core.session.BuiltInRequestProcessors;
+import com.datastax.oss.driver.internal.core.session.RequestProcessor;
+import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry;
+import com.datastax.oss.driver.shaded.guava.common.cache.RemovalListener;
+import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// These tests must be isolated because setup modifies SessionUtils.SESSION_BUILDER_CLASS_PROPERTY
+@Category(IsolatedTests.class)
+public class PreparedStatementCachingIT {
+
+  private CustomCcmRule ccmRule = CustomCcmRule.builder().build();
+
+  private SessionRule<CqlSession> sessionRule =
+      SessionRule.builder(ccmRule)
+          .withConfigLoader(
+              SessionUtils.configLoaderBuilder()
+                  .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2)
+                  .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
+                  .build())
+          .build();
+
+  @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+
+  private static class PreparedStatementRemovalEvent {
+
+    private final ByteBuffer queryId;
+
+    public PreparedStatementRemovalEvent(ByteBuffer queryId) {
+      this.queryId = queryId;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      // instanceof is already false for null, so no separate null check is needed
+      if (!(o instanceof PreparedStatementRemovalEvent)) return
false;
+      PreparedStatementRemovalEvent that = (PreparedStatementRemovalEvent) o;
+      return Objects.equals(queryId, that.queryId);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(queryId);
+    }
+
+    @Override
+    public String toString() {
+      return "PreparedStatementRemovalEvent{" + "queryId=" + queryId + '}';
+    }
+  }
+
+  private static class TestCqlPrepareAsyncProcessor extends CqlPrepareAsyncProcessor {
+
+    private static final Logger LOG =
+        LoggerFactory.getLogger(PreparedStatementCachingIT.TestCqlPrepareAsyncProcessor.class);
+
+    private static RemovalListener<Object, Object> buildCacheRemoveCallback(
+        @NonNull Optional<DefaultDriverContext> context) {
+      return (evt) -> {
+        try {
+          CompletableFuture<PreparedStatement> future =
+              (CompletableFuture<PreparedStatement>) evt.getValue();
+          ByteBuffer queryId = Uninterruptibles.getUninterruptibly(future).getId();
+          context.ifPresent(
+              ctx -> ctx.getEventBus().fire(new PreparedStatementRemovalEvent(queryId)));
+        } catch (Exception e) {
+          LOG.error("Unable to fire prepared statement removal event", e);
+        }
+      };
+    }
+
+    public TestCqlPrepareAsyncProcessor(@NonNull Optional<DefaultDriverContext> context) {
+      // Default CqlPrepareAsyncProcessor uses weak values here as well. We avoid doing so
+      // to prevent cache entries from unexpectedly disappearing mid-test.
+      super(context, builder -> builder.removalListener(buildCacheRemoveCallback(context)));
+    }
+  }
+
+  private static class TestDefaultDriverContext extends DefaultDriverContext {
+    public TestDefaultDriverContext(
+        DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) {
+      super(configLoader, programmaticArguments);
+    }
+
+    @Override
+    protected RequestProcessorRegistry buildRequestProcessorRegistry() {
+      // Re-create the processor cache to insert the TestCqlPrepareAsyncProcessor with its strong
+      // prepared statement cache, see JAVA-3062
+      List<RequestProcessor<?, ?>> processors =
+          BuiltInRequestProcessors.createDefaultProcessors(this);
+      processors.removeIf((processor) -> processor instanceof CqlPrepareAsyncProcessor);
+      processors.removeIf((processor) -> processor instanceof CqlPrepareSyncProcessor);
+      CqlPrepareAsyncProcessor asyncProcessor = new TestCqlPrepareAsyncProcessor(Optional.of(this));
+      processors.add(2, asyncProcessor);
+      processors.add(3, new CqlPrepareSyncProcessor(asyncProcessor));
+      return new RequestProcessorRegistry(
+          getSessionName(), processors.toArray(new RequestProcessor[0]));
+    }
+  }
+
+  private static class TestSessionBuilder extends SessionBuilder<TestSessionBuilder, Object> {
+
+    @Override
+    protected Object wrap(@NonNull CqlSession defaultSession) {
+      return defaultSession;
+    }
+
+    @Override
+    protected DriverContext buildContext(
+        DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) {
+      return new TestDefaultDriverContext(configLoader, programmaticArguments);
+    }
+  }
+
+  @BeforeClass
+  public static void setup() {
+    System.setProperty(
+        SessionUtils.SESSION_BUILDER_CLASS_PROPERTY, PreparedStatementCachingIT.class.getName());
+  }
+
+  @AfterClass
+  public static void teardown() {
+    System.clearProperty(SessionUtils.SESSION_BUILDER_CLASS_PROPERTY);
+  }
+
+  public static SessionBuilder<TestSessionBuilder, Object> builder() {
+    return new TestSessionBuilder();
+  }
+
+  private void invalidationResultSetTest(
+      Consumer<CqlSession> setupTestSchema, Set<String> expectedChangedTypes) {
+    invalidationTestInner(
+        setupTestSchema,
+        "select f from test_table_1 where e = ?",
+        "select h from test_table_2 where g = ?",
+        expectedChangedTypes);
+  }
+
+  private void invalidationVariableDefsTest(
+      Consumer<CqlSession> setupTestSchema,
+      boolean isCollection,
+      Set<String> expectedChangedTypes) {
+    String condition = isCollection ?
"contains ?" : "= ?"; + invalidationTestInner( + setupTestSchema, + String.format("select e from test_table_1 where f %s allow filtering", condition), + String.format("select g from test_table_2 where h %s allow filtering", condition), + expectedChangedTypes); + } + + private void invalidationTestInner( + Consumer setupTestSchema, + String preparedStmtQueryType1, + String preparedStmtQueryType2, + Set expectedChangedTypes) { + + try (CqlSession session = sessionWithCacheSizeMetric()) { + + assertThat(getPreparedCacheSize(session)).isEqualTo(0); + setupTestSchema.accept(session); + + session.prepare(preparedStmtQueryType1); + ByteBuffer queryId2 = session.prepare(preparedStmtQueryType2).getId(); + assertThat(getPreparedCacheSize(session)).isEqualTo(2); + + CountDownLatch preparedStmtCacheRemoveLatch = new CountDownLatch(1); + CountDownLatch typeChangeEventLatch = new CountDownLatch(expectedChangedTypes.size()); + + DefaultDriverContext ctx = (DefaultDriverContext) session.getContext(); + Map changedTypes = new ConcurrentHashMap<>(); + AtomicReference> removedQueryIds = + new AtomicReference<>(Optional.empty()); + AtomicReference> typeChangeEventError = + new AtomicReference<>(Optional.empty()); + AtomicReference> removedQueryEventError = + new AtomicReference<>(Optional.empty()); + ctx.getEventBus() + .register( + TypeChangeEvent.class, + (e) -> { + // expect one event per type changed and for every parent type that nests it + if (Boolean.TRUE.equals( + changedTypes.putIfAbsent(e.oldType.getName().toString(), true))) { + // store an error if we see duplicate change event + // any non-empty error will fail the test so it's OK to do this multiple times + typeChangeEventError.set(Optional.of("Duplicate type change event " + e)); + } + typeChangeEventLatch.countDown(); + }); + ctx.getEventBus() + .register( + PreparedStatementRemovalEvent.class, + (e) -> { + if (!removedQueryIds.compareAndSet(Optional.empty(), Optional.of(e.queryId))) { + // store an error if we see multiple cache invalidation events + // any non-empty error will fail the test so it's OK to do this multiple times + removedQueryEventError.set( + Optional.of("Unable to set reference for PS removal event")); + } + preparedStmtCacheRemoveLatch.countDown(); + }); + + // alter test_type_2 to trigger cache invalidation and above events + session.execute("ALTER TYPE test_type_2 add i blob"); + + // wait for latches and fail if they don't reach zero before timeout + assertThat( + Uninterruptibles.awaitUninterruptibly( + preparedStmtCacheRemoveLatch, 10, TimeUnit.SECONDS)) + .withFailMessage("preparedStmtCacheRemoveLatch did not trigger before timeout") + .isTrue(); + assertThat(Uninterruptibles.awaitUninterruptibly(typeChangeEventLatch, 10, TimeUnit.SECONDS)) + .withFailMessage("typeChangeEventLatch did not trigger before timeout") + .isTrue(); + + /* Okay, the latch triggered so cache processing should now be done. 
Let's validate :allthethings: */ + assertThat(changedTypes.keySet()).isEqualTo(expectedChangedTypes); + assertThat(removedQueryIds.get()).isNotEmpty().get().isEqualTo(queryId2); + assertThat(getPreparedCacheSize(session)).isEqualTo(1); + + // check no errors were seen in callback (and report those as fail msgs) + // if something is broken these may still succeed due to timing + // but shouldn't intermittently fail if the code is working properly + assertThat(typeChangeEventError.get()) + .withFailMessage(() -> typeChangeEventError.get().get()) + .isEmpty(); + assertThat(removedQueryEventError.get()) + .withFailMessage(() -> removedQueryEventError.get().get()) + .isEmpty(); + } + } + + Consumer setupCacheEntryTestBasic = + (session) -> { + session.execute("CREATE TYPE test_type_1 (a text, b int)"); + session.execute("CREATE TYPE test_type_2 (c int, d text)"); + session.execute("CREATE TABLE test_table_1 (e int primary key, f frozen)"); + session.execute("CREATE TABLE test_table_2 (g int primary key, h frozen)"); + }; + + @Test + public void should_invalidate_cache_entry_on_basic_udt_change_result_set() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationResultSetTest(setupCacheEntryTestBasic, ImmutableSet.of("test_type_2")); + }); + } + + @Test + public void should_invalidate_cache_entry_on_basic_udt_change_variable_defs() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationVariableDefsTest( + setupCacheEntryTestBasic, false, ImmutableSet.of("test_type_2")); + }); + } + + Consumer setupCacheEntryTestCollection = + (session) -> { + session.execute("CREATE TYPE test_type_1 (a text, b int)"); + session.execute("CREATE TYPE test_type_2 (c int, d text)"); + session.execute( + "CREATE TABLE test_table_1 (e int primary key, f list>)"); + session.execute( + "CREATE TABLE test_table_2 (g int primary key, h list>)"); + }; + + @Test + public void should_invalidate_cache_entry_on_collection_udt_change_result_set() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationResultSetTest(setupCacheEntryTestCollection, ImmutableSet.of("test_type_2")); + }); + } + + @Test + public void should_invalidate_cache_entry_on_collection_udt_change_variable_defs() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationVariableDefsTest( + setupCacheEntryTestCollection, true, ImmutableSet.of("test_type_2")); + }); + } + + Consumer setupCacheEntryTestTuple = + (session) -> { + session.execute("CREATE TYPE test_type_1 (a text, b int)"); + session.execute("CREATE TYPE test_type_2 (c int, d text)"); + session.execute( + "CREATE TABLE test_table_1 (e int primary key, f tuple)"); + session.execute( + "CREATE TABLE test_table_2 (g int primary key, h tuple)"); + }; + + @Test + public void should_invalidate_cache_entry_on_tuple_udt_change_result_set() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationResultSetTest(setupCacheEntryTestTuple, ImmutableSet.of("test_type_2")); + }); + } + + @Test + public void should_invalidate_cache_entry_on_tuple_udt_change_variable_defs() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationVariableDefsTest( + setupCacheEntryTestTuple, false, ImmutableSet.of("test_type_2")); + }); + } + + Consumer setupCacheEntryTestNested = + (session) -> { + session.execute("CREATE TYPE test_type_1 (a text, b int)"); + session.execute("CREATE TYPE test_type_2 (c int, d text)"); + session.execute("CREATE TYPE test_type_3 (e frozen, f int)"); + session.execute("CREATE TYPE test_type_4 (g int, h frozen)"); + session.execute("CREATE TABLE test_table_1 (e 
int primary key, f frozen)"); + session.execute("CREATE TABLE test_table_2 (g int primary key, h frozen)"); + }; + + @Test + public void should_invalidate_cache_entry_on_nested_udt_change_result_set() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationResultSetTest( + setupCacheEntryTestNested, ImmutableSet.of("test_type_2", "test_type_4")); + }); + } + + @Test + public void should_invalidate_cache_entry_on_nested_udt_change_variable_defs() { + SchemaChangeSynchronizer.withLock( + () -> { + invalidationVariableDefsTest( + setupCacheEntryTestNested, false, ImmutableSet.of("test_type_2", "test_type_4")); + }); + } + + /* ========================= Infrastructure copied from PreparedStatementIT ========================= */ + private CqlSession sessionWithCacheSizeMetric() { + return SessionUtils.newSession( + ccmRule, + sessionRule.keyspace(), + SessionUtils.configLoaderBuilder() + .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .withStringList( + DefaultDriverOption.METRICS_SESSION_ENABLED, + ImmutableList.of(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())) + .build()); + } + + @SuppressWarnings("unchecked") + private static long getPreparedCacheSize(CqlSession session) { + return session + .getMetrics() + .flatMap(metrics -> metrics.getSessionMetric(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE)) + .map(metric -> ((Gauge) metric).getValue()) + .orElseThrow( + () -> + new AssertionError( + "Could not access metric " + + DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCancellationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCancellationIT.java new file mode 100644 index 00000000000..d7e581e4606 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementCancellationIT.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.datastax.oss.driver.core.cql;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.fail;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.PrepareRequest;
+import com.datastax.oss.driver.api.core.cql.PreparedStatement;
+import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
+import com.datastax.oss.driver.categories.IsolatedTests;
+import com.datastax.oss.driver.internal.core.context.DefaultDriverContext;
+import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor;
+import com.datastax.oss.driver.shaded.guava.common.base.Predicates;
+import com.datastax.oss.driver.shaded.guava.common.cache.Cache;
+import com.datastax.oss.driver.shaded.guava.common.collect.Iterables;
+import java.util.concurrent.CompletableFuture;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+@Category(IsolatedTests.class)
+public class PreparedStatementCancellationIT {
+
+  private CustomCcmRule ccmRule = CustomCcmRule.builder().build();
+
+  private SessionRule<CqlSession> sessionRule = SessionRule.builder(ccmRule).build();
+
+  @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+
+  @Before
+  public void setup() {
+    CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace());
+    session.execute("DROP TABLE IF EXISTS test_table_1");
+    session.execute("CREATE TABLE test_table_1 (k int primary key, v int)");
+    session.execute("INSERT INTO test_table_1 (k,v) VALUES (1, 100)");
+    session.execute("INSERT INTO test_table_1 (k,v) VALUES (2, 200)");
+    session.execute("INSERT INTO test_table_1 (k,v) VALUES (3, 300)");
+    session.close();
+  }
+
+  @After
+  public void teardown() {
+    CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace());
+    session.execute("DROP TABLE test_table_1");
+    session.close();
+  }
+
+  private CompletableFuture<PreparedStatement> toCompletableFuture(
+      CqlSession session, String cql) {
+    return session.prepareAsync(cql).toCompletableFuture();
+  }
+
+  private CqlPrepareAsyncProcessor findProcessor(CqlSession session) {
+    DefaultDriverContext context = (DefaultDriverContext) session.getContext();
+    return (CqlPrepareAsyncProcessor)
+        Iterables.find(
+            context.getRequestProcessorRegistry().getProcessors(),
+            Predicates.instanceOf(CqlPrepareAsyncProcessor.class));
+  }
+
+  @Test
+  public void should_cache_valid_cql() throws Exception {
+
+    CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace());
+    CqlPrepareAsyncProcessor processor = findProcessor(session);
+    Cache<PrepareRequest, CompletableFuture<PreparedStatement>> cache = processor.getCache();
+    assertThat(cache.size()).isEqualTo(0);
+
+    // Make multiple CompletableFuture requests for the specified CQL, then wait until
+    // the cached request finishes and confirm that all futures got the same values
+    String cql = "select v from test_table_1 where k = ?";
+    CompletableFuture<PreparedStatement> cf1 = toCompletableFuture(session, cql);
+    CompletableFuture<PreparedStatement> cf2 = toCompletableFuture(session, cql);
+    assertThat(cache.size()).isEqualTo(1);
+
+    CompletableFuture<PreparedStatement> future = Iterables.get(cache.asMap().values(), 0);
+    PreparedStatement stmt = future.get();
+
+    assertThat(cf1.isDone()).isTrue();
+    assertThat(cf2.isDone()).isTrue();
+
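+    // Both client-facing futures are backed by the same cache entry, so they complete
+    // with the same PreparedStatement instance held by the cached future.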
+    assertThat(cf1.join()).isEqualTo(stmt);
+    assertThat(cf2.join()).isEqualTo(stmt);
+  }
+
+  // A holdover from work done on JAVA-3055. This probably isn't _desired_ behaviour but this test
+  // documents the fact that the current driver impl will behave in this way. We should probably
+  // consider changing this in a future release, although it's worthwhile fully considering the
+  // implications of such a change.
+  @Test
+  public void will_cache_invalid_cql() throws Exception {
+
+    CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace());
+    CqlPrepareAsyncProcessor processor = findProcessor(session);
+    Cache<PrepareRequest, CompletableFuture<PreparedStatement>> cache = processor.getCache();
+    assertThat(cache.size()).isEqualTo(0);
+
+    // Verify that we still get a CompletableFuture when the CQL is invalid, and that the
+    // failed prepare attempt nevertheless occupies a cache entry (see the comment above)
+    String cql = "select v fromfrom test_table_1 where k = ?";
+    CompletableFuture<PreparedStatement> cf = toCompletableFuture(session, cql);
+
+    // join() should throw due to the invalid syntax... for purposes of this test we
+    // can ignore this
+    try {
+      cf.join();
+      fail("expected the prepare future to complete exceptionally");
+    } catch (Exception e) {
+      // expected
+    }
+
+    assertThat(cache.size()).isEqualTo(1);
+  }
+
+  @Test
+  public void should_not_affect_cache_if_returned_futures_are_cancelled() throws Exception {
+
+    CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace());
+    CqlPrepareAsyncProcessor processor = findProcessor(session);
+    Cache<PrepareRequest, CompletableFuture<PreparedStatement>> cache = processor.getCache();
+    assertThat(cache.size()).isEqualTo(0);
+
+    String cql = "select v from test_table_1 where k = ?";
+    CompletableFuture<PreparedStatement> cf = toCompletableFuture(session, cql);
+
+    assertThat(cf.isCancelled()).isFalse();
+    assertThat(cf.cancel(false)).isTrue();
+    assertThat(cf.isCancelled()).isTrue();
+    assertThat(cf.isCompletedExceptionally()).isTrue();
+
+    // Confirm that cancelling the CompletableFuture returned to the user does _not_ cancel the
+    // future used within the cache. CacheEntry very deliberately doesn't maintain a reference
+    // to its contained CompletableFuture so we have to get at this by secondary effects.
+    assertThat(cache.size()).isEqualTo(1);
+    CompletableFuture<PreparedStatement> future = Iterables.get(cache.asMap().values(), 0);
+    PreparedStatement rv = future.get();
+    assertThat(rv).isNotNull();
+    assertThat(rv.getQuery()).isEqualTo(cql);
+    assertThat(cache.size()).isEqualTo(1);
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PreparedStatementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementIT.java
similarity index 56%
rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PreparedStatementIT.java
rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementIT.java
index 994890eeb81..5671a7684e5 100644
--- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/PreparedStatementIT.java
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/PreparedStatementIT.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,35 +15,47 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; -import static junit.framework.TestCase.fail; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.catchThrowable; import com.codahale.metrics.Gauge; import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultProtocolVersion; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.metadata.token.DefaultTokenMap; +import com.datastax.oss.driver.internal.core.metadata.token.TokenFactory; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.protocol.internal.util.Bytes; import com.google.common.collect.ImmutableList; import java.nio.ByteBuffer; import java.time.Duration; import java.util.concurrent.CompletionStage; +import junit.framework.TestCase; +import org.assertj.core.api.AbstractThrowableAssert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @@ -56,42 +70,40 @@ @Category(ParallelizableTests.class) public class PreparedStatementIT { - private final CcmRule ccmRule = CcmRule.getInstance(); + private CcmRule ccmRule = CcmRule.getInstance(); - private final SessionRule sessionRule = + private SessionRule sessionRule = SessionRule.builder(ccmRule) .withConfigLoader( SessionUtils.configLoaderBuilder() .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) 
.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) - .withStringList( - DefaultDriverOption.METRICS_SESSION_ENABLED, - ImmutableList.of(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())) .build()) .build(); @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); - @Rule public ExpectedException thrown = ExpectedException.none(); - @Before public void setupSchema() { for (String query : ImmutableList.of( + "DROP TABLE IF EXISTS prepared_statement_test", "CREATE TABLE prepared_statement_test (a int PRIMARY KEY, b int, c int)", "INSERT INTO prepared_statement_test (a, b, c) VALUES (1, 1, 1)", "INSERT INTO prepared_statement_test (a, b, c) VALUES (2, 2, 2)", "INSERT INTO prepared_statement_test (a, b, c) VALUES (3, 3, 3)", "INSERT INTO prepared_statement_test (a, b, c) VALUES (4, 4, 4)")) { - sessionRule - .session() - .execute( - SimpleStatement.builder(query) - .setExecutionProfile(sessionRule.slowProfile()) - .build()); + executeDdl(query); } } + private void executeDdl(String query) { + sessionRule + .session() + .execute( + SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + } + @Test public void should_have_empty_result_definitions_for_insert_query_without_bound_variable() { try (CqlSession session = SessionUtils.newSession(ccmRule, sessionRule.keyspace())) { @@ -109,13 +121,7 @@ public void should_have_non_empty_result_definitions_for_insert_query_with_bound PreparedStatement prepared = session.prepare("INSERT INTO prepared_statement_test (a, b, c) VALUES (?, ?, ?)"); assertThat(prepared.getVariableDefinitions()).hasSize(3); - if (sessionRule.session().getContext().getProtocolVersion().getCode() - >= DefaultProtocolVersion.V4.getCode()) { - // partition key indices were introduced in V4 - assertThat(prepared.getPartitionKeyIndices()).hasSize(1); - } else { - assertThat(prepared.getPartitionKeyIndices()).isEmpty(); - } + assertThat(prepared.getPartitionKeyIndices()).hasSize(1); assertThat(prepared.getResultSetDefinitions()).isEmpty(); } } @@ -137,19 +143,13 @@ public void should_have_non_empty_variable_definitions_for_select_query_with_bou PreparedStatement prepared = session.prepare("SELECT a,b,c FROM prepared_statement_test WHERE a = ?"); assertThat(prepared.getVariableDefinitions()).hasSize(1); - if (sessionRule.session().getContext().getProtocolVersion().getCode() - >= DefaultProtocolVersion.V4.getCode()) { - // partition key indices were introduced in V4 - assertThat(prepared.getPartitionKeyIndices()).hasSize(1); - } else { - assertThat(prepared.getPartitionKeyIndices()).isEmpty(); - } + assertThat(prepared.getPartitionKeyIndices()).hasSize(1); assertThat(prepared.getResultSetDefinitions()).hasSize(3); } } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_update_metadata_when_schema_changed_across_executions() { // Given CqlSession session = sessionRule.session(); @@ -178,7 +178,7 @@ public void should_update_metadata_when_schema_changed_across_executions() { } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_update_metadata_when_schema_changed_across_pages() { // Given CqlSession session = sessionRule.session(); @@ -194,7 +194,7 @@ public void should_update_metadata_when_schema_changed_across_pages() { for (Row row : rows.currentPage()) { try { row.getInt("d"); - fail("expected an error"); + TestCase.fail("expected an 
error"); } catch (IllegalArgumentException e) { /*expected*/ } @@ -223,7 +223,7 @@ public void should_update_metadata_when_schema_changed_across_pages() { } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_update_metadata_when_schema_changed_across_sessions() { // Given CqlSession session1 = sessionRule.session(); @@ -270,7 +270,7 @@ public void should_update_metadata_when_schema_changed_across_sessions() { } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_fail_to_reprepare_if_query_becomes_invalid() { // Given CqlSession session = sessionRule.session(); @@ -279,21 +279,23 @@ public void should_fail_to_reprepare_if_query_becomes_invalid() { session.prepare("SELECT a, b, c, d FROM prepared_statement_test WHERE a = ?"); session.execute("ALTER TABLE prepared_statement_test DROP d"); - thrown.expect(InvalidQueryException.class); - thrown.expectMessage("Undefined column name d"); - // When - session.execute(ps.bind()); + Throwable t = catchThrowable(() -> session.execute(ps.bind())); + + // Then + assertThat(t) + .isInstanceOf(InvalidQueryException.class) + .hasMessageContaining("Undefined column name d"); } @Test - @CassandraRequirement(min = "4.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") public void should_not_store_metadata_for_conditional_updates() { should_not_store_metadata_for_conditional_updates(sessionRule.session()); } @Test - @CassandraRequirement(min = "2.2") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") public void should_not_store_metadata_for_conditional_updates_in_legacy_protocol() { DriverConfigLoader loader = SessionUtils.configLoaderBuilder() @@ -365,62 +367,202 @@ private void should_not_store_metadata_for_conditional_updates(CqlSession sessio @Test public void should_return_same_instance_when_repreparing_query() { - // Given - CqlSession session = sessionRule.session(); - assertThat(getPreparedCacheSize(session)).isEqualTo(0); - String query = "SELECT * FROM prepared_statement_test WHERE a = ?"; - - // When - PreparedStatement preparedStatement1 = session.prepare(query); - PreparedStatement preparedStatement2 = session.prepare(query); - - // Then - assertThat(preparedStatement1).isSameAs(preparedStatement2); - assertThat(getPreparedCacheSize(session)).isEqualTo(1); + try (CqlSession session = sessionWithCacheSizeMetric()) { + // Given + assertThat(getPreparedCacheSize(session)).isEqualTo(0); + String query = "SELECT * FROM prepared_statement_test WHERE a = ?"; + + // Send prepare requests, keep hold of CompletionStage objects to prevent them being removed + // from CqlPrepareAsyncProcessor#cache, see JAVA-3062 + CompletionStage preparedStatement1Future = session.prepareAsync(query); + CompletionStage preparedStatement2Future = session.prepareAsync(query); + + PreparedStatement preparedStatement1 = + CompletableFutures.getUninterruptibly(preparedStatement1Future); + PreparedStatement preparedStatement2 = + CompletableFutures.getUninterruptibly(preparedStatement2Future); + + // Then + assertThat(preparedStatement1).isSameAs(preparedStatement2); + assertThat(getPreparedCacheSize(session)).isEqualTo(1); + } } /** Just to illustrate that the driver does not sanitize query strings. 
*/ @Test public void should_create_separate_instances_for_differently_formatted_queries() { - // Given - CqlSession session = sessionRule.session(); - assertThat(getPreparedCacheSize(session)).isEqualTo(0); + try (CqlSession session = sessionWithCacheSizeMetric()) { + // Given + assertThat(getPreparedCacheSize(session)).isEqualTo(0); + + // Send prepare requests, keep hold of CompletionStage objects to prevent them being removed + // from CqlPrepareAsyncProcessor#cache, see JAVA-3062 + CompletionStage preparedStatement1Future = + session.prepareAsync("SELECT * FROM prepared_statement_test WHERE a = ?"); + CompletionStage preparedStatement2Future = + session.prepareAsync("select * from prepared_statement_test where a = ?"); + + PreparedStatement preparedStatement1 = + CompletableFutures.getUninterruptibly(preparedStatement1Future); + PreparedStatement preparedStatement2 = + CompletableFutures.getUninterruptibly(preparedStatement2Future); + + // Then + assertThat(preparedStatement1).isNotSameAs(preparedStatement2); + assertThat(getPreparedCacheSize(session)).isEqualTo(2); + } + } - // When - PreparedStatement preparedStatement1 = - session.prepare("SELECT * FROM prepared_statement_test WHERE a = ?"); - PreparedStatement preparedStatement2 = - session.prepare("select * from prepared_statement_test where a = ?"); + @Test + public void should_create_separate_instances_for_different_statement_parameters() { + try (CqlSession session = sessionWithCacheSizeMetric()) { + // Given + assertThat(getPreparedCacheSize(session)).isEqualTo(0); + SimpleStatement statement = + SimpleStatement.newInstance("SELECT * FROM prepared_statement_test"); + + // Send prepare requests, keep hold of CompletionStage objects to prevent them being removed + // from CqlPrepareAsyncProcessor#cache, see JAVA-3062 + CompletionStage preparedStatement1Future = + session.prepareAsync(statement.setPageSize(1)); + CompletionStage preparedStatement2Future = + session.prepareAsync(statement.setPageSize(4)); + + PreparedStatement preparedStatement1 = + CompletableFutures.getUninterruptibly(preparedStatement1Future); + PreparedStatement preparedStatement2 = + CompletableFutures.getUninterruptibly(preparedStatement2Future); + + // Then + assertThat(preparedStatement1).isNotSameAs(preparedStatement2); + assertThat(getPreparedCacheSize(session)).isEqualTo(2); + // Each bound statement uses the page size it was prepared with + assertThat(firstPageOf(session.executeAsync(preparedStatement1.bind()))).hasSize(1); + assertThat(firstPageOf(session.executeAsync(preparedStatement2.bind()))).hasSize(4); + } + } - // Then - assertThat(preparedStatement1).isNotSameAs(preparedStatement2); - assertThat(getPreparedCacheSize(session)).isEqualTo(2); + /** + * This method reproduces CASSANDRA-15252 which is fixed in 3.0.26/3.11.12/4.0.2. 
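+   * The statement is first prepared with an explicitly qualified keyspace, the session then
+   * switches keyspace with USE, and the table is dropped and re-created; re-executing the bound
+   * statement forces a reprepare, and affected server versions compute a different statement id
+   * than the one returned by the original prepare.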
+ * + * @see CASSANDRA-15252 + */ + private AbstractThrowableAssert assertableReprepareAfterIdChange() { + try (CqlSession session = SessionUtils.newSession(ccmRule)) { + PreparedStatement preparedStatement = + session.prepare( + String.format( + "SELECT * FROM %s.prepared_statement_test WHERE a = ?", sessionRule.keyspace())); + + session.execute("USE " + sessionRule.keyspace().asCql(false)); + + // Drop and recreate the table to invalidate the prepared statement server-side + executeDdl("DROP TABLE prepared_statement_test"); + executeDdl("CREATE TABLE prepared_statement_test (a int PRIMARY KEY, b int, c int)"); + + return assertThatCode(() -> session.execute(preparedStatement.bind(1))); + } } + // Add version bounds to the DSE requirement if there is a version containing fix for + // CASSANDRA-15252 + @BackendRequirement( + type = BackendType.DSE, + description = "No DSE version contains fix for CASSANDRA-15252") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "3.0.0", maxExclusive = "3.0.26") + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.11.0", + maxExclusive = "3.11.12") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0.0", maxExclusive = "4.0.2") @Test - public void should_create_separate_instances_for_different_statement_parameters() { - // Given + public void should_fail_fast_if_id_changes_on_reprepare() { + assertableReprepareAfterIdChange() + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("ID mismatch while trying to reprepare"); + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.0.26", + maxExclusive = "3.11.0") + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.11.12", + maxExclusive = "4.0.0") + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0.2") + @Test + public void handle_id_changes_on_reprepare() { + assertableReprepareAfterIdChange().doesNotThrowAnyException(); + } + + @Test + public void should_infer_routing_information_when_partition_key_is_bound() { + should_infer_routing_information_when_partition_key_is_bound( + "SELECT a FROM prepared_statement_test WHERE a = ?"); + should_infer_routing_information_when_partition_key_is_bound( + "INSERT INTO prepared_statement_test (a) VALUES (?)"); + should_infer_routing_information_when_partition_key_is_bound( + "UPDATE prepared_statement_test SET b = 1 WHERE a = ?"); + should_infer_routing_information_when_partition_key_is_bound( + "DELETE FROM prepared_statement_test WHERE a = ?"); + } + + private void should_infer_routing_information_when_partition_key_is_bound(String queryString) { CqlSession session = sessionRule.session(); - assertThat(getPreparedCacheSize(session)).isEqualTo(0); - SimpleStatement statement = - SimpleStatement.newInstance("SELECT * FROM prepared_statement_test"); + TokenFactory tokenFactory = + ((DefaultTokenMap) session.getMetadata().getTokenMap().orElseThrow(AssertionError::new)) + .getTokenFactory(); - // When - PreparedStatement preparedStatement1 = session.prepare(statement.setPageSize(1)); - PreparedStatement preparedStatement2 = session.prepare(statement.setPageSize(4)); + // We'll bind a=1 in the query, check what token this is supposed to produce + Token expectedToken = + session + .execute("SELECT token(a) FROM prepared_statement_test WHERE a = 1") + .one() + .getToken(0); - // Then - assertThat(preparedStatement1).isNotSameAs(preparedStatement2); - assertThat(getPreparedCacheSize(session)).isEqualTo(2); - // Each bound statement uses 
the page size it was prepared with - assertThat(firstPageOf(session.executeAsync(preparedStatement1.bind()))).hasSize(1); - assertThat(firstPageOf(session.executeAsync(preparedStatement2.bind()))).hasSize(4); + BoundStatement boundStatement = session.prepare(queryString).bind().setInt("a", 1); + + assertThat(boundStatement.getRoutingKeyspace()).isEqualTo(sessionRule.keyspace()); + assertThat(tokenFactory.hash(boundStatement.getRoutingKey())).isEqualTo(expectedToken); + } + + @Test + public void should_return_null_routing_information_when_single_partition_key_is_unbound() { + should_return_null_routing_information_when_single_partition_key_is_unbound( + "SELECT a FROM prepared_statement_test WHERE a = ?"); + should_return_null_routing_information_when_single_partition_key_is_unbound( + "INSERT INTO prepared_statement_test (a) VALUES (?)"); + should_return_null_routing_information_when_single_partition_key_is_unbound( + "UPDATE prepared_statement_test SET b = 1 WHERE a = ?"); + should_return_null_routing_information_when_single_partition_key_is_unbound( + "DELETE FROM prepared_statement_test WHERE a = ?"); + } + + private void should_return_null_routing_information_when_single_partition_key_is_unbound( + String queryString) { + CqlSession session = sessionRule.session(); + BoundStatement boundStatement = session.prepare(queryString).bind(); + assertThat(boundStatement.getRoutingKey()).isNull(); } private static Iterable firstPageOf(CompletionStage stage) { return CompletableFutures.getUninterruptibly(stage).currentPage(); } + private CqlSession sessionWithCacheSizeMetric() { + return SessionUtils.newSession( + ccmRule, + sessionRule.keyspace(), + SessionUtils.configLoaderBuilder() + .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 2) + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .withStringList( + DefaultDriverOption.METRICS_SESSION_ENABLED, + ImmutableList.of(DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE.getPath())) + .build()); + } + @SuppressWarnings("unchecked") private static long getPreparedCacheSize(CqlSession session) { return session diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/QueryTraceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/QueryTraceIT.java new file mode 100644 index 00000000000..37a600efbc4 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/QueryTraceIT.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.core.cql; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.QueryTrace; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class QueryTraceIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @Test + public void should_not_have_tracing_id_when_tracing_disabled() { + ExecutionInfo executionInfo = + SESSION_RULE + .session() + .execute("SELECT release_version FROM system.local") + .getExecutionInfo(); + + assertThat(executionInfo.getTracingId()).isNull(); + + Throwable t = catchThrowable(executionInfo::getQueryTrace); + + assertThat(t) + .isInstanceOf(IllegalStateException.class) + .hasMessage("Tracing was disabled for this request"); + } + + @Test + public void should_fetch_trace_when_tracing_enabled() { + ExecutionInfo executionInfo = + SESSION_RULE + .session() + .execute( + SimpleStatement.builder("SELECT release_version FROM system.local") + .setTracing() + .build()) + .getExecutionInfo(); + + assertThat(executionInfo.getTracingId()).isNotNull(); + + EndPoint contactPoint = CCM_RULE.getContactPoints().iterator().next(); + InetAddress nodeAddress = ((InetSocketAddress) contactPoint.resolve()).getAddress(); + boolean expectPorts = + CCM_RULE.getCassandraVersion().nextStable().compareTo(Version.V4_0_0) >= 0 + && !CCM_RULE.isDistributionOf(BackendType.DSE); + + QueryTrace queryTrace = executionInfo.getQueryTrace(); + assertThat(queryTrace.getTracingId()).isEqualTo(executionInfo.getTracingId()); + assertThat(queryTrace.getRequestType()).isEqualTo("Execute CQL3 query"); + assertThat(queryTrace.getDurationMicros()).isPositive(); + assertThat(queryTrace.getCoordinatorAddress().getAddress()).isEqualTo(nodeAddress); + if (expectPorts) { + Row row = + SESSION_RULE + .session() + .execute( + "SELECT coordinator_port FROM system_traces.sessions WHERE session_id = " + + queryTrace.getTracingId()) + .one(); + assertThat(row).isNotNull(); + int expectedPort = row.getInt(0); + assertThat(queryTrace.getCoordinatorAddress().getPort()).isEqualTo(expectedPort); + } else { + assertThat(queryTrace.getCoordinatorAddress().getPort()).isEqualTo(0); + } + assertThat(queryTrace.getParameters()) + .containsEntry("consistency_level", "LOCAL_ONE") + .containsEntry("page_size", "5000") + .containsEntry("query", "SELECT release_version FROM system.local") + .containsEntry("serial_consistency_level", "SERIAL"); + 
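+    // The coordinator records these request parameters in system_traces.sessions; assert only
+    // on entries that are stable across server versions.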
assertThat(queryTrace.getStartedAt()).isPositive(); + // Don't want to get too deep into event testing because that could change across versions + assertThat(queryTrace.getEvents()).isNotEmpty(); + InetSocketAddress sourceAddress0 = queryTrace.getEvents().get(0).getSourceAddress(); + assertThat(sourceAddress0).isNotNull(); + assertThat(sourceAddress0.getAddress()).isEqualTo(nodeAddress); + if (expectPorts) { + Row row = + SESSION_RULE + .session() + .execute( + "SELECT source_port FROM system_traces.events WHERE session_id = " + + queryTrace.getTracingId() + + " LIMIT 1") + .one(); + assertThat(row).isNotNull(); + int expectedPort = row.getInt(0); + assertThat(sourceAddress0.getPort()).isEqualTo(expectedPort); + } else { + assertThat(sourceAddress0.getPort()).isEqualTo(0); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/SimpleStatementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementCcmIT.java similarity index 71% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/SimpleStatementIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementCcmIT.java index 2cc6d520da5..b903f59efcc 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/cql/SimpleStatementIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementCcmIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,86 +15,69 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.cql; +package com.datastax.oss.driver.core.cql; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; -import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static org.assertj.core.api.Assertions.assertThat; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; -import com.datastax.oss.driver.api.core.DriverTimeoutException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; -import com.datastax.oss.protocol.internal.Message; -import com.datastax.oss.protocol.internal.request.Query; -import com.datastax.oss.simulacron.common.cluster.ClusterSpec; -import com.datastax.oss.simulacron.common.cluster.QueryLog; -import java.time.Duration; import java.util.List; import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; -import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestName; import org.junit.rules.TestRule; @Category(ParallelizableTests.class) -public class SimpleStatementIT { - - private static CcmRule ccm = CcmRule.getInstance(); +public class SimpleStatementCcmIT { - private static SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = - SessionRule.builder(ccm) + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 20) .build()) .build(); - private static SessionRule simulacronSessionRule = - SessionRule.builder(simulacron).build(); - - @ClassRule public static TestRule ccmChain = RuleChain.outerRule(ccm).around(sessionRule); - @ClassRule - public static TestRule simulacronChain = - RuleChain.outerRule(simulacron).around(simulacronSessionRule); + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @Rule public TestName name = new TestName(); - @Rule public ExpectedException thrown = ExpectedException.none(); - private static final String KEY = "test"; @BeforeClass public static void setupSchema() { // table where every column forms the primary key. 
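    // A single partition key value (KEY) with 100 clustering rows lets the paging tests below
    // assert exact page boundaries (page size 20 => the second page starts at v = 20).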
- sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder( "CREATE TABLE IF NOT EXISTS test (k text, v int, PRIMARY KEY(k, v))") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); for (int i = 0; i < 100; i++) { - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder("INSERT INTO test (k, v) VALUES (?, ?)") @@ -101,32 +86,26 @@ public static void setupSchema() { } // table with simple primary key, single cell. - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder("CREATE TABLE IF NOT EXISTS test2 (k text primary key, v int)") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); } - @Before - public void clearPrimes() { - simulacron.cluster().clearLogs(); - simulacron.cluster().clearPrimes(true); - } - @Test public void should_use_paging_state_when_copied() { Statement st = SimpleStatement.builder(String.format("SELECT v FROM test WHERE k='%s'", KEY)).build(); - ResultSet result = sessionRule.session().execute(st); + ResultSet result = SESSION_RULE.session().execute(st); // given a query created from a copy of a previous query with paging state from previous queries // response. st = st.copy(result.getExecutionInfo().getPagingState()); // when executing that query. - result = sessionRule.session().execute(st); + result = SESSION_RULE.session().execute(st); // then the response should start on the page boundary. assertThat(result.iterator().next().getInt("v")).isEqualTo(20); @@ -136,7 +115,7 @@ public void should_use_paging_state_when_copied() { public void should_use_paging_state_when_provided_to_new_statement() { Statement st = SimpleStatement.builder(String.format("SELECT v FROM test WHERE k='%s'", KEY)).build(); - ResultSet result = sessionRule.session().execute(st); + ResultSet result = SESSION_RULE.session().execute(st); // given a query created from a copy of a previous query with paging state from previous queries // response. @@ -146,7 +125,7 @@ public void should_use_paging_state_when_provided_to_new_statement() { .build(); // when executing that query. - result = sessionRule.session().execute(st); + result = SESSION_RULE.session().execute(st); // then the response should start on the page boundary. assertThat(result.iterator().next().getInt("v")).isEqualTo(20); @@ -157,7 +136,7 @@ public void should_use_paging_state_when_provided_to_new_statement() { public void should_fail_if_using_paging_state_from_different_query() { Statement st = SimpleStatement.builder("SELECT v FROM test WHERE k=:k").addNamedValue("k", KEY).build(); - ResultSet result = sessionRule.session().execute(st); + ResultSet result = SESSION_RULE.session().execute(st); // TODO Expect PagingStateException @@ -178,7 +157,7 @@ public void should_use_timestamp_when_set() { .setQueryTimestamp(timestamp) .build(); - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // when retrieving writetime of cell from that insert. 
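    // (writetime of the cell should echo the timestamp explicitly set on the insert statement)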
SimpleStatement select = @@ -186,7 +165,7 @@ public void should_use_timestamp_when_set() { .addPositionalValue(name.getMethodName()) .build(); - ResultSet result = sessionRule.session().execute(select); + ResultSet result = SESSION_RULE.session().execute(select); List rows = result.all(); assertThat(rows).hasSize(1); @@ -200,10 +179,9 @@ public void should_use_timestamp_when_set() { public void should_use_tracing_when_set() { // TODO currently there's no way to validate tracing was set since trace id is not set // also write test to verify it is not set. - ResultSet result = - sessionRule - .session() - .execute(SimpleStatement.builder("select * from test").setTracing().build()); + SESSION_RULE + .session() + .execute(SimpleStatement.builder("select * from test").setTracing().build()); } @Test @@ -216,7 +194,7 @@ public void should_use_positional_values() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then we should be able to retrieve the data as inserted. SimpleStatement select = @@ -224,7 +202,7 @@ public void should_use_positional_values() { .addPositionalValue(name.getMethodName()) .build(); - ResultSet result = sessionRule.session().execute(select); + ResultSet result = SESSION_RULE.session().execute(select); List rows = result.all(); assertThat(rows).hasSize(1); @@ -243,7 +221,7 @@ public void should_allow_nulls_in_positional_values() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then we should be able to retrieve the data as inserted. SimpleStatement select = @@ -251,7 +229,7 @@ public void should_allow_nulls_in_positional_values() { .addPositionalValue(name.getMethodName()) .build(); - ResultSet result = sessionRule.session().execute(select); + ResultSet result = SESSION_RULE.session().execute(select); List rows = result.all(); assertThat(rows).hasSize(1); @@ -269,7 +247,7 @@ public void should_fail_when_too_many_positional_values_provided() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then the server will throw an InvalidQueryException which is thrown up to the client. } @@ -283,7 +261,7 @@ public void should_fail_when_not_enough_positional_values_provided() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then the server will throw an InvalidQueryException which is thrown up to the client. } @@ -298,7 +276,7 @@ public void should_use_named_values() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then we should be able to retrieve the data as inserted. SimpleStatement select = @@ -306,7 +284,7 @@ public void should_use_named_values() { .addNamedValue("k", name.getMethodName()) .build(); - ResultSet result = sessionRule.session().execute(select); + ResultSet result = SESSION_RULE.session().execute(select); List rows = result.all(); assertThat(rows).hasSize(1); @@ -325,7 +303,7 @@ public void should_allow_nulls_in_named_values() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then we should be able to retrieve the data as inserted. 
SimpleStatement select = @@ -333,7 +311,7 @@ public void should_allow_nulls_in_named_values() { .addNamedValue("k", name.getMethodName()) .build(); - ResultSet result = sessionRule.session().execute(select); + ResultSet result = SESSION_RULE.session().execute(select); List rows = result.all(); assertThat(rows).hasSize(1); @@ -351,7 +329,7 @@ public void should_fail_when_named_value_missing() { .build(); // when executing that statement - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); // then the server will throw an InvalidQueryException which is thrown up to the client. } @@ -378,56 +356,17 @@ public void should_use_positional_value_with_case_sensitive_id() { SimpleStatement.builder("SELECT count(*) FROM test2 WHERE k=:\"theKey\"") .addNamedValue(CqlIdentifier.fromCql("\"theKey\""), 0) .build(); - Row row = sessionRule.session().execute(statement).one(); + Row row = SESSION_RULE.session().execute(statement).one(); assertThat(row.getLong(0)).isEqualTo(0); } @Test public void should_use_page_size() { Statement st = SimpleStatement.builder("SELECT v FROM test").setPageSize(10).build(); - CompletionStage future = sessionRule.session().executeAsync(st); + CompletionStage future = SESSION_RULE.session().executeAsync(st); AsyncResultSet result = CompletableFutures.getUninterruptibly(future); // Should have only fetched 10 (page size) rows. assertThat(result.remaining()).isEqualTo(10); } - - @Test - public void should_use_consistencies() { - SimpleStatement st = - SimpleStatement.builder("SELECT * FROM test where k = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO) - .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) - .build(); - simulacronSessionRule.session().execute(st); - - List logs = simulacron.cluster().getLogs().getQueryLogs(); - assertThat(logs).hasSize(1); - - QueryLog log = logs.get(0); - - Message message = log.getFrame().message; - assertThat(message).isInstanceOf(Query.class); - Query query = (Query) message; - assertThat(query.options.consistency).isEqualTo(DefaultConsistencyLevel.TWO.getProtocolCode()); - assertThat(query.options.serialConsistency) - .isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL.getProtocolCode()); - } - - @Test - public void should_use_timeout() { - simulacron - .cluster() - .prime(when("mock query").then(noRows()).delay(1500, TimeUnit.MILLISECONDS)); - SimpleStatement st = - SimpleStatement.builder("mock query") - .setTimeout(Duration.ofSeconds(1)) - .setConsistencyLevel(DefaultConsistencyLevel.ONE) - .build(); - - thrown.expect(DriverTimeoutException.class); - thrown.expectMessage("Query timed out after PT1S"); - - simulacronSessionRule.session().execute(st); - } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementSimulacronIT.java new file mode 100644 index 00000000000..bb8b4f6b731 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/SimpleStatementSimulacronIT.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.cql; + +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.request.Query; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.cluster.QueryLog; +import java.time.Duration; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class SimpleStatementSimulacronIT { + + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); + + @Before + public void clearPrimes() { + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + } + + @Test + public void should_use_consistencies() { + SimpleStatement st = + SimpleStatement.builder("SELECT * FROM test where k = ?") + .setConsistencyLevel(DefaultConsistencyLevel.TWO) + .setSerialConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) + .build(); + SESSION_RULE.session().execute(st); + + List logs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs(); + assertThat(logs).hasSize(1); + + QueryLog log = logs.get(0); + + Message message = log.getFrame().message; + assertThat(message).isInstanceOf(Query.class); + Query query = (Query) message; + assertThat(query.options.consistency).isEqualTo(DefaultConsistencyLevel.TWO.getProtocolCode()); + assertThat(query.options.serialConsistency) + .isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL.getProtocolCode()); + } + + @Test + public void should_use_timeout() { + SIMULACRON_RULE + .cluster() + .prime(when("mock query").then(noRows()).delay(1500, TimeUnit.MILLISECONDS)); + SimpleStatement st = + SimpleStatement.builder("mock query") + .setTimeout(Duration.ofSeconds(1)) + .setConsistencyLevel(DefaultConsistencyLevel.ONE) + .build(); + + Throwable t = catchThrowable(() -> SESSION_RULE.session().execute(st)); + + assertThat(t) + .isInstanceOf(DriverTimeoutException.class) + .hasMessage("Query timed out after 
PT1S"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/DefaultReactiveResultSetIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/DefaultReactiveResultSetIT.java new file mode 100644 index 00000000000..c00cf064e51 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/DefaultReactiveResultSetIT.java @@ -0,0 +1,316 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.cql.reactive; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.BatchStatement; +import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.DefaultBatchType; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import edu.umd.cs.findbugs.annotations.NonNull; +import io.reactivex.Flowable; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +@Category(ParallelizableTests.class) +public class DefaultReactiveResultSetIT { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @BeforeClass + public static void initialize() { + CqlSession session = sessionRule.session(); + SchemaChangeSynchronizer.withLock( + () -> { + 
session.execute("DROP TABLE IF EXISTS test_reactive_read"); + session.execute("DROP TABLE IF EXISTS test_reactive_write"); + session.checkSchemaAgreement(); + session.execute( + SimpleStatement.builder( + "CREATE TABLE test_reactive_read (pk int, cc int, v int, PRIMARY KEY ((pk), cc))") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder( + "CREATE TABLE test_reactive_write (pk int, cc int, v int, PRIMARY KEY ((pk), cc))") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.checkSchemaAgreement(); + }); + for (int i = 0; i < 1000; i++) { + session.execute( + SimpleStatement.builder("INSERT INTO test_reactive_read (pk, cc, v) VALUES (0, ?, ?)") + .addPositionalValue(i) + .addPositionalValue(i) + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + } + + @Before + public void truncateTables() throws Exception { + CqlSession session = sessionRule.session(); + session.execute( + SimpleStatement.builder("TRUNCATE test_reactive_write") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + + @Test + @DataProvider( + value = {"1", "10", "100", "999", "1000", "1001", "2000"}, + format = "%m [page size %p[0]]") + public void should_retrieve_all_rows(int pageSize) { + DriverExecutionProfile profile = + sessionRule + .session() + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, pageSize); + SimpleStatement statement = + SimpleStatement.builder("SELECT cc, v FROM test_reactive_read WHERE pk = 0") + .setExecutionProfile(profile) + .build(); + ReactiveResultSet rs = sessionRule.session().executeReactive(statement); + List results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results.size()).isEqualTo(1000); + Set expectedExecInfos = new LinkedHashSet<>(); + for (int i = 0; i < results.size(); i++) { + ReactiveRow row = results.get(i); + assertThat(row.getColumnDefinitions()).isNotNull(); + assertThat(row.getExecutionInfo()).isNotNull(); + assertThat(row.wasApplied()).isTrue(); + assertThat(row.getInt("cc")).isEqualTo(i); + assertThat(row.getInt("v")).isEqualTo(i); + expectedExecInfos.add(row.getExecutionInfo()); + } + + List execInfos = + Flowable.fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + // DSE may send an empty page as it can't always know if it's done paging or not yet. + // See: CASSANDRA-8871. In this case, this page's execution info appears in + // rs.getExecutionInfos(), but is not present in expectedExecInfos since the page did not + // contain any rows. 
+ assertThat(execInfos).containsAll(expectedExecInfos); + + List<ColumnDefinitions> colDefs = + Flowable.<ColumnDefinitions>fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + ReactiveRow first = results.get(0); + assertThat(colDefs).hasSize(1).containsExactly(first.getColumnDefinitions()); + + List<Boolean> wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(first.wasApplied()); + } + + @Test + public void should_write() { + SimpleStatement statement = + SimpleStatement.builder("INSERT INTO test_reactive_write (pk, cc, v) VALUES (?, ?, ?)") + .addPositionalValue(0) + .addPositionalValue(1) + .addPositionalValue(2) + .setExecutionProfile(sessionRule.slowProfile()) + .build(); + ReactiveResultSet rs = sessionRule.session().executeReactive(statement); + List<ReactiveRow> results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).isEmpty(); + + List<ExecutionInfo> execInfos = + Flowable.<ExecutionInfo>fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + assertThat(execInfos).hasSize(1); + + List<ColumnDefinitions> colDefs = + Flowable.<ColumnDefinitions>fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + assertThat(colDefs).hasSize(1).containsExactly(EmptyColumnDefinitions.INSTANCE); + + List<Boolean> wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(true); + } + + @Test + public void should_write_cas() { + SimpleStatement statement = + SimpleStatement.builder( + "INSERT INTO test_reactive_write (pk, cc, v) VALUES (?, ?, ?) IF NOT EXISTS") + .addPositionalValue(0) + .addPositionalValue(1) + .addPositionalValue(2) + .setExecutionProfile(sessionRule.slowProfile()) + .build(); + // execute statement for the first time: the insert should succeed and the server should return + // only one acknowledgement row with just the [applied] column = true + ReactiveResultSet rs = sessionRule.session().executeReactive(statement); + List<ReactiveRow> results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).hasSize(1); + ReactiveRow row = results.get(0); + assertThat(row.getExecutionInfo()).isNotNull(); + assertThat(row.getColumnDefinitions()).hasSize(1); + assertThat(row.wasApplied()).isTrue(); + assertThat(row.getBoolean("[applied]")).isTrue(); + + List<ExecutionInfo> execInfos = + Flowable.<ExecutionInfo>fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); + + List<ColumnDefinitions> colDefs = + Flowable.<ColumnDefinitions>fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); + + List<Boolean> wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); + + // re-execute same statement: server should return one row with data that failed to be inserted, + // with [applied] = false + rs = sessionRule.session().executeReactive(statement); + results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).hasSize(1); + row = results.get(0); + assertThat(row.getExecutionInfo()).isNotNull(); + assertThat(row.getColumnDefinitions()).hasSize(4); + assertThat(row.wasApplied()).isFalse(); + assertThat(row.getBoolean("[applied]")).isFalse(); + assertThat(row.getInt("pk")).isEqualTo(0); + assertThat(row.getInt("cc")).isEqualTo(1); + assertThat(row.getInt("v")).isEqualTo(2); + + execInfos = + Flowable.<ExecutionInfo>fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); + + colDefs = + Flowable.<ColumnDefinitions>fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); + + wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); + } + + @Test + public void should_write_batch_cas() { + BatchStatement batch = createCASBatch(); + CqlSession session = sessionRule.session(); + // execute batch for the first time: all inserts should succeed and the server should return + // only one acknowledgement row with just the [applied] column = true + ReactiveResultSet rs = session.executeReactive(batch); + List<ReactiveRow> results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).hasSize(1); + ReactiveRow row = results.get(0); + assertThat(row.getExecutionInfo()).isNotNull(); + assertThat(row.getColumnDefinitions()).hasSize(1); + assertThat(row.wasApplied()).isTrue(); + assertThat(row.getBoolean("[applied]")).isTrue(); + + List<ExecutionInfo> execInfos = + Flowable.<ExecutionInfo>fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); + + List<ColumnDefinitions> colDefs = + Flowable.<ColumnDefinitions>fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); + + List<Boolean> wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); + + // delete 5 out of 10 rows + partiallyDeleteInsertedRows(); + + // re-execute same statement: server should return 5 rows for the 5 failed inserts, each one + // with [applied] = false + rs = session.executeReactive(batch); + results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).hasSize(5); + for (int i = 0; i < 5; i++) { + row = results.get(i); + assertThat(row.getExecutionInfo()).isNotNull(); + assertThat(row.getColumnDefinitions()).hasSize(4); + assertThat(row.wasApplied()).isFalse(); + assertThat(row.getBoolean("[applied]")).isFalse(); + assertThat(row.getInt("pk")).isEqualTo(0); + assertThat(row.getInt("cc")).isEqualTo(i); + assertThat(row.getInt("v")).isEqualTo(i + 1); + } + + execInfos = + Flowable.<ExecutionInfo>fromPublisher(rs.getExecutionInfos()).toList().blockingGet(); + assertThat(execInfos).hasSize(1).containsExactly(row.getExecutionInfo()); + + colDefs = + Flowable.<ColumnDefinitions>fromPublisher(rs.getColumnDefinitions()).toList().blockingGet(); + assertThat(colDefs).hasSize(1).containsExactly(row.getColumnDefinitions()); + + wasApplied = Flowable.fromPublisher(rs.wasApplied()).toList().blockingGet(); + assertThat(wasApplied).hasSize(1).containsExactly(row.wasApplied()); + } + + @NonNull + private static BatchStatement createCASBatch() { + // Build a batch with CAS operations on the same partition (conditional batch updates cannot + // span multiple partitions). + BatchStatementBuilder builder = BatchStatement.builder(DefaultBatchType.UNLOGGED); + SimpleStatement insert = + SimpleStatement.builder( + "INSERT INTO test_reactive_write (pk, cc, v) VALUES (0, ?, ?)
IF NOT EXISTS") + .setExecutionProfile(sessionRule.slowProfile()) + .build(); + PreparedStatement preparedStatement = sessionRule.session().prepare(insert); + for (int i = 0; i < 10; i++) { + builder.addStatement(preparedStatement.bind(i, i + 1)); + } + return builder.build(); + } + + private static void partiallyDeleteInsertedRows() { + CqlSession session = sessionRule.session(); + session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 5"); + session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 6"); + session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 7"); + session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 8"); + session.execute(" DELETE FROM test_reactive_write WHERE pk = 0 and cc = 9"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/ReactiveRetryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/ReactiveRetryIT.java new file mode 100644 index 00000000000..e59c29f4262 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/cql/reactive/ReactiveRetryIT.java @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.core.cql.reactive; + +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.internal.verification.VerificationModeFactory.times; + +import com.codahale.metrics.Metric; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.testinfra.loadbalancing.NodeComparator; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.core.retry.PerProfileRetryPolicyIT.NoRetryPolicy; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; +import com.datastax.oss.simulacron.server.BoundCluster; +import com.google.common.collect.Iterables; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import io.reactivex.Flowable; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Queue; +import java.util.TreeSet; +import java.util.UUID; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +/** Small test to validate the application-level retry behavior explained in the manual. 
*/ +@Category(ParallelizableTests.class) +public class ReactiveRetryIT { + + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); + + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) + .withClass( + DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, + CyclingLoadBalancingPolicy.class) + .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, NoRetryPolicy.class) + .withStringList( + DefaultDriverOption.METRICS_NODE_ENABLED, + Collections.singletonList("errors.request.unavailables")) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); + + private static final String QUERY_STRING = "select * from foo"; + + private List<Node> nodes; + + @Before + public void clearPrimes() { + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + } + + @Before + public void createNodesList() { + nodes = new ArrayList<>(SESSION_RULE.session().getMetadata().getNodes().values()); + nodes.sort(NodeComparator.INSTANCE); + } + + @Test + public void should_retry_at_application_level() { + // Given + CqlSession session = spy(SESSION_RULE.session()); + BoundCluster cluster = SIMULACRON_RULE.cluster(); + cluster.node(0).prime(when(QUERY_STRING).then(unavailable(ConsistencyLevel.ONE, 1, 0))); + cluster.node(1).prime(when(QUERY_STRING).then(unavailable(ConsistencyLevel.ONE, 1, 0))); + cluster.node(2).prime(when(QUERY_STRING).then(rows().row("col1", "Yay!"))); + + // When + ReactiveRow row = + Flowable.defer(() -> session.executeReactive(QUERY_STRING)) + .retry( + (retry, error) -> { + assertThat(error).isInstanceOf(UnavailableException.class); + UnavailableException ue = (UnavailableException) error; + Node coordinator = ue.getCoordinator(); + if (retry == 1) { + assertCoordinator(0, coordinator); + return true; + } else if (retry == 2) { + assertCoordinator(1, coordinator); + return true; + } else { + fail("Unexpected retry attempt"); + return false; + } + }) + .blockingLast(); + + // Then + assertThat(row.getString(0)).isEqualTo("Yay!"); + verify(session, times(3)).executeReactive(QUERY_STRING); + assertUnavailableMetric(0, 1L); + assertUnavailableMetric(1, 1L); + assertUnavailableMetric(2, 0L); + } + + private void assertCoordinator(int expectedNodeIndex, Node actual) { + Node expected = nodes.get(expectedNodeIndex); + assertThat(actual).isSameAs(expected); + } + + private void assertUnavailableMetric(int nodeIndex, long expectedUnavailableCount) { + Metrics metrics = SESSION_RULE.session().getMetrics().orElseThrow(AssertionError::new); + Node node = nodes.get(nodeIndex); + Optional<Metric> expectedMetric = metrics.getNodeMetric(node, DefaultNodeMetric.UNAVAILABLES); + assertThat(expectedMetric) + .isPresent() + .hasValueSatisfying( + metric -> assertThat(metric).extracting("count").isEqualTo(expectedUnavailableCount)); + } + + public static class CyclingLoadBalancingPolicy implements LoadBalancingPolicy { + + private final TreeSet<Node> nodes = new TreeSet<>(NodeComparator.INSTANCE); + private volatile Iterator<Node> iterator = Iterables.cycle(nodes).iterator(); + + @SuppressWarnings("unused") + public CyclingLoadBalancingPolicy(DriverContext context, String profileName) { + // constructor needed for loading via config. + } + + @Override + public void init(@NonNull Map<UUID, Node> nodes, @NonNull DistanceReporter distanceReporter) { + this.nodes.addAll(nodes.values()); + this.nodes.forEach(n -> distanceReporter.setDistance(n, NodeDistance.LOCAL)); + iterator = Iterables.cycle(this.nodes).iterator(); + } + + @NonNull + @Override + public Queue<Node> newQueryPlan(@Nullable Request request, @Nullable Session session) { + return new ArrayDeque<>(Collections.singleton(iterator.next())); + } + + @Override + public void onAdd(@NonNull Node node) {} + + @Override + public void onUp(@NonNull Node node) {} + + @Override + public void onDown(@NonNull Node node) {} + + @Override + public void onRemove(@NonNull Node node) {} + + @Override + public void close() {} +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/data/DataTypeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/data/DataTypeIT.java similarity index 88% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/data/DataTypeIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/data/DataTypeIT.java index 0510d95b96e..e3d891454de 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/data/DataTypeIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/data/DataTypeIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ -package com.datastax.oss.driver.api.core.data; +package com.datastax.oss.driver.core.data; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.fail; @@ -29,6 +31,12 @@ import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.data.CqlDuration; +import com.datastax.oss.driver.api.core.data.CqlVector; +import com.datastax.oss.driver.api.core.data.SettableByIndex; +import com.datastax.oss.driver.api.core.data.SettableByName; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.data.UdtValue; import com.datastax.oss.driver.api.core.type.CustomType; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; @@ -82,11 +90,12 @@ @Category(ParallelizableTests.class) @RunWith(DataProviderRunner.class) public class DataTypeIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @Rule public TestName name = new TestName(); @@ -143,7 +152,7 @@ DataTypes.VARINT, new BigInteger(Integer.toString(Integer.MAX_VALUE) + "000") } }; - Version version = ccm.getCassandraVersion(); + Version version = CCM_RULE.getCassandraVersion(); // Filter types if they aren't supported by cassandra version in use.
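// Illustrative sketch (not part of the patch): the version gate applied to these samples, and
// to the vector samples added later in this file, boils down to:
//   Version version = CCM_RULE.getCassandraVersion();
//   boolean vectorsSupported = version.compareTo(Version.parse("5.0")) >= 0;
// Samples whose type the running server does not support are filtered out of the provider.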
return Arrays.stream(samples) .filter( @@ -175,6 +184,7 @@ public static Object[][] typeSamples() { // 5) include map // 6) include tuple // 7) include udt + // 8) include vector return Arrays.stream(primitiveSamples) .flatMap( o -> { @@ -240,7 +250,7 @@ public static Object[][] typeSamples() { UserDefinedType udt = new DefaultUserDefinedType( - sessionRule.keyspace(), + SESSION_RULE.keyspace(), CqlIdentifier.fromCql(userTypeFor(types)), false, typeNames, @@ -255,6 +265,30 @@ public static Object[][] typeSamples() { UdtValue udtValue2 = udt.newValue(1, o[1]); samples.add(new Object[] {udt, udtValue2}); + if (CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) >= 0) { + // vector of type + CqlVector vector = CqlVector.newInstance(o[1]); + samples.add(new Object[] {DataTypes.vectorOf(dataType, 1), vector}); + } + + return samples.stream(); + }) + .toArray(Object[][]::new); + } + + @DataProvider + public static Object[][] addVectors() { + Object[][] previousSamples = typeSamples(); + if (CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) < 0) return previousSamples; + return Arrays.stream(previousSamples) + .flatMap( + o -> { + List samples = new ArrayList<>(); + samples.add(o); + if (o[1] == null) return samples.stream(); + DataType dataType = (DataType) o[0]; + CqlVector vector = CqlVector.newInstance(o[1]); + samples.add(new Object[] {DataTypes.vectorOf(dataType, 1), vector}); return samples.stream(); }) .toArray(Object[][]::new); @@ -270,7 +304,7 @@ public static void createTable() { List columnData = new ArrayList<>(); - for (Object[] sample : typeSamples()) { + for (Object[] sample : addVectors()) { DataType dataType = (DataType) sample[0]; if (!typeToColumnName.containsKey(dataType)) { @@ -281,14 +315,14 @@ public static void createTable() { } } - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder( String.format( "CREATE TABLE IF NOT EXISTS %s (k int primary key, %s)", tableName, String.join(",", columnData))) - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); } @@ -300,11 +334,11 @@ private static int nextKey() { return keyCounter.incrementAndGet(); } - @UseDataProvider("typeSamples") + @UseDataProvider("addVectors") @Test public void should_insert_non_primary_key_column_simple_statement_using_format( DataType dataType, K value, K expectedPrimitiveValue) { - TypeCodec codec = sessionRule.session().getContext().getCodecRegistry().codecFor(dataType); + TypeCodec codec = SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType); int key = nextKey(); String columnName = columnNameFor(dataType); @@ -317,7 +351,7 @@ public void should_insert_non_primary_key_column_simple_statement_using_form .addPositionalValue(key) .build(); - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); SimpleStatement select = SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) @@ -327,7 +361,7 @@ public void should_insert_non_primary_key_column_simple_statement_using_form readValue(select, dataType, value, expectedPrimitiveValue); } - @UseDataProvider("typeSamples") + @UseDataProvider("addVectors") @Test public void should_insert_non_primary_key_column_simple_statement_positional_value( DataType dataType, K value, K expectedPrimitiveValue) { @@ -340,7 +374,7 @@ public void should_insert_non_primary_key_column_simple_statement_positional .addPositionalValues(key, value) .build(); - sessionRule.session().execute(insert); + 
SESSION_RULE.session().execute(insert); SimpleStatement select = SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) @@ -350,7 +384,7 @@ public void should_insert_non_primary_key_column_simple_statement_positional readValue(select, dataType, value, expectedPrimitiveValue); } - @UseDataProvider("typeSamples") + @UseDataProvider("addVectors") @Test public void should_insert_non_primary_key_column_simple_statement_named_value( DataType dataType, K value, K expectedPrimitiveValue) { @@ -364,7 +398,7 @@ public void should_insert_non_primary_key_column_simple_statement_named_valu .addNamedValue("v", value) .build(); - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); SimpleStatement select = SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) @@ -374,7 +408,7 @@ public void should_insert_non_primary_key_column_simple_statement_named_valu readValue(select, dataType, value, expectedPrimitiveValue); } - @UseDataProvider("typeSamples") + @UseDataProvider("addVectors") @Test public void should_insert_non_primary_key_column_bound_statement_positional_value( DataType dataType, K value, K expectedPrimitiveValue) { @@ -386,24 +420,24 @@ public void should_insert_non_primary_key_column_bound_statement_positional_ String.format("INSERT INTO %s (k, %s) values (?, ?)", tableName, columnName)) .build(); - PreparedStatement preparedInsert = sessionRule.session().prepare(insert); + PreparedStatement preparedInsert = SESSION_RULE.session().prepare(insert); BoundStatementBuilder boundBuilder = preparedInsert.boundStatementBuilder(); boundBuilder = setValue(0, boundBuilder, DataTypes.INT, key); boundBuilder = setValue(1, boundBuilder, dataType, value); BoundStatement boundInsert = boundBuilder.build(); - sessionRule.session().execute(boundInsert); + SESSION_RULE.session().execute(boundInsert); SimpleStatement select = SimpleStatement.builder(String.format("SELECT %s FROM %s where k=?", columnName, tableName)) .build(); - PreparedStatement preparedSelect = sessionRule.session().prepare(select); + PreparedStatement preparedSelect = SESSION_RULE.session().prepare(select); BoundStatement boundSelect = setValue(0, preparedSelect.bind(), DataTypes.INT, key); readValue(boundSelect, dataType, value, expectedPrimitiveValue); } - @UseDataProvider("typeSamples") + @UseDataProvider("addVectors") @Test public void should_insert_non_primary_key_column_bound_statement_named_value( DataType dataType, K value, K expectedPrimitiveValue) { @@ -415,19 +449,19 @@ public void should_insert_non_primary_key_column_bound_statement_named_value String.format("INSERT INTO %s (k, %s) values (:k, :v)", tableName, columnName)) .build(); - PreparedStatement preparedInsert = sessionRule.session().prepare(insert); + PreparedStatement preparedInsert = SESSION_RULE.session().prepare(insert); BoundStatementBuilder boundBuilder = preparedInsert.boundStatementBuilder(); boundBuilder = setValue("k", boundBuilder, DataTypes.INT, key); boundBuilder = setValue("v", boundBuilder, dataType, value); BoundStatement boundInsert = boundBuilder.build(); - sessionRule.session().execute(boundInsert); + SESSION_RULE.session().execute(boundInsert); SimpleStatement select = SimpleStatement.builder( String.format("SELECT %s FROM %s where k=:k", columnName, tableName)) .build(); - PreparedStatement preparedSelect = sessionRule.session().prepare(select); + PreparedStatement preparedSelect = SESSION_RULE.session().prepare(select); BoundStatement boundSelect = 
setValue("k", preparedSelect.bind(), DataTypes.INT, key); boundSelect = boundSelect.setInt("k", key); @@ -437,8 +471,8 @@ public void should_insert_non_primary_key_column_bound_statement_named_value private static > S setValue( int index, S bs, DataType dataType, Object value) { TypeCodec codec = - sessionRule.session() != null - ? sessionRule.session().getContext().getCodecRegistry().codecFor(dataType) + SESSION_RULE.session() != null + ? SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType) : null; // set to null if value is null instead of getting possible NPE when casting from null to @@ -529,8 +563,8 @@ private static > S setValue( private static > S setValue( String name, S bs, DataType dataType, Object value) { TypeCodec codec = - sessionRule.session() != null - ? sessionRule.session().getContext().getCodecRegistry().codecFor(dataType) + SESSION_RULE.session() != null + ? SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType) : null; // set to null if value is null instead of getting possible NPE when casting from null to @@ -621,8 +655,8 @@ private static > S setValue( private void readValue( Statement select, DataType dataType, K value, K expectedPrimitiveValue) { TypeCodec codec = - sessionRule.session().getContext().getCodecRegistry().codecFor(dataType); - ResultSet result = sessionRule.session().execute(select); + SESSION_RULE.session().getContext().getCodecRegistry().codecFor(dataType); + ResultSet result = SESSION_RULE.session().execute(select); String columnName = columnNameFor(dataType); @@ -745,7 +779,7 @@ private void readValue( } // Decode directly using the codec - ProtocolVersion protocolVersion = sessionRule.session().getContext().getProtocolVersion(); + ProtocolVersion protocolVersion = SESSION_RULE.session().getContext().getProtocolVersion(); assertThat(codec.decode(row.getBytesUnsafe(columnName), protocolVersion)).isEqualTo(value); assertThat(codec.decode(row.getBytesUnsafe(0), protocolVersion)).isEqualTo(value); } @@ -763,14 +797,14 @@ private static String typeFor(DataType dataType) { fieldParts.add(fieldName + " " + fieldType); } - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder( String.format( "CREATE TYPE IF NOT EXISTS %s (%s)", udt.getName().asCql(false), String.join(",", fieldParts))) - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); // Chances are the UDT isn't labeled as frozen in the context we're given, so we add it as diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/heartbeat/HeartbeatDisabledIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatDisabledIT.java similarity index 72% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/heartbeat/HeartbeatDisabledIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatDisabledIT.java index c6312e6484b..7d90f124fb3 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/heartbeat/HeartbeatDisabledIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatDisabledIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.heartbeat; +package com.datastax.oss.driver.core.heartbeat; import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; @@ -33,7 +35,8 @@ public class HeartbeatDisabledIT { @ClassRule - public static SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(2)); + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(2)); @Test public void should_not_send_heartbeat_when_disabled() throws InterruptedException { @@ -43,7 +46,7 @@ public void should_not_send_heartbeat_when_disabled() throws InterruptedExceptio SessionUtils.configLoaderBuilder() .withDuration(DefaultDriverOption.HEARTBEAT_INTERVAL, Duration.ofSeconds(0)) .build(); - try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { + try (CqlSession ignored = SessionUtils.newSession(SIMULACRON_RULE, loader)) { AtomicInteger heartbeats = registerHeartbeatListener(); SECONDS.sleep(35); @@ -53,7 +56,7 @@ public void should_not_send_heartbeat_when_disabled() throws InterruptedExceptio private AtomicInteger registerHeartbeatListener() { AtomicInteger nonControlHeartbeats = new AtomicInteger(); - simulacron + SIMULACRON_RULE .cluster() .registerQueryListener( (n, l) -> nonControlHeartbeats.incrementAndGet(), diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/heartbeat/HeartbeatIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatIT.java similarity index 78% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/heartbeat/HeartbeatIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatIT.java index febf21f93ff..26658bd76d1 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/heartbeat/HeartbeatIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/heartbeat/HeartbeatIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,9 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.heartbeat; +package com.datastax.oss.driver.core.heartbeat; -import static com.datastax.oss.driver.api.testinfra.utils.ConditionChecker.checkThat; import static com.datastax.oss.driver.api.testinfra.utils.NodeUtils.waitForDown; import static com.datastax.oss.driver.api.testinfra.utils.NodeUtils.waitForUp; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.closeConnection; @@ -24,6 +25,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; +import static org.awaitility.Awaitility.await; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -46,30 +48,30 @@ import java.net.SocketAddress; import java.time.Duration; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; import java.util.stream.Collectors; import org.junit.Before; -import org.junit.Rule; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @Category(ParallelizableTests.class) public class HeartbeatIT { - @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); private static final String QUERY = "select * from foo"; - private static final Predicate IS_OPTION_REQUEST = - (l) -> l.getQuery().equals("OPTIONS"); - private BoundNode simulacronNode; @Before public void setUp() { - simulacron.cluster().clearLogs(); - simulacron.cluster().clearPrimes(true); - simulacronNode = simulacron.cluster().getNodes().iterator().next(); + SIMULACRON_RULE.cluster().acceptConnections(); + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + simulacronNode = SIMULACRON_RULE.cluster().getNodes().iterator().next(); } @Test @@ -104,11 +106,12 @@ public void should_not_send_heartbeat_during_protocol_initialization() { // Try to create a session. Note that the init query timeout is twice the heartbeat interval, so // we're sure that at least one heartbeat would be sent if it was not properly disabled during // init. - try (CqlSession session = newSession()) { + try (CqlSession ignored = newSession()) { fail("Expected session creation to fail"); } catch (Exception expected) { - // no heartbeats should have been sent while protocol was initializing. - assertThat(getHeartbeatsForNode()).isEmpty(); + // no heartbeats should have been sent while protocol was initializing, but one OPTIONS + // message is expected to be sent as part of the initialization process. 
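// Illustrative note (not part of the patch): driver heartbeats are protocol-level OPTIONS
// requests, which is how this test recognizes them in Simulacron's query logs; the predicate
// used throughout the class is equivalent to:
//   boolean isHeartbeat = log.getQuery().equals("OPTIONS");
// where `log` is a com.datastax.oss.simulacron.common.cluster.QueryLog.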
+ assertThat(getHeartbeatsForNode()).hasSize(1); } } @@ -118,9 +121,12 @@ public void should_send_heartbeat_on_control_connection() { ProgrammaticDriverConfigLoaderBuilder loader = SessionUtils.configLoaderBuilder() .withInt(DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE, 0); - try (CqlSession session = newSession(loader)) { + try (CqlSession ignored = newSession(loader)) { AtomicInteger heartbeats = countHeartbeatsOnControlConnection(); - checkThat(() -> heartbeats.get() > 0).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> heartbeats.get() > 0); } } @@ -142,7 +148,10 @@ public void should_send_heartbeat_on_regular_connection() throws InterruptedExce assertThat(nonControlHeartbeats.get()).isZero(); // Stop querying, heartbeats should be sent again - checkThat(() -> nonControlHeartbeats.get() >= 1).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> nonControlHeartbeats.get() >= 1); } } @@ -151,7 +160,7 @@ public void should_send_heartbeat_when_requests_being_written_but_nothing_receiv throws InterruptedException { // Prime a query that will never return a response. String noResponseQueryStr = "delay"; - simulacron.cluster().prime(when(noResponseQueryStr).then(noResult())); + SIMULACRON_RULE.cluster().prime(when(noResponseQueryStr).then(noResult())); try (CqlSession session = newSession()) { AtomicInteger heartbeats = countHeartbeatsOnRegularConnection(); @@ -176,9 +185,12 @@ public void should_close_connection_when_heartbeat_times_out() { // Ensure we get some heartbeats and the node remains up. AtomicInteger heartbeats = new AtomicInteger(); simulacronNode.registerQueryListener( - (n, l) -> heartbeats.incrementAndGet(), true, IS_OPTION_REQUEST); + (n, l) -> heartbeats.incrementAndGet(), true, this::isOptionRequest); - checkThat(() -> heartbeats.get() >= 2).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> heartbeats.get() >= 2); assertThat(node.getState()).isEqualTo(NodeState.UP); // configure node to not respond to options request, which should cause a timeout. @@ -186,7 +198,10 @@ public void should_close_connection_when_heartbeat_times_out() { heartbeats.set(0); // wait for heartbeat to be sent. - checkThat(() -> heartbeats.get() >= 1).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> heartbeats.get() >= 1); heartbeats.set(0); // node should go down because heartbeat was unanswered. @@ -198,7 +213,10 @@ public void should_close_connection_when_heartbeat_times_out() { // wait for node to come up again and ensure heartbeats are successful and node remains up. 
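// Illustrative note (not part of the patch): the Awaitility idiom that replaces the removed
// ConditionChecker throughout this file polls a condition instead of sleeping a fixed delay:
//   await()
//       .pollInterval(500, TimeUnit.MILLISECONDS)
//       .atMost(60, TimeUnit.SECONDS)
//       .until(() -> heartbeats.get() >= 2);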
waitForUp(node); - checkThat(() -> heartbeats.get() >= 2).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> heartbeats.get() >= 2); assertThat(node.getState()).isEqualTo(NodeState.UP); } } @@ -218,7 +236,7 @@ private CqlSession newSession(ProgrammaticDriverConfigLoaderBuilder loaderBuilde .withDuration(DefaultDriverOption.CONNECTION_INIT_QUERY_TIMEOUT, Duration.ofSeconds(2)) .withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofSeconds(1)) .build(); - return SessionUtils.newSession(simulacron, loader); + return SessionUtils.newSession(SIMULACRON_RULE, loader); } private AtomicInteger countHeartbeatsOnRegularConnection() { @@ -232,13 +250,13 @@ private AtomicInteger countHeartbeatsOnControlConnection() { private AtomicInteger countHeartbeats(boolean regularConnection) { SocketAddress controlConnectionAddress = findControlConnectionAddress(); AtomicInteger count = new AtomicInteger(); - simulacron + SIMULACRON_RULE .cluster() .registerQueryListener( (n, l) -> count.incrementAndGet(), false, (l) -> - IS_OPTION_REQUEST.test(l) + isOptionRequest(l) && (regularConnection ^ l.getConnection().equals(controlConnectionAddress))); return count; } @@ -258,4 +276,8 @@ private List getHeartbeatsForNode() { .filter(l -> l.getQuery().equals("OPTIONS")) .collect(Collectors.toList()); } + + private boolean isOptionRequest(QueryLog l) { + return l.getQuery().equals("OPTIONS"); + } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/AllLoadBalancingPoliciesSimulacronIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/AllLoadBalancingPoliciesSimulacronIT.java new file mode 100644 index 00000000000..855cd6bb6a2 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/AllLoadBalancingPoliciesSimulacronIT.java @@ -0,0 +1,505 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.core.loadbalancing; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.NoNodeAvailableException; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.api.core.metadata.TokenMap; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.protocol.internal.request.Query; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.cluster.QueryLog; +import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; +import com.datastax.oss.simulacron.common.stubbing.PrimeDsl.RowBuilder; +import com.datastax.oss.simulacron.server.BoundCluster; +import com.datastax.oss.simulacron.server.BoundNode; +import com.datastax.oss.simulacron.server.BoundTopic; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +@Category(ParallelizableTests.class) +@RunWith(DataProviderRunner.class) +public class AllLoadBalancingPoliciesSimulacronIT { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(5, 5, 5)); + + @Before + public void reset() { + SIMULACRON_RULE.cluster().start(); + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + SIMULACRON_RULE + .cluster() + .prime( + PrimeDsl.when("SELECT * FROM system_schema.keyspaces") + .then(new RowBuilder().columnTypes(KEYSPACE_COLUMNS).row(KEYSPACE_ROW).build())); + } + + @Test + @DataProvider({ + "BasicLoadBalancingPolicy,dc1", + "DefaultLoadBalancingPolicy,dc1", + "DcInferringLoadBalancingPolicy,dc1", + "DcInferringLoadBalancingPolicy,null", + }) + public void should_round_robin_within_local_dc_when_dc_aware_but_not_token_aware( + String lbp, String dc) { + + // given: DC is provided or inferred, token awareness is disabled and remote DCs are allowed + try (CqlSession session = newSession(lbp, dc, 2, true, false)) { + + // when: a query is executed 50 times. + for (int i = 0; i < 50; i++) { + session.execute(QUERY); + } + + // then: each node in local DC should get an equal number of requests. 
+      for (int i = 0; i < 5; i++) {
+        assertThat(queries(0, i).count()).isEqualTo(10);
+      }
+
+      // then: no node in the remote DC should get a request.
+      assertThat(queries(1).count()).isEqualTo(0);
+      assertThat(queries(2).count()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  @DataProvider({
+    "BasicLoadBalancingPolicy,dc1,ONE",
+    "BasicLoadBalancingPolicy,dc1,LOCAL_ONE",
+    "BasicLoadBalancingPolicy,dc1,TWO",
+    "BasicLoadBalancingPolicy,dc1,QUORUM",
+    "BasicLoadBalancingPolicy,dc1,LOCAL_QUORUM",
+    "DefaultLoadBalancingPolicy,dc1,ONE",
+    "DefaultLoadBalancingPolicy,dc1,LOCAL_ONE",
+    "DefaultLoadBalancingPolicy,dc1,TWO",
+    "DefaultLoadBalancingPolicy,dc1,QUORUM",
+    "DefaultLoadBalancingPolicy,dc1,LOCAL_QUORUM",
+    "DcInferringLoadBalancingPolicy,dc1,ONE",
+    "DcInferringLoadBalancingPolicy,dc1,LOCAL_ONE",
+    "DcInferringLoadBalancingPolicy,dc1,TWO",
+    "DcInferringLoadBalancingPolicy,dc1,QUORUM",
+    "DcInferringLoadBalancingPolicy,dc1,LOCAL_QUORUM",
+    "DcInferringLoadBalancingPolicy,null,ONE",
+    "DcInferringLoadBalancingPolicy,null,LOCAL_ONE",
+    "DcInferringLoadBalancingPolicy,null,TWO",
+    "DcInferringLoadBalancingPolicy,null,QUORUM",
+    "DcInferringLoadBalancingPolicy,null,LOCAL_QUORUM",
+  })
+  public void should_use_local_replicas_when_dc_aware_and_token_aware_and_enough_local_replicas_up(
+      String lbp, String dc, DefaultConsistencyLevel cl) {
+
+    // given: DC is provided or inferred, token awareness enabled, remotes allowed, CL <= 2
+    try (CqlSession session = newSession(lbp, dc, 2, true)) {
+
+      // given: one replica and 2 non-replicas down in local DC, but CL <= 2 still achievable
+      List<Node> aliveReplicas = degradeLocalDc(session);
+
+      // when: a query is executed 50 times and some nodes are down in the local DC.
+      for (int i = 0; i < 50; i++) {
+        session.execute(
+            SimpleStatement.newInstance(QUERY)
+                .setConsistencyLevel(cl)
+                .setRoutingKeyspace("test")
+                .setRoutingKey(ROUTING_KEY));
+      }
+
+      // then: all requests should be distributed to the remaining up replicas in local DC
+      BoundNode alive1 = findNode(aliveReplicas.get(0));
+      BoundNode alive2 = findNode(aliveReplicas.get(1));
+      assertThat(queries(alive1).count() + queries(alive2).count()).isEqualTo(50);
+
+      // then: no node in the remote DCs should get a request.
+      assertThat(queries(1).count()).isEqualTo(0);
+      assertThat(queries(2).count()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  public void should_round_robin_within_all_dcs_when_dc_agnostic() {
+
+    // given: DC-agnostic LBP, no local DC, remotes not allowed, token awareness enabled
+    try (CqlSession session = newSession("BasicLoadBalancingPolicy", null, 0, false)) {
+
+      // when: a query is executed 150 times.
+      for (int i = 0; i < 150; i++) {
+        session.execute(
+            SimpleStatement.newInstance(QUERY)
+                // local CL should be ignored since there is no local DC
+                .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));
+      }
+
+      // then: each node should get 10 requests, even remote ones since the LBP is DC-agnostic.
+ for (int dc = 0; dc < 3; dc++) { + for (int n = 0; n < 5; n++) { + assertThat(queries(dc, n).count()).isEqualTo(10); + } + } + } + } + + @Test + @DataProvider({ + "BasicLoadBalancingPolicy,dc1,ONE", + "BasicLoadBalancingPolicy,dc1,TWO", + "BasicLoadBalancingPolicy,dc1,THREE", + "BasicLoadBalancingPolicy,dc1,QUORUM", + "BasicLoadBalancingPolicy,dc1,ANY", + "DefaultLoadBalancingPolicy,dc1,ONE", + "DefaultLoadBalancingPolicy,dc1,TWO", + "DefaultLoadBalancingPolicy,dc1,THREE", + "DefaultLoadBalancingPolicy,dc1,QUORUM", + "DefaultLoadBalancingPolicy,dc1,ANY", + "DcInferringLoadBalancingPolicy,dc1,ONE", + "DcInferringLoadBalancingPolicy,dc1,TWO", + "DcInferringLoadBalancingPolicy,dc1,THREE", + "DcInferringLoadBalancingPolicy,dc1,QUORUM", + "DcInferringLoadBalancingPolicy,dc1,ANY", + "DcInferringLoadBalancingPolicy,null,ONE", + "DcInferringLoadBalancingPolicy,null,TWO", + "DcInferringLoadBalancingPolicy,null,THREE", + "DcInferringLoadBalancingPolicy,null,QUORUM", + "DcInferringLoadBalancingPolicy,null,ANY", + }) + public void should_use_remote_nodes_when_no_up_nodes_in_local_dc_for_non_local_cl( + String lbp, String dc, DefaultConsistencyLevel cl) { + + // given: 1 remote allowed per DC and a non-local CL, token awareness enabled + try (CqlSession session = newSession(lbp, dc, 1, false)) { + + // given: local DC is down + stopLocalDc(session); + + // when: a query is executed 50 times and all nodes are down in local DC. + for (int i = 0; i < 50; i++) { + session.execute( + SimpleStatement.newInstance(QUERY) + .setConsistencyLevel(cl) + .setRoutingKeyspace("test") + .setRoutingKey(ROUTING_KEY)); + } + + // then: only 1 node in each remote DC should get requests (we can't know which ones exactly). + assertThat(queries(1).count() + queries(2).count()).isEqualTo(50); + } + } + + @Test + @DataProvider({ + "BasicLoadBalancingPolicy,dc1,LOCAL_ONE", + "BasicLoadBalancingPolicy,dc1,LOCAL_QUORUM", + "BasicLoadBalancingPolicy,dc1,LOCAL_SERIAL", + "DefaultLoadBalancingPolicy,dc1,LOCAL_ONE", + "DefaultLoadBalancingPolicy,dc1,LOCAL_QUORUM", + "DefaultLoadBalancingPolicy,dc1,LOCAL_SERIAL", + "DcInferringLoadBalancingPolicy,dc1,LOCAL_ONE", + "DcInferringLoadBalancingPolicy,dc1,LOCAL_QUORUM", + "DcInferringLoadBalancingPolicy,dc1,LOCAL_SERIAL", + "DcInferringLoadBalancingPolicy,null,LOCAL_ONE", + "DcInferringLoadBalancingPolicy,null,LOCAL_QUORUM", + "DcInferringLoadBalancingPolicy,null,LOCAL_SERIAL", + }) + public void should_not_use_remote_nodes_when_using_local_cl( + String lbp, String dc, DefaultConsistencyLevel cl) { + + // given: remotes allowed but not for local CL, token awareness enabled, local CL + try (CqlSession session = newSession(lbp, dc, 5, false)) { + + // given: local DC is down + stopLocalDc(session); + + // when: a query is executed 50 times and all nodes are down in local DC. + for (int i = 0; i < 50; i++) { + Throwable t = + catchThrowable( + () -> + session.execute( + SimpleStatement.newInstance(QUERY) + .setConsistencyLevel(cl) + .setRoutingKeyspace("test") + .setRoutingKey(ROUTING_KEY))); + + // then: expect a NNAE for a local CL since no local replicas available. + assertThat(t).isInstanceOf(NoNodeAvailableException.class); + } + + // then: no node in the remote DCs should get a request. 
+ assertThat(queries(1).count()).isEqualTo(0); + assertThat(queries(2).count()).isEqualTo(0); + } + } + + @Test + @DataProvider({ + "BasicLoadBalancingPolicy,dc1,LOCAL_ONE", + "BasicLoadBalancingPolicy,dc1,LOCAL_QUORUM", + "DefaultLoadBalancingPolicy,dc1,LOCAL_ONE", + "DefaultLoadBalancingPolicy,dc1,LOCAL_QUORUM", + "DcInferringLoadBalancingPolicy,dc1,LOCAL_ONE", + "DcInferringLoadBalancingPolicy,dc1,LOCAL_QUORUM", + "DcInferringLoadBalancingPolicy,null,LOCAL_ONE", + "DcInferringLoadBalancingPolicy,null,LOCAL_QUORUM", + }) + public void should_use_remote_nodes_when_using_local_cl_if_allowed( + String lbp, String dc, DefaultConsistencyLevel cl) { + + // given: only one node allowed per remote DC and remotes allowed even for local CLs. + try (CqlSession session = newSession(lbp, dc, 1, true)) { + + // given: local DC is down + stopLocalDc(session); + + // when: a query is executed 50 times and all nodes are down in local DC. + for (int i = 0; i < 50; i++) { + session.execute( + SimpleStatement.newInstance(QUERY) + .setConsistencyLevel(cl) + .setRoutingKeyspace("test") + .setRoutingKey(ROUTING_KEY)); + } + + // then: only 1 node in each remote DC should get requests (we can't know which ones exactly). + assertThat(queries(1).count() + queries(2).count()).isEqualTo(50); + } + } + + @Test + @DataProvider({ + "BasicLoadBalancingPolicy,dc1", + "DefaultLoadBalancingPolicy,dc1", + "DcInferringLoadBalancingPolicy,dc1", + "DcInferringLoadBalancingPolicy,null" + }) + public void should_not_use_excluded_dc_using_node_filter(String lbp, String dc) { + + // given: remotes allowed even for local CLs, but node filter excluding dc2 + try (CqlSession session = newSession(lbp, dc, 5, true, true, excludeDc("dc2"))) { + + // when: A query is made and nodes for the local dc are available. + for (int i = 0; i < 50; i++) { + session.execute( + SimpleStatement.newInstance(QUERY) + .setRoutingKeyspace("test") + .setRoutingKey(ROUTING_KEY)); + } + + // then: only nodes in the local DC should have been queried. + assertThat(queries(0).count()).isEqualTo(50); + assertThat(queries(1).count()).isEqualTo(0); + assertThat(queries(2).count()).isEqualTo(0); + + // given: local DC is down + stopLocalDc(session); + + SIMULACRON_RULE.cluster().clearLogs(); + + // when: A query is made and all nodes in the local dc are down. + for (int i = 0; i < 50; i++) { + session.execute( + SimpleStatement.newInstance(QUERY) + .setRoutingKeyspace("test") + .setRoutingKey(ROUTING_KEY)); + } + + // then: Only nodes in DC3 should have been queried, since DC2 is excluded and DC1 is down. 
+      assertThat(queries(0).count()).isEqualTo(0);
+      assertThat(queries(1).count()).isEqualTo(0);
+      assertThat(queries(2).count()).isEqualTo(50);
+    }
+  }
+
+  private static final ByteBuffer ROUTING_KEY = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
+
+  private static final String[] KEYSPACE_COLUMNS =
+      new String[] {
+        "keyspace_name", "varchar",
+        "durable_writes", "boolean",
+        "replication", "map<varchar, varchar>"
+      };
+
+  private static final Object[] KEYSPACE_ROW =
+      new Object[] {
+        "keyspace_name",
+        "test",
+        "durable_writes",
+        true,
+        "replication",
+        ImmutableMap.of(
+            "class",
+            "org.apache.cassandra.locator.NetworkTopologyStrategy",
+            "dc1",
+            "3",
+            "dc2",
+            "3",
+            "dc3",
+            "3")
+      };
+
+  private static final String QUERY = "SELECT * FROM test.foo";
+
+  private CqlSession newSession(String lbp, String dc, int maxRemoteNodes, boolean allowLocalCl) {
+    return newSession(lbp, dc, maxRemoteNodes, allowLocalCl, true);
+  }
+
+  private CqlSession newSession(
+      String lbp, String dc, int maxRemoteNodes, boolean allowLocalCl, boolean tokenAware) {
+    return newSession(lbp, dc, maxRemoteNodes, allowLocalCl, tokenAware, null);
+  }
+
+  private CqlSession newSession(
+      String lbp,
+      String dc,
+      int maxRemoteNodes,
+      boolean allowLocalCl,
+      boolean tokenAware,
+      Predicate<Node> nodeFilter) {
+    DriverConfigLoader loader =
+        SessionUtils.configLoaderBuilder()
+            .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, tokenAware)
+            .withString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, lbp)
+            .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, dc)
+            .withInt(
+                DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC,
+                maxRemoteNodes)
+            .withBoolean(
+                DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS,
+                allowLocalCl)
+            .build();
+    return SessionUtils.newSession(SIMULACRON_RULE, null, null, null, nodeFilter, loader);
+  }
+
+  private BoundNode findNode(Node node) {
+    BoundCluster simulacron = SIMULACRON_RULE.cluster();
+    SocketAddress toFind = node.getEndPoint().resolve();
+    for (BoundNode boundNode : simulacron.getNodes()) {
+      if (boundNode.getAddress().equals(toFind)) {
+        return boundNode;
+      }
+    }
+    throw new AssertionError("Could not find node: " + toFind);
+  }
+
+  private void stopLocalDc(CqlSession session) {
+    SIMULACRON_RULE.cluster().dc(0).stop();
+    awaitDown(nodesInDc(session, "dc1"));
+  }
+
+  private List<Node> degradeLocalDc(CqlSession session) {
+    // stop 1 replica and 2 non-replicas in dc1
+    List<Node> localReplicas = replicasInDc(session, "dc1");
+    assertThat(localReplicas).hasSize(3);
+    BoundNode replica1 = findNode(localReplicas.get(0));
+
+    List<Node> localOthers = nonReplicasInDc(session, "dc1");
+    assertThat(localOthers).hasSize(2);
+    BoundNode other1 = findNode(localOthers.get(0));
+    BoundNode other2 = findNode(localOthers.get(1));
+
+    replica1.stop();
+    other1.stop();
+    other2.stop();
+
+    awaitDown(localReplicas.get(0), localOthers.get(0), localOthers.get(1));
+    return localReplicas.subList(1, 3);
+  }
+
+  private Stream<QueryLog> queries(int dc, int node) {
+    return queries(SIMULACRON_RULE.cluster().dc(dc).node(node));
+  }
+
+  private Stream<QueryLog> queries(int dc) {
+    return queries(SIMULACRON_RULE.cluster().dc(dc));
+  }
+
+  private Stream<QueryLog> queries(BoundTopic<?, ?> topic) {
+    return topic.getLogs().getQueryLogs().stream()
+        .filter(q -> q.getFrame().message instanceof Query)
+        .filter(q -> ((Query) q.getFrame().message).query.equals(QUERY));
+  }
+
+  private List<Node> nodesInDc(CqlSession session, String dcName) {
+    return session.getMetadata().getNodes().values().stream()
+        .filter(n -> Objects.equals(n.getDatacenter(), dcName))
+        .collect(Collectors.toList());
+  }
+
+  private List<Node> replicasInDc(CqlSession session, String dcName) {
+    assertThat(session.getMetadata().getTokenMap()).isPresent();
+    TokenMap tokenMap = session.getMetadata().getTokenMap().get();
+    return tokenMap.getReplicas("test", ROUTING_KEY).stream()
+        .filter(n -> Objects.equals(n.getDatacenter(), dcName))
+        .collect(Collectors.toList());
+  }
+
+  private List<Node> nonReplicasInDc(
+      CqlSession session, @SuppressWarnings("SameParameterValue") String dcName) {
+    List<Node> nodes = nodesInDc(session, dcName);
+    nodes.removeAll(replicasInDc(session, dcName));
+    return nodes;
+  }
+
+  private Predicate<Node> excludeDc(@SuppressWarnings("SameParameterValue") String dcName) {
+    return node -> !Objects.equals(node.getDatacenter(), dcName);
+  }
+
+  private void awaitDown(Node... nodes) {
+    awaitDown(Arrays.asList(nodes));
+  }
+
+  private void awaitDown(Iterable<Node> nodes) {
+    await()
+        .atMost(Duration.ofSeconds(10))
+        .untilAsserted(
+            () -> {
+              for (Node node : nodes) {
+                assertThat(node.getState()).isEqualTo(NodeState.DOWN);
+              }
+            });
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/DefaultLoadBalancingPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/DefaultLoadBalancingPolicyIT.java
similarity index 82%
rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/DefaultLoadBalancingPolicyIT.java
rename to integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/DefaultLoadBalancingPolicyIT.java
index 41309a36f8a..af454fc6458 100644
--- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/DefaultLoadBalancingPolicyIT.java
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/DefaultLoadBalancingPolicyIT.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *   http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,11 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ -package com.datastax.oss.driver.api.core.loadbalancing; +package com.datastax.oss.driver.core.loadbalancing; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; import static org.assertj.core.api.Assertions.withinPercentage; +import static org.awaitility.Awaitility.await; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -26,6 +29,7 @@ import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeState; @@ -34,7 +38,6 @@ import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; import com.google.common.collect.ImmutableList; @@ -47,6 +50,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -57,10 +61,10 @@ public class DefaultLoadBalancingPolicyIT { private static final String LOCAL_DC = "dc1"; - private static CustomCcmRule ccmRule = CustomCcmRule.builder().withNodes(4, 1).build(); + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withNodes(4, 1).build(); - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) .withKeyspace(false) .withConfigLoader( SessionUtils.configLoaderBuilder() @@ -68,11 +72,12 @@ public class DefaultLoadBalancingPolicyIT { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( "CREATE KEYSPACE test " + "WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 2, 'dc2': 1}"); @@ -81,7 +86,7 @@ public static void setup() { @Test public void should_ignore_remote_dcs() { - for (Node node : sessionRule.session().getMetadata().getNodes().values()) { + for (Node node : SESSION_RULE.session().getMetadata().getNodes().values()) { if (LOCAL_DC.equals(node.getDatacenter())) { assertThat(node.getDistance()).isEqualTo(NodeDistance.LOCAL); assertThat(node.getState()).isEqualTo(NodeState.UP); @@ -99,7 +104,7 @@ public void should_ignore_remote_dcs() { @Test public void should_use_round_robin_on_local_dc_when_not_enough_routing_information() { ByteBuffer routingKey = TypeCodecs.INT.encodePrimitive(1, ProtocolVersion.DEFAULT); - TokenMap tokenMap = sessionRule.session().getMetadata().getTokenMap().get(); + TokenMap tokenMap = SESSION_RULE.session().getMetadata().getTokenMap().get(); // TODO add statements with setKeyspace when that is supported List statements = ImmutableList.of( @@ -118,7 +123,7 @@ 
public void should_use_round_robin_on_local_dc_when_not_enough_routing_informati for (Statement statement : statements) { List coordinators = new ArrayList<>(); for (int i = 0; i < 12; i++) { - ResultSet rs = sessionRule.session().execute(statement); + ResultSet rs = SESSION_RULE.session().execute(statement); Node coordinator = rs.getExecutionInfo().getCoordinator(); assertThat(coordinator.getDatacenter()).isEqualTo(LOCAL_DC); coordinators.add(coordinator); @@ -135,7 +140,7 @@ public void should_use_round_robin_on_local_dc_when_not_enough_routing_informati public void should_prioritize_replicas_when_routing_information_present() { CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); ByteBuffer routingKey = TypeCodecs.INT.encodePrimitive(1, ProtocolVersion.DEFAULT); - TokenMap tokenMap = sessionRule.session().getMetadata().getTokenMap().get(); + TokenMap tokenMap = SESSION_RULE.session().getMetadata().getTokenMap().get(); Set localReplicas = new HashSet<>(); for (Node replica : tokenMap.getReplicas(keyspace, routingKey)) { if (replica.getDatacenter().equals(LOCAL_DC)) { @@ -159,7 +164,7 @@ public void should_prioritize_replicas_when_routing_information_present() { // reasonable distribution: Map hits = new HashMap<>(); for (int i = 0; i < 2000; i++) { - ResultSet rs = sessionRule.session().execute(statement); + ResultSet rs = SESSION_RULE.session().execute(statement); Node coordinator = rs.getExecutionInfo().getCoordinator(); assertThat(localReplicas).contains(coordinator); assertThat(coordinator.getDatacenter()).isEqualTo(LOCAL_DC); @@ -176,17 +181,19 @@ public void should_prioritize_replicas_when_routing_information_present() { public void should_hit_non_replicas_when_routing_information_present_but_all_replicas_down() { CqlIdentifier keyspace = CqlIdentifier.fromCql("test"); ByteBuffer routingKey = TypeCodecs.INT.encodePrimitive(1, ProtocolVersion.DEFAULT); - TokenMap tokenMap = sessionRule.session().getMetadata().getTokenMap().get(); + TokenMap tokenMap = SESSION_RULE.session().getMetadata().getTokenMap().get(); - InternalDriverContext context = (InternalDriverContext) sessionRule.session().getContext(); + InternalDriverContext context = (InternalDriverContext) SESSION_RULE.session().getContext(); Set localReplicas = new HashSet<>(); for (Node replica : tokenMap.getReplicas(keyspace, routingKey)) { if (replica.getDatacenter().equals(LOCAL_DC)) { localReplicas.add(replica); context.getEventBus().fire(TopologyEvent.forceDown(replica.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat(() -> assertThat(replica.getOpenConnections()).isZero()) - .becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted(() -> assertThat(replica.getOpenConnections()).isZero()); } } assertThat(localReplicas).hasSize(2); @@ -204,7 +211,7 @@ public void should_hit_non_replicas_when_routing_information_present_but_all_rep for (Statement statement : statements) { List coordinators = new ArrayList<>(); for (int i = 0; i < 6; i++) { - ResultSet rs = sessionRule.session().execute(statement); + ResultSet rs = SESSION_RULE.session().execute(statement); Node coordinator = rs.getExecutionInfo().getCoordinator(); coordinators.add(coordinator); assertThat(coordinator.getDatacenter()).isEqualTo(LOCAL_DC); @@ -220,15 +227,17 @@ public void should_hit_non_replicas_when_routing_information_present_but_all_rep for (Node replica : localReplicas) { context.getEventBus().fire(TopologyEvent.forceUp(replica.getBroadcastRpcAddress().get())); - 
ConditionChecker.checkThat(() -> assertThat(replica.getOpenConnections()).isPositive()) - .becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted(() -> assertThat(replica.getOpenConnections()).isPositive()); } } @Test public void should_apply_node_filter() { Set localNodes = new HashSet<>(); - for (Node node : sessionRule.session().getMetadata().getNodes().values()) { + for (Node node : SESSION_RULE.session().getMetadata().getNodes().values()) { if (node.getDatacenter().equals(LOCAL_DC)) { localNodes.add(node); } @@ -242,8 +251,8 @@ public void should_apply_node_filter() { // Open a separate session with a filter try (CqlSession session = SessionUtils.newSession( - ccmRule, - sessionRule.keyspace(), + CCM_RULE, + SESSION_RULE.keyspace(), null, null, node -> !node.getEndPoint().equals(ignoredEndPoint))) { diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/NodeTargetingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/NodeTargetingIT.java similarity index 66% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/NodeTargetingIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/NodeTargetingIT.java index 0ee418acb1e..f6a6176568a 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/NodeTargetingIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/NodeTargetingIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,12 +16,13 @@ * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.loadbalancing; +package com.datastax.oss.driver.core.loadbalancing; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Fail.fail; +import static org.awaitility.Awaitility.await; import com.datastax.oss.driver.api.core.AllNodesFailedException; import com.datastax.oss.driver.api.core.CqlSession; @@ -32,7 +35,6 @@ import com.datastax.oss.driver.api.core.servererrors.UnavailableException; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; @@ -40,7 +42,7 @@ import java.net.InetSocketAddress; import java.util.concurrent.TimeUnit; import org.junit.Before; -import org.junit.Rule; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.RuleChain; @@ -49,19 +51,24 @@ @Category(ParallelizableTests.class) public class NodeTargetingIT { - private SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(5)); + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(5)); - private SessionRule sessionRule = SessionRule.builder(simulacron).build(); + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE).build(); - @Rule public TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); @Before public void clear() { - simulacron.cluster().clearLogs(); - simulacron.cluster().clearPrimes(true); - simulacron.cluster().node(4).stop(); - ConditionChecker.checkThat(() -> getNode(4).getState() == NodeState.DOWN) - .before(5, TimeUnit.SECONDS); + SIMULACRON_RULE.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearPrimes(true); + SIMULACRON_RULE.cluster().node(4).stop(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(5, TimeUnit.SECONDS) + .until(() -> getNode(4).getState() == NodeState.DOWN); } @Test @@ -74,7 +81,7 @@ public void should_use_node_on_statement() { Statement statement = SimpleStatement.newInstance("select * system.local").setNode(node); // when statement is executed - ResultSet result = sessionRule.session().execute(statement); + ResultSet result = SESSION_RULE.session().execute(statement); // then the query should have been sent to the configured node. assertThat(result.getExecutionInfo().getCoordinator()).isEqualTo(node); @@ -84,18 +91,21 @@ public void should_use_node_on_statement() { @Test public void should_fail_if_node_fails_query() { String query = "mock"; - simulacron.cluster().node(3).prime(when(query).then(unavailable(ConsistencyLevel.ALL, 1, 0))); + SIMULACRON_RULE + .cluster() + .node(3) + .prime(when(query).then(unavailable(ConsistencyLevel.ALL, 1, 0))); // given a statement with a node configured to fail the given query. Node node3 = getNode(3); Statement statement = SimpleStatement.newInstance(query).setNode(node3); // when statement is executed an error should be raised. 
try { - sessionRule.session().execute(statement); + SESSION_RULE.session().execute(statement); fail("Should have thrown AllNodesFailedException"); } catch (AllNodesFailedException e) { - assertThat(e.getErrors().size()).isEqualTo(1); - assertThat(e.getErrors().get(node3)).isInstanceOf(UnavailableException.class); + assertThat(e.getAllErrors().size()).isEqualTo(1); + assertThat(e.getAllErrors().get(node3).get(0)).isInstanceOf(UnavailableException.class); } } @@ -107,24 +117,24 @@ public void should_fail_if_node_is_not_connected() { Statement statement = SimpleStatement.newInstance("select * system.local").setNode(node4); try { // when statement is executed - sessionRule.session().execute(statement); + SESSION_RULE.session().execute(statement); fail("Query should have failed"); } catch (NoNodeAvailableException e) { - assertThat(e.getErrors()).isEmpty(); + assertThat(e.getAllErrors()).isEmpty(); } catch (AllNodesFailedException e) { // its also possible that the query is tried. This can happen if the node was marked // down, but not all connections have been closed yet. In this case, just verify that // the expected host failed. - assertThat(e.getErrors().size()).isEqualTo(1); - assertThat(e.getErrors()).containsOnlyKeys(node4); + assertThat(e.getAllErrors().size()).isEqualTo(1); + assertThat(e.getAllErrors()).containsOnlyKeys(node4); } } private Node getNode(int id) { - BoundNode boundNode = simulacron.cluster().node(id); + BoundNode boundNode = SIMULACRON_RULE.cluster().node(id); assertThat(boundNode).isNotNull(); InetSocketAddress address = (InetSocketAddress) boundNode.getAddress(); - return sessionRule + return SESSION_RULE .session() .getMetadata() .findNode(address) diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java similarity index 68% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java index 88e2ee8b24c..5113a8861b0 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/loadbalancing/PerProfileLoadBalancingPolicyIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.loadbalancing; +package com.datastax.oss.driver.core.loadbalancing; import static com.datastax.oss.driver.assertions.Assertions.assertThat; @@ -25,12 +27,15 @@ import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import java.util.Objects; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -43,12 +48,12 @@ public class PerProfileLoadBalancingPolicyIT { // 3 2-node DCs - private static SimulacronRule simulacron = + private static final SimulacronRule SIMULACRON_RULE = new SimulacronRule(ClusterSpec.builder().withNodes(2, 2, 2)); // default lb policy should consider dc1 local, profile1 dc3, profile2 empty. - private static SessionRule sessionRule = - SessionRule.builder(simulacron) + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, "dc1") @@ -59,20 +64,21 @@ public class PerProfileLoadBalancingPolicyIT { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); private static String QUERY_STRING = "select * from foo"; private static final SimpleStatement QUERY = SimpleStatement.newInstance(QUERY_STRING); @Before public void clear() { - simulacron.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearLogs(); } @BeforeClass public static void setup() { // sanity checks - DriverContext context = sessionRule.session().getContext(); + DriverContext context = SESSION_RULE.session().getContext(); DriverConfig config = context.getConfig(); assertThat(config.getProfiles()).containsKeys("profile1", "profile2"); @@ -87,10 +93,10 @@ public static void setup() { assertThat(defaultPolicy).isSameAs(policy2).isNotSameAs(policy1); - for (Node node : sessionRule.session().getMetadata().getNodes().values()) { + for (Node node : SESSION_RULE.session().getMetadata().getNodes().values()) { // if node is in dc2 it should be ignored, otherwise (dc1, dc3) it should be local. NodeDistance expectedDistance = - node.getDatacenter().equals("dc2") ? NodeDistance.IGNORED : NodeDistance.LOCAL; + Objects.equals(node.getDatacenter(), "dc2") ? NodeDistance.IGNORED : NodeDistance.LOCAL; assertThat(node.getDistance()).isEqualTo(expectedDistance); } } @@ -98,10 +104,12 @@ public static void setup() { @Test public void should_use_policy_from_request_profile() { // Since profile1 uses dc3 as localDC, only those nodes should receive these queries. 
-    Statement statement = QUERY.setExecutionProfileName("profile1");
+    Statement<?> statement = QUERY.setExecutionProfileName("profile1");
     for (int i = 0; i < 10; i++) {
-      ResultSet result = sessionRule.session().execute(statement);
-      assertThat(result.getExecutionInfo().getCoordinator().getDatacenter()).isEqualTo("dc3");
+      ResultSet result = SESSION_RULE.session().execute(statement);
+      Node coordinator = result.getExecutionInfo().getCoordinator();
+      assertThat(coordinator).isNotNull();
+      assertThat(coordinator.getDatacenter()).isEqualTo("dc3");
     }
 
     assertQueryInDc(0, 0);
@@ -112,10 +120,12 @@ public void should_use_policy_from_request_profile() {
   @Test
   public void should_use_policy_from_config_when_not_configured_in_request_profile() {
     // Since profile2 does not define an lbp config, it should use default which uses dc1.
-    Statement statement = QUERY.setExecutionProfileName("profile2");
+    Statement<?> statement = QUERY.setExecutionProfileName("profile2");
     for (int i = 0; i < 10; i++) {
-      ResultSet result = sessionRule.session().execute(statement);
-      assertThat(result.getExecutionInfo().getCoordinator().getDatacenter()).isEqualTo("dc1");
+      ResultSet result = SESSION_RULE.session().execute(statement);
+      Node coordinator = result.getExecutionInfo().getCoordinator();
+      assertThat(coordinator).isNotNull();
+      assertThat(coordinator.getDatacenter()).isEqualTo("dc1");
     }
 
     assertQueryInDc(0, 5);
@@ -126,7 +136,7 @@ public void should_use_policy_from_config_when_not_configured_in_request_profile
   private void assertQueryInDc(int dc, int expectedPerNode) {
     for (int i = 0; i < 2; i++) {
       assertThat(
-              simulacron.cluster().dc(dc).node(i).getLogs().getQueryLogs().stream()
+              SIMULACRON_RULE.cluster().dc(dc).node(i).getLogs().getQueryLogs().stream()
                   .filter(l -> l.getQuery().equals(QUERY_STRING)))
           .as("Expected query count to be %d for dc %d", 5, i)
           .hasSize(expectedPerNode);
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenIT.java
new file mode 100644
index 00000000000..278bb106eda
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenIT.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.oss.driver.core.metadata; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; +import java.time.Duration; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.CASSANDRA, + maxExclusive = "4.0-beta4", + description = + "Token allocation is not compatible with this partitioner, " + + "but is enabled by default in C* 4.0 (see CASSANDRA-7032 and CASSANDRA-13701)") +public class ByteOrderedTokenIT extends TokenITBase { + + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder() + .withNodes(3) + .withCreateOption("-p ByteOrderedPartitioner") + .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("request_timeout_in_ms", 45_000) + .build(); + + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) + .withKeyspace(false) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + public ByteOrderedTokenIT() { + super("org.apache.cassandra.dht.ByteOrderedPartitioner", ByteOrderedToken.class, false); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @BeforeClass + public static void createSchema() { + TokenITBase.createSchema(SESSION_RULE.session()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/ByteOrderedTokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenVnodesIT.java similarity index 50% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/ByteOrderedTokenVnodesIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenVnodesIT.java index 239b660345a..4d7cf8ad631 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/ByteOrderedTokenVnodesIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/ByteOrderedTokenVnodesIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,11 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; @@ -27,17 +31,27 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +@BackendRequirement( + type = BackendType.CASSANDRA, + maxExclusive = "4.0-beta4", + description = + "Token allocation is not compatible with this partitioner, " + + "but is enabled by default in C* 4.0 (see CASSANDRA-7032 and CASSANDRA-13701)") public class ByteOrderedTokenVnodesIT extends TokenITBase { - private static CustomCcmRule ccmRule = + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder() .withNodes(3) .withCreateOption("-p ByteOrderedPartitioner") .withCreateOption("--vnodes") + .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("request_timeout_in_ms", 45_000) .build(); - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) .withKeyspace(false) .withConfigLoader( SessionUtils.configLoaderBuilder() @@ -45,7 +59,8 @@ public class ByteOrderedTokenVnodesIT extends TokenITBase { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); public ByteOrderedTokenVnodesIT() { super("org.apache.cassandra.dht.ByteOrderedPartitioner", ByteOrderedToken.class, true); @@ -53,11 +68,11 @@ public ByteOrderedTokenVnodesIT() { @Override protected CqlSession session() { - return sessionRule.session(); + return SESSION_RULE.session(); } @BeforeClass public static void createSchema() { - TokenITBase.createSchema(sessionRule.session()); + TokenITBase.createSchema(SESSION_RULE.session()); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/CaseSensitiveUdtIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/CaseSensitiveUdtIT.java similarity index 82% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/CaseSensitiveUdtIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/CaseSensitiveUdtIT.java index 0587f29441e..f80b02207f8 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/CaseSensitiveUdtIT.java +++ 
b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/CaseSensitiveUdtIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import static org.assertj.core.api.Assertions.assertThat; @@ -46,23 +48,24 @@ @Category(ParallelizableTests.class) public class CaseSensitiveUdtIT { - private static CcmRule ccmRule = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = - SessionRule.builder(ccmRule) + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @Test public void should_expose_metadata_with_correct_case() { - boolean supportsFunctions = ccmRule.getCassandraVersion().compareTo(Version.V2_2_0) >= 0; + boolean supportsFunctions = CCM_RULE.getCassandraVersion().compareTo(Version.V2_2_0) >= 0; - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute("CREATE TYPE \"Address\"(street text)"); @@ -91,7 +94,7 @@ public void should_expose_metadata_with_correct_case() { KeyspaceMetadata keyspace = session .getMetadata() - .getKeyspace(sessionRule.keyspace()) + .getKeyspace(SESSION_RULE.keyspace()) .orElseThrow(() -> new AssertionError("Couldn't find rule's keyspace")); UserDefinedType addressType = diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/DescribeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/DescribeIT.java new file mode 100644 index 00000000000..4d6c2a7a3b1 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/DescribeIT.java @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.metadata; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseKeyspaceMetadata; +import com.datastax.dse.driver.internal.core.metadata.schema.DefaultDseTableMetadata; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.SerializationHelper; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultKeyspaceMetadata; +import com.datastax.oss.driver.internal.core.metadata.schema.DefaultTableMetadata; +import com.datastax.oss.driver.shaded.guava.common.base.Charsets; +import com.datastax.oss.driver.shaded.guava.common.base.Splitter; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.google.common.io.Files; +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.regex.Pattern; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category(ParallelizableTests.class) +public class DescribeIT { + + private static final Logger LOG = LoggerFactory.getLogger(DescribeIT.class); + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = + SessionRule.builder(CCM_RULE) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + // disable debouncer to speed up test. 
+ .withDuration(DefaultDriverOption.METADATA_SCHEMA_WINDOW, Duration.ofSeconds(0)) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static final Splitter STATEMENT_SPLITTER = + // Use a regex to ignore semicolons in function scripts + Splitter.on(Pattern.compile(";\n")).omitEmptyStrings(); + + private static Version serverVersion; + + private static final Map scriptFileForBackend = + ImmutableMap.builder() + .put(BackendType.CASSANDRA, "DescribeIT/oss") + .put(BackendType.DSE, "DescribeIT/dse") + .put(BackendType.HCD, "DescribeIT/hcd") + .build(); + + private static File scriptFile; + private static String scriptContents; + + @BeforeClass + public static void setup() { + serverVersion = + CCM_RULE.isDistributionOf(BackendType.CASSANDRA) + ? CCM_RULE.getCassandraVersion().nextStable() + : CCM_RULE.getDistributionVersion().nextStable(); + + scriptFile = getScriptFile(); + assertThat(scriptFile).exists(); + assertThat(scriptFile).isFile(); + assertThat(scriptFile).canRead(); + scriptContents = getScriptContents(); + + setupDatabase(); + } + + @Test + public void describe_output_should_match_creation_script() throws Exception { + + CqlSession session = SESSION_RULE.session(); + + KeyspaceMetadata keyspaceMetadata = + session.getMetadata().getKeyspace(SESSION_RULE.keyspace()).orElseThrow(AssertionError::new); + String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); + + assertThat(describeOutput) + .as( + "Describe output doesn't match create statements, " + + "maybe you need to add a new script in integration-tests/src/test/resources. " + + "Server version = %s %s, used script = %s", + CCM_RULE.getDistribution(), serverVersion, scriptFile) + .isEqualTo(scriptContents); + } + + private boolean atLeastVersion(Version dseVersion, Version ossVersion) { + Version comparison = CCM_RULE.isDistributionOf(BackendType.DSE) ? 
dseVersion : ossVersion; + return serverVersion.compareTo(comparison) >= 0; + } + + @Test + public void keyspace_metadata_should_be_serializable() throws Exception { + + CqlSession session = SESSION_RULE.session(); + + Optional<KeyspaceMetadata> ksOption = + session.getMetadata().getKeyspace(session.getKeyspace().get()); + assertThat(ksOption).isPresent(); + KeyspaceMetadata ks = ksOption.get(); + assertThat(ks).isInstanceOfAny(DefaultKeyspaceMetadata.class, DefaultDseKeyspaceMetadata.class); + + /* Validate that the keyspace metadata is fully populated */ + assertThat(ks.getUserDefinedTypes()).isNotEmpty(); + assertThat(ks.getTables()).isNotEmpty(); + if (atLeastVersion(Version.V5_0_0, Version.V3_0_0)) { + assertThat(ks.getViews()).isNotEmpty(); + } + if (atLeastVersion(Version.V5_0_0, Version.V2_2_0)) { + assertThat(ks.getFunctions()).isNotEmpty(); + assertThat(ks.getAggregates()).isNotEmpty(); + } + + /* A table with an explicit compound primary key + specified clustering column */ + Optional<TableMetadata> tableOption = ks.getTable("rank_by_year_and_name"); + assertThat(tableOption).isPresent(); + TableMetadata table = tableOption.get(); + assertThat(table).isInstanceOfAny(DefaultTableMetadata.class, DefaultDseTableMetadata.class); + + /* Validate that the table metadata is fully populated */ + assertThat(table.getPartitionKey()).isNotEmpty(); + assertThat(table.getClusteringColumns()).isNotEmpty(); + assertThat(table.getColumns()).isNotEmpty(); + assertThat(table.getOptions()).isNotEmpty(); + assertThat(table.getIndexes()).isNotEmpty(); + + KeyspaceMetadata deserialized = SerializationHelper.serializeAndDeserialize(ks); + assertThat(deserialized).isEqualTo(ks); + } + + /** + * Find a creation script in our test resources that matches the current server version. If we + * don't have an exact match, use the closest version below it.
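+ * (Candidate file names, minus their extension, are parsed as {@link Version}s for this comparison.)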
+ */ + private static File getScriptFile() { + URL logbackTestUrl = DescribeIT.class.getResource("/logback-test.xml"); + if (logbackTestUrl == null || logbackTestUrl.getFile().isEmpty()) { + fail( + "Expected to use logback-test.xml to determine location of " + + "target/test-classes, but got URL %s", + logbackTestUrl); + } + File resourcesDir = new File(logbackTestUrl.getFile()).getParentFile(); + File scriptsDir = new File(resourcesDir, scriptFileForBackend.get(CCM_RULE.getDistribution())); + LOG.debug("Looking for a matching script in directory {}", scriptsDir); + + File[] candidates = scriptsDir.listFiles(); + assertThat(candidates).isNotNull(); + + File bestFile = null; + Version bestVersion = null; + for (File candidate : candidates) { + String fileName = candidate.getName(); + String candidateVersionString = fileName.substring(0, fileName.lastIndexOf('.')); + Version candidateVersion = Version.parse(candidateVersionString); + LOG.debug("Considering {}, which resolves to version {}", fileName, candidateVersion); + if (candidateVersion.compareTo(serverVersion) > 0) { + LOG.debug("too high, discarding"); + } else if (bestVersion != null && bestVersion.compareTo(candidateVersion) >= 0) { + LOG.debug("not higher than {}, discarding", bestVersion); + } else { + LOG.debug("best so far"); + bestVersion = candidateVersion; + bestFile = candidate; + } + } + assertThat(bestFile) + .as("Could not find create script with version <= %s in %s", serverVersion, scriptsDir) + .isNotNull(); + + LOG.info("Using {} to test against {} {}", bestFile, CCM_RULE.getDistribution(), serverVersion); + return bestFile; + } + + private static String getScriptContents() { + + try { + + return Files.asCharSource(scriptFile, Charsets.UTF_8) + .read() + .trim() + .replaceAll("ks_0", SESSION_RULE.keyspace().asCql(true)); + } catch (IOException ioe) { + fail("Exception reading script file " + scriptFile, ioe); + return null; + } + } + + private static void setupDatabase() { + List<String> statements = STATEMENT_SPLITTER.splitToList(scriptContents); + SchemaChangeSynchronizer.withLock( + () -> { + // Skip the first statement (CREATE KEYSPACE), we already have a keyspace + for (int i = 1; i < statements.size(); i++) { + String statement = statements.get(i); + try { + SESSION_RULE.session().execute(statement); + } catch (Exception e) { + fail("Error executing statement %s (%s)", statement, e); + } + } + }); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/MetadataIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/MetadataIT.java new file mode 100644 index 00000000000..1b1aed4b3de --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/MetadataIT.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.metadata; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class MetadataIT { + + private CcmRule ccmRule = CcmRule.getInstance(); + + private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @Test + public void should_expose_cluster_name() { + Metadata metadata = sessionRule.session().getMetadata(); + assertThat(metadata.getClusterName()).hasValue(CcmBridge.CLUSTER_NAME); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/Murmur3TokenIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenIT.java similarity index 53% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/Murmur3TokenIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenIT.java index 0009eb29323..a119c503a20 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/Murmur3TokenIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -29,10 +31,17 @@ public class Murmur3TokenIT extends TokenITBase { - private static CustomCcmRule ccmRule = CustomCcmRule.builder().withNodes(3).build(); + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder() + .withNodes(3) + .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("request_timeout_in_ms", 45_000) + .build(); - private static SessionRule<CqlSession> sessionRule = - SessionRule.builder(ccmRule) + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(CCM_RULE) .withKeyspace(false) .withConfigLoader( SessionUtils.configLoaderBuilder() @@ -40,7 +49,8 @@ public class Murmur3TokenIT extends TokenITBase { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); public Murmur3TokenIT() { super("org.apache.cassandra.dht.Murmur3Partitioner", Murmur3Token.class, false); @@ -48,11 +58,11 @@ public Murmur3TokenIT() { @Override protected CqlSession session() { - return sessionRule.session(); + return SESSION_RULE.session(); } @BeforeClass public static void createSchema() { - TokenITBase.createSchema(sessionRule.session()); + TokenITBase.createSchema(SESSION_RULE.session()); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenVnodesIT.java new file mode 100644 index 00000000000..cb80abc0a3f --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/Murmur3TokenVnodesIT.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.core.metadata; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; +import java.time.Duration; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.CASSANDRA, + maxExclusive = "4.0-beta4", + // TODO Re-enable when CASSANDRA-16364 is fixed + description = "TODO Re-enable when CASSANDRA-16364 is fixed") +public class Murmur3TokenVnodesIT extends TokenITBase { + + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder() + .withNodes(3) + .withCreateOption("--vnodes") + .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("request_timeout_in_ms", 45_000) + .build(); + + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(CCM_RULE) + .withKeyspace(false) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + public Murmur3TokenVnodesIT() { + super("org.apache.cassandra.dht.Murmur3Partitioner", Murmur3Token.class, true); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @BeforeClass + public static void createSchema() { + createSchema(SESSION_RULE.session()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/NodeMetadataIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeMetadataIT.java similarity index 56% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/NodeMetadataIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeMetadataIT.java index 0fc9bf3258a..8f5680ff41a 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/NodeMetadataIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeMetadataIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,34 +15,42 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import com.datastax.dse.driver.api.core.metadata.DseNodeProperties; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeState; import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.context.EventBus; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.TopologyEvent; import java.net.InetSocketAddress; import java.util.Collection; -import org.junit.ClassRule; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @Category(ParallelizableTests.class) public class NodeMetadataIT { - @ClassRule public static CcmRule ccmRule = CcmRule.getInstance(); + @Rule public CcmRule ccmRule = CcmRule.getInstance(); @Test public void should_expose_node_metadata() { try (CqlSession session = SessionUtils.newSession(ccmRule)) { + Node node = getUniqueNode(session); // Run a few basic checks given what we know about our test environment: assertThat(node.getEndPoint()).isNotNull(); @@ -52,8 +62,9 @@ public void should_expose_node_metadata() { assertThat(node.getListenAddress().get().getAddress()).isEqualTo(connectAddress.getAddress()); assertThat(node.getDatacenter()).isEqualTo("dc1"); assertThat(node.getRack()).isEqualTo("r1"); - if (!CcmBridge.DSE_ENABLEMENT) { - // CcmBridge does not report accurate C* versions for DSE, only approximated values + if (CcmBridge.isDistributionOf(BackendType.CASSANDRA)) { + // CcmBridge does not report accurate C* versions for other distributions (e.g. 
DSE), only + // approximated values assertThat(node.getCassandraVersion()).isEqualTo(ccmRule.getCassandraVersion()); } assertThat(node.getState()).isSameAs(NodeState.UP); @@ -68,14 +79,40 @@ public void should_expose_node_metadata() { // Force the node down and back up to check that upSinceMillis gets updated EventBus eventBus = ((InternalDriverContext) session.getContext()).getEventBus(); eventBus.fire(TopologyEvent.forceDown(node.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat(() -> node.getState() == NodeState.FORCED_DOWN).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> node.getState() == NodeState.FORCED_DOWN); assertThat(node.getUpSinceMillis()).isEqualTo(-1); eventBus.fire(TopologyEvent.forceUp(node.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat(() -> node.getState() == NodeState.UP).becomesTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> node.getState() == NodeState.UP); assertThat(node.getUpSinceMillis()).isGreaterThan(upTime1); } } + @Test + @BackendRequirement(type = BackendType.DSE, minInclusive = "5.1") + public void should_expose_dse_node_properties() { + try (CqlSession session = SessionUtils.newSession(ccmRule)) { + + Node node = getUniqueNode(session); + + // Basic checks as we want something that will work with a large range of DSE versions: + assertThat(node.getExtras()) + .containsKeys( + DseNodeProperties.DSE_VERSION, + DseNodeProperties.DSE_WORKLOADS, + DseNodeProperties.SERVER_ID); + assertThat(node.getExtras().get(DseNodeProperties.DSE_VERSION)) + .isEqualTo(ccmRule.getDistributionVersion()); + assertThat(node.getExtras().get(DseNodeProperties.SERVER_ID)).isInstanceOf(String.class); + assertThat(node.getExtras().get(DseNodeProperties.DSE_WORKLOADS)).isInstanceOf(Set.class); + } + } + private static Node getUniqueNode(CqlSession session) { Collection nodes = session.getMetadata().getNodes().values(); assertThat(nodes).hasSize(1); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/NodeStateIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeStateIT.java similarity index 86% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/NodeStateIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeStateIT.java index 7032f392853..e468e0a10d7 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/NodeStateIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/NodeStateIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,10 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import static com.datastax.oss.driver.assertions.Assertions.assertThat; import static com.datastax.oss.driver.assertions.Assertions.fail; +import static org.awaitility.Awaitility.await; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -32,12 +35,16 @@ import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance; +import com.datastax.oss.driver.api.core.metadata.EndPoint; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.metadata.DefaultEndPoint; @@ -143,13 +150,13 @@ public void setup() { driverContext.getLoadBalancingPolicy(DriverExecutionProfile.DEFAULT_NAME); // Sanity check: the driver should have connected to simulacron - ConditionChecker.checkThat( + await() + .alias("Connections established") + .pollInterval(500, TimeUnit.MILLISECONDS) + .until( () -> // 1 control connection + 2 pooled connections per node - simulacron.cluster().getActiveConnections() == 5) - .as("Connections established") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + simulacron.cluster().getActiveConnections() == 5); // Find out which node is the control node, and identify the corresponding Simulacron and driver // metadata objects. 
@@ -189,14 +196,14 @@ public void teardown() { @Test public void should_report_connections_for_healthy_nodes() { - ConditionChecker.checkThat( + await() + .alias("Node metadata up-to-date") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> { assertThat(metadataControlNode).isUp().hasOpenConnections(3).isNotReconnecting(); assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting(); - }) - .as("Node metadata up-to-date") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + }); } @Test @@ -205,11 +212,11 @@ public void should_keep_regular_node_up_when_still_one_connection() { NodeConnectionReport report = simulacronRegularNode.getConnections(); simulacron.cluster().closeConnection(report.getConnections().get(0), CloseType.DISCONNECT); - ConditionChecker.checkThat( - () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(1).isReconnecting()) - .as("Reconnection started") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Reconnection started") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(1).isReconnecting()); inOrder.verify(nodeStateListener, never()).onDown(metadataRegularNode); } @@ -217,11 +224,11 @@ public void should_keep_regular_node_up_when_still_one_connection() { public void should_mark_regular_node_down_when_no_more_connections() { simulacronRegularNode.stop(); - ConditionChecker.checkThat( - () -> assertThat(metadataRegularNode).isDown().hasOpenConnections(0).isReconnecting()) - .as("Node going down") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Node going down") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataRegularNode).isDown().hasOpenConnections(0).isReconnecting()); expect(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, metadataRegularNode)); inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); @@ -239,19 +246,19 @@ public void should_mark_control_node_down_when_control_connection_is_last_connec simulacron.cluster().closeConnection(address, CloseType.DISCONNECT); } } - ConditionChecker.checkThat( - () -> assertThat(metadataControlNode).isUp().hasOpenConnections(1).isReconnecting()) - .as("Control node lost its non-control connections") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Control node lost its non-control connections") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataControlNode).isUp().hasOpenConnections(1).isReconnecting()); inOrder.verify(nodeStateListener, never()).onDown(metadataRegularNode); simulacron.cluster().closeConnection(controlAddress, CloseType.DISCONNECT); - ConditionChecker.checkThat( - () -> assertThat(metadataControlNode).isDown().hasOpenConnections(0).isReconnecting()) - .as("Control node going down") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Control node going down") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataControlNode).isDown().hasOpenConnections(0).isReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataControlNode); expect(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, metadataControlNode)); @@ -261,20 +268,20 @@ public void should_mark_control_node_down_when_control_connection_is_last_connec public void should_bring_node_back_up_when_reconnection_succeeds() { simulacronRegularNode.stop(); - ConditionChecker.checkThat( - () -> 
assertThat(metadataRegularNode).isDown().hasOpenConnections(0).isReconnecting()) - .as("Node going down") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Node going down") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataRegularNode).isDown().hasOpenConnections(0).isReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); simulacronRegularNode.acceptConnections(); - ConditionChecker.checkThat( - () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting()) - .as("Connections re-established") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Connections re-established") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); expect( @@ -286,45 +293,45 @@ public void should_bring_node_back_up_when_reconnection_succeeds() { public void should_apply_up_and_down_topology_events_when_ignored() { defaultLoadBalancingPolicy.ignore(metadataRegularNode); - ConditionChecker.checkThat( + await() + .alias("Driver closed all connections to ignored node") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> assertThat(metadataRegularNode) .isUp() .isIgnored() .hasOpenConnections(0) - .isNotReconnecting()) - .as("Driver closed all connections to ignored node") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + .isNotReconnecting()); driverContext .getEventBus() .fire(TopologyEvent.suggestDown(metadataRegularNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat( + await() + .alias("SUGGEST_DOWN event applied") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> assertThat(metadataRegularNode) .isDown() .isIgnored() .hasOpenConnections(0) - .isNotReconnecting()) - .as("SUGGEST_DOWN event applied") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + .isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); driverContext .getEventBus() .fire(TopologyEvent.suggestUp(metadataRegularNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat( + await() + .alias("SUGGEST_UP event applied") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> assertThat(metadataRegularNode) .isUp() .isIgnored() .hasOpenConnections(0) - .isNotReconnecting()) - .as("SUGGEST_UP event applied") - .before(10, TimeUnit.MINUTES) - .becomesTrue(); + .isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); defaultLoadBalancingPolicy.stopIgnoring(metadataRegularNode); @@ -366,11 +373,11 @@ public void should_force_immediate_reconnection_when_up_topology_event() localSimulacronNode.stop(); - ConditionChecker.checkThat( - () -> assertThat(localMetadataNode).isDown().hasOpenConnections(0).isReconnecting()) - .as("Node going down") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Node going down") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(localMetadataNode).isDown().hasOpenConnections(0).isReconnecting()); verify(localNodeStateListener, timeout(500)).onDown(localMetadataNode); expect(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, localMetadataNode)); @@ -380,10 +387,10 @@ public void should_force_immediate_reconnection_when_up_topology_event() .getEventBus() 
.fire(TopologyEvent.suggestUp(localMetadataNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat(() -> assertThat(localMetadataNode).isUp().isNotReconnecting()) - .as("Node coming back up") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Node coming back up") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted(() -> assertThat(localMetadataNode).isUp().isNotReconnecting()); verify(localNodeStateListener, timeout(500).times(2)).onUp(localMetadataNode); expect(NodeStateEvent.changed(NodeState.DOWN, NodeState.UP, localMetadataNode)); @@ -395,15 +402,15 @@ public void should_force_down_when_not_ignored() throws InterruptedException { driverContext .getEventBus() .fire(TopologyEvent.forceDown(metadataRegularNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat( + await() + .alias("Node forced down") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> assertThat(metadataRegularNode) .isForcedDown() .hasOpenConnections(0) - .isNotReconnecting()) - .as("Node forced down") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + .isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); // Should ignore up/down topology events while forced down @@ -423,11 +430,11 @@ public void should_force_down_when_not_ignored() throws InterruptedException { driverContext .getEventBus() .fire(TopologyEvent.forceUp(metadataRegularNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat( - () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting()) - .as("Node forced back up") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + await() + .alias("Node forced back up") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( + () -> assertThat(metadataRegularNode).isUp().hasOpenConnections(2).isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); } @@ -438,15 +445,15 @@ public void should_force_down_when_ignored() throws InterruptedException { driverContext .getEventBus() .fire(TopologyEvent.forceDown(metadataRegularNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat( + await() + .alias("Node forced down") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> assertThat(metadataRegularNode) .isForcedDown() .hasOpenConnections(0) - .isNotReconnecting()) - .as("Node forced down") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + .isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onDown(metadataRegularNode); // Should ignore up/down topology events while forced down @@ -467,16 +474,16 @@ public void should_force_down_when_ignored() throws InterruptedException { driverContext .getEventBus() .fire(TopologyEvent.forceUp(metadataRegularNode.getBroadcastRpcAddress().get())); - ConditionChecker.checkThat( + await() + .alias("Node forced back up") + .pollInterval(500, TimeUnit.MILLISECONDS) + .untilAsserted( () -> assertThat(metadataRegularNode) .isUp() .isIgnored() .hasOpenConnections(0) - .isNotReconnecting()) - .as("Node forced back up") - .before(10, TimeUnit.SECONDS) - .becomesTrue(); + .isNotReconnecting()); inOrder.verify(nodeStateListener, timeout(500)).onUp(metadataRegularNode); defaultLoadBalancingPolicy.stopIgnoring(metadataRegularNode); @@ -596,6 +603,7 @@ public void should_mark_unreachable_contact_point_down() { // Stopped node was tried first and marked down, that's our target scenario verify(localNodeStateListener, timeout(500)).onDown(localMetadataNode2); 
verify(localNodeStateListener, timeout(500)).onUp(localMetadataNode1); + verify(localNodeStateListener, timeout(500)).onSessionReady(localSession); verifyNoMoreInteractions(localNodeStateListener); return; } else { @@ -617,7 +625,13 @@ private void expect(NodeStateEvent... expectedEvents) { for (NodeStateEvent expected : expectedEvents) { try { NodeStateEvent actual = stateEvents.poll(10, TimeUnit.SECONDS); - assertThat(actual).isEqualTo(expected); + assertThat(actual).isNotNull(); + + // Don't compare events directly: some tests call this method with nodes obtained from + // another session instance, and nodes are compared by reference. + assertThat(actual.oldState).isEqualTo(expected.oldState); + assertThat(actual.newState).isEqualTo(expected.newState); + assertThat(actual.node.getHostId()).isEqualTo(expected.node.getHostId()); } catch (InterruptedException e) { fail("Interrupted while waiting for event"); } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/RandomTokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenIT.java similarity index 53% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/RandomTokenVnodesIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenIT.java index 587b003b27f..603783afb34 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/RandomTokenVnodesIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -27,17 +29,20 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -public class RandomTokenVnodesIT extends TokenITBase { +public class RandomTokenIT extends TokenITBase { - private static CustomCcmRule ccmRule = + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder() .withNodes(3) .withCreateOption("-p RandomPartitioner") - .withCreateOption("--vnodes") + .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("request_timeout_in_ms", 45_000) .build(); - private static SessionRule<CqlSession> sessionRule = - SessionRule.builder(ccmRule) + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(CCM_RULE) .withKeyspace(false) .withConfigLoader( SessionUtils.configLoaderBuilder() @@ -45,19 +50,20 @@ public class RandomTokenVnodesIT extends TokenITBase { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - public RandomTokenVnodesIT() { - super("org.apache.cassandra.dht.RandomPartitioner", RandomToken.class, true); + public RandomTokenIT() { + super("org.apache.cassandra.dht.RandomPartitioner", RandomToken.class, false); } @Override protected CqlSession session() { - return sessionRule.session(); + return SESSION_RULE.session(); } @BeforeClass public static void createSchema() { - TokenITBase.createSchema(sessionRule.session()); + TokenITBase.createSchema(SESSION_RULE.session()); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenVnodesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenVnodesIT.java new file mode 100644 index 00000000000..683b5651f98 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/RandomTokenVnodesIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.core.metadata; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; +import java.time.Duration; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@BackendRequirement( + type = BackendType.CASSANDRA, + maxExclusive = "4.0-beta4", + // TODO Re-enable when CASSANDRA-16364 is fixed + description = "TODO Re-enable when CASSANDRA-16364 is fixed") +public class RandomTokenVnodesIT extends TokenITBase { + + private static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder() + .withNodes(3) + .withCreateOption("-p RandomPartitioner") + .withCreateOption("--vnodes") + .withCassandraConfiguration("range_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("read_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("write_request_timeout_in_ms", 45_000) + .withCassandraConfiguration("request_timeout_in_ms", 45_000) + .build(); + + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(CCM_RULE) + .withKeyspace(false) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + public RandomTokenVnodesIT() { + super("org.apache.cassandra.dht.RandomPartitioner", RandomToken.class, true); + } + + @Override + protected CqlSession session() { + return SESSION_RULE.session(); + } + + @BeforeClass + public static void createSchema() { + TokenITBase.createSchema(SESSION_RULE.session()); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaAgreementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaAgreementIT.java similarity index 72% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaAgreementIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaAgreementIT.java index 80934c5e129..724508d38a3 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaAgreementIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaAgreementIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import static org.assertj.core.api.Assertions.assertThat; @@ -35,9 +37,9 @@ public class SchemaAgreementIT { - private static CustomCcmRule ccm = CustomCcmRule.builder().withNodes(3).build(); - private static SessionRule<CqlSession> sessionRule = - SessionRule.builder(ccm) + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withNodes(3).build(); + private static final SessionRule<CqlSession> SESSION_RULE = + SessionRule.builder(CCM_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) @@ -50,7 +52,8 @@ public class SchemaAgreementIT { .build()) .build(); - @ClassRule public static RuleChain ruleChain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final RuleChain CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @Rule public TestName name = new TestName(); @@ -59,34 +62,34 @@ public void should_succeed_when_all_nodes_agree() { ResultSet result = createTable(); assertThat(result.getExecutionInfo().isSchemaInAgreement()).isTrue(); - assertThat(sessionRule.session().checkSchemaAgreement()).isTrue(); + assertThat(SESSION_RULE.session().checkSchemaAgreement()).isTrue(); } @Test public void should_fail_on_timeout() { - ccm.getCcmBridge().pause(2); + CCM_RULE.getCcmBridge().pause(2); try { // Can't possibly agree since one node is paused. ResultSet result = createTable(); assertThat(result.getExecutionInfo().isSchemaInAgreement()).isFalse(); - assertThat(sessionRule.session().checkSchemaAgreement()).isFalse(); + assertThat(SESSION_RULE.session().checkSchemaAgreement()).isFalse(); } finally { - ccm.getCcmBridge().resume(2); + CCM_RULE.getCcmBridge().resume(2); } } @Test public void should_agree_when_up_nodes_agree() { - ccm.getCcmBridge().stop(2); + CCM_RULE.getCcmBridge().stop(2); try { // Should agree since up hosts should agree.
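// (the stopped node is excluded from the agreement check, which only considers up hosts)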
ResultSet result = createTable(); assertThat(result.getExecutionInfo().isSchemaInAgreement()).isTrue(); - assertThat(sessionRule.session().checkSchemaAgreement()).isTrue(); + assertThat(SESSION_RULE.session().checkSchemaAgreement()).isTrue(); } finally { - ccm.getCcmBridge().start(2); + CCM_RULE.getCcmBridge().start(2); } } @@ -98,7 +101,7 @@ public void should_fail_if_timeout_is_zero() { .withDuration( DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT, Duration.ofSeconds(0)) .build(); - try (CqlSession session = SessionUtils.newSession(ccm, sessionRule.keyspace(), loader)) { + try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace(), loader)) { ResultSet result = createTable(session); // Should not agree because schema metadata is disabled @@ -108,7 +111,7 @@ public void should_fail_if_timeout_is_zero() { } private ResultSet createTable() { - return createTable(sessionRule.session()); + return createTable(SESSION_RULE.session()); } private final AtomicInteger tableCounter = new AtomicInteger(); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaChangesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaChangesIT.java similarity index 76% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaChangesIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaChangesIT.java index be1d7ebf16b..85fcfc02cdb 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/SchemaChangesIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaChangesIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,49 +15,58 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.awaitility.Awaitility.await; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.Metadata; import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; -import com.datastax.oss.driver.categories.ParallelizableTests; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import java.time.Duration; import java.util.List; import java.util.Optional; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; -import org.assertj.core.api.Assertions; import org.junit.Before; -import org.junit.Rule; +import org.junit.ClassRule; import org.junit.Test; -import org.junit.experimental.categories.Category; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -@Category(ParallelizableTests.class) public class SchemaChangesIT { - private CcmRule ccmRule = CcmRule.getInstance(); + static { + CustomCcmRule.Builder builder = CustomCcmRule.builder(); + if (!CcmBridge.isDistributionOf( + BackendType.DSE, (dist, cass) -> cass.nextStable().compareTo(Version.V4_0_0) >= 0)) { + builder.withCassandraConfiguration("enable_materialized_views", true); + } + CCM_RULE = builder.build(); + } + + private static final CustomCcmRule CCM_RULE; // A client that we only use to set up the tests - private SessionRule<CqlSession> adminSessionRule = - SessionRule.builder(ccmRule) + private static final SessionRule<CqlSession> ADMIN_SESSION_RULE = + SessionRule.builder(CCM_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) @@ -63,15 +74,16 @@ public class SchemaChangesIT { .build()) .build(); - @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(adminSessionRule); + @ClassRule + public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(ADMIN_SESSION_RULE); @Before public void setup() { // Always drop and re-create the keyspace to start from a clean state - adminSessionRule + ADMIN_SESSION_RULE .session() - .execute(String.format("DROP KEYSPACE %s", adminSessionRule.keyspace())); - SessionUtils.createKeyspace(adminSessionRule.session(), adminSessionRule.keyspace()); +
SessionUtils.createKeyspace(ADMIN_SESSION_RULE.session(), ADMIN_SESSION_RULE.keyspace()); } @Test @@ -139,19 +151,18 @@ public void should_handle_table_creation() { "CREATE TABLE foo(k int primary key)", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .orElseThrow(IllegalStateException::new) .getTable(CqlIdentifier.fromInternal("foo")), table -> { - assertThat(table.getKeyspace()).isEqualTo(adminSessionRule.keyspace()); + assertThat(table.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); assertThat(table.getName().asInternal()).isEqualTo("foo"); assertThat(table.getColumns()).containsOnlyKeys(CqlIdentifier.fromInternal("k")); assertThat(table.getColumn(CqlIdentifier.fromInternal("k"))) .hasValueSatisfying( k -> { assertThat(k.getType()).isEqualTo(DataTypes.INT); - Assertions.assertThat(table.getPartitionKey()) - .containsExactly(k); + assertThat(table.getPartitionKey()).containsExactly(k); }); assertThat(table.getClusteringColumns()).isEmpty(); }, @@ -165,7 +176,7 @@ public void should_handle_table_drop() { "DROP TABLE foo", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getTable(CqlIdentifier.fromInternal("foo"))), (listener, oldTable) -> verify(listener).onTableDropped(oldTable)); } @@ -177,7 +188,7 @@ public void should_handle_table_update() { "ALTER TABLE foo ADD v int", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getTable(CqlIdentifier.fromInternal("foo"))), newTable -> assertThat(newTable.getColumn(CqlIdentifier.fromInternal("v"))).isPresent(), (listener, oldTable, newTable) -> verify(listener).onTableUpdated(newTable, oldTable)); @@ -190,10 +201,10 @@ public void should_handle_type_creation() { "CREATE TYPE t(i int)", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("t"))), type -> { - assertThat(type.getKeyspace()).isEqualTo(adminSessionRule.keyspace()); + assertThat(type.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); assertThat(type.getName().asInternal()).isEqualTo("t"); assertThat(type.getFieldNames()).containsExactly(CqlIdentifier.fromInternal("i")); assertThat(type.getFieldTypes()).containsExactly(DataTypes.INT); @@ -208,7 +219,7 @@ public void should_handle_type_drop() { "DROP TYPE t", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("t"))), (listener, oldType) -> verify(listener).onUserDefinedTypeDropped(oldType)); } @@ -220,7 +231,7 @@ public void should_handle_type_update() { "ALTER TYPE t ADD j int", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getUserDefinedType(CqlIdentifier.fromInternal("t"))), newType -> assertThat(newType.getFieldNames()) @@ -230,24 +241,27 @@ public void should_handle_type_update() { } @Test - @CassandraRequirement(min = "3.0") public void should_handle_view_creation() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V3_0_0) >= 0) + .isTrue(); should_handle_creation( "CREATE TABLE scores(user text, game text, score int, PRIMARY KEY (user, game))", "CREATE MATERIALIZED VIEW highscores " - + "AS SELECT user, score FROM scores " - + "WHERE game IS NOT 
NULL AND score IS NOT NULL PRIMARY KEY (game, score, user) " - + "WITH CLUSTERING ORDER BY (score DESC)", + + "AS SELECT game, user, score FROM scores " + + "WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL " + + "PRIMARY KEY (game, score, user) " + + "WITH CLUSTERING ORDER BY (score DESC, user DESC)", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getView(CqlIdentifier.fromInternal("highscores"))), view -> { - assertThat(view.getKeyspace()).isEqualTo(adminSessionRule.keyspace()); + assertThat(view.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); assertThat(view.getName().asInternal()).isEqualTo("highscores"); assertThat(view.getBaseTable().asInternal()).isEqualTo("scores"); assertThat(view.includesAllColumns()).isFalse(); - assertThat(view.getWhereClause()).hasValue("game IS NOT NULL AND score IS NOT NULL"); + assertThat(view.getWhereClause()) + .hasValue("game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL"); assertThat(view.getColumns()) .containsOnlyKeys( CqlIdentifier.fromInternal("game"), @@ -258,37 +272,41 @@ public void should_handle_view_creation() { } @Test - @CassandraRequirement(min = "3.0") public void should_handle_view_drop() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V3_0_0) >= 0) + .isTrue(); should_handle_drop( ImmutableList.of( "CREATE TABLE scores(user text, game text, score int, PRIMARY KEY (user, game))", "CREATE MATERIALIZED VIEW highscores " - + "AS SELECT user, score FROM scores " - + "WHERE game IS NOT NULL AND score IS NOT NULL PRIMARY KEY (game, score, user) " - + "WITH CLUSTERING ORDER BY (score DESC)"), + + "AS SELECT game, user, score FROM scores " + + "WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL " + + "PRIMARY KEY (game, score, user) " + + "WITH CLUSTERING ORDER BY (score DESC, user DESC)"), "DROP MATERIALIZED VIEW highscores", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getView(CqlIdentifier.fromInternal("highscores"))), (listener, oldView) -> verify(listener).onViewDropped(oldView)); } @Test - @CassandraRequirement(min = "3.0") public void should_handle_view_update() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V3_0_0) >= 0) + .isTrue(); should_handle_update( ImmutableList.of( "CREATE TABLE scores(user text, game text, score int, PRIMARY KEY (user, game))", "CREATE MATERIALIZED VIEW highscores " - + "AS SELECT user, score FROM scores " - + "WHERE game IS NOT NULL AND score IS NOT NULL PRIMARY KEY (game, score, user) " - + "WITH CLUSTERING ORDER BY (score DESC)"), + + "AS SELECT game, user, score FROM scores " + + "WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL " + + "PRIMARY KEY (game, score, user) " + + "WITH CLUSTERING ORDER BY (score DESC, user DESC)"), "ALTER MATERIALIZED VIEW highscores WITH comment = 'The best score for each game'", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getView(CqlIdentifier.fromInternal("highscores"))), newView -> assertThat(newView.getOptions().get(CqlIdentifier.fromInternal("comment"))) @@ -297,18 +315,19 @@ public void should_handle_view_update() { } @Test - @CassandraRequirement(min = "2.2") public void should_handle_function_creation() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) + 
.isTrue(); should_handle_creation( null, "CREATE FUNCTION id(i int) RETURNS NULL ON NULL INPUT RETURNS int " + "LANGUAGE java AS 'return i;'", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getFunction(CqlIdentifier.fromInternal("id"), DataTypes.INT)), function -> { - assertThat(function.getKeyspace()).isEqualTo(adminSessionRule.keyspace()); + assertThat(function.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); assertThat(function.getSignature().getName().asInternal()).isEqualTo("id"); assertThat(function.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); assertThat(function.getReturnType()).isEqualTo(DataTypes.INT); @@ -320,8 +339,9 @@ public void should_handle_function_creation() { } @Test - @CassandraRequirement(min = "2.2") public void should_handle_function_drop() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) + .isTrue(); should_handle_drop( ImmutableList.of( "CREATE FUNCTION id(i int) RETURNS NULL ON NULL INPUT RETURNS int " @@ -329,14 +349,15 @@ public void should_handle_function_drop() { "DROP FUNCTION id", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getFunction(CqlIdentifier.fromInternal("id"), DataTypes.INT)), (listener, oldFunction) -> verify(listener).onFunctionDropped(oldFunction)); } @Test - @CassandraRequirement(min = "2.2") public void should_handle_function_update() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) + .isTrue(); should_handle_update_via_drop_and_recreate( ImmutableList.of( "CREATE FUNCTION id(i int) RETURNS NULL ON NULL INPUT RETURNS int " @@ -346,7 +367,7 @@ public void should_handle_function_update() { + "LANGUAGE java AS 'return j;'", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getFunction(CqlIdentifier.fromInternal("id"), DataTypes.INT)), newFunction -> assertThat(newFunction.getBody()).isEqualTo("return j;"), (listener, oldFunction, newFunction) -> @@ -354,18 +375,19 @@ public void should_handle_function_update() { } @Test - @CassandraRequirement(min = "2.2") public void should_handle_aggregate_creation() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) + .isTrue(); should_handle_creation( "CREATE FUNCTION plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int " + "LANGUAGE java AS 'return i+j;'", "CREATE AGGREGATE sum(int) SFUNC plus STYPE int INITCOND 0", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getAggregate(CqlIdentifier.fromInternal("sum"), DataTypes.INT)), aggregate -> { - assertThat(aggregate.getKeyspace()).isEqualTo(adminSessionRule.keyspace()); + assertThat(aggregate.getKeyspace()).isEqualTo(ADMIN_SESSION_RULE.keyspace()); assertThat(aggregate.getSignature().getName().asInternal()).isEqualTo("sum"); assertThat(aggregate.getSignature().getParameterTypes()).containsExactly(DataTypes.INT); assertThat(aggregate.getStateType()).isEqualTo(DataTypes.INT); @@ -379,8 +401,9 @@ public void should_handle_aggregate_creation() { } @Test - @CassandraRequirement(min = "2.2") public void should_handle_aggregate_drop() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) + .isTrue(); should_handle_drop( ImmutableList.of( "CREATE 
FUNCTION plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int " @@ -389,14 +412,15 @@ public void should_handle_aggregate_drop() { "DROP AGGREGATE sum", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getAggregate(CqlIdentifier.fromInternal("sum"), DataTypes.INT)), (listener, oldAggregate) -> verify(listener).onAggregateDropped(oldAggregate)); } @Test - @CassandraRequirement(min = "2.2") public void should_handle_aggregate_update() { + assumeThat(CCM_RULE.getCcmBridge().getCassandraVersion().compareTo(Version.V2_2_0) >= 0) + .isTrue(); should_handle_update_via_drop_and_recreate( ImmutableList.of( "CREATE FUNCTION plus(i int, j int) RETURNS NULL ON NULL INPUT RETURNS int " @@ -406,7 +430,7 @@ public void should_handle_aggregate_update() { "CREATE AGGREGATE sum(int) SFUNC plus STYPE int INITCOND 1", metadata -> metadata - .getKeyspace(adminSessionRule.keyspace()) + .getKeyspace(ADMIN_SESSION_RULE.keyspace()) .flatMap(ks -> ks.getAggregate(CqlIdentifier.fromInternal("sum"), DataTypes.INT)), newAggregate -> assertThat(newAggregate.getInitCond()).hasValue(1), (listener, oldAggregate, newAggregate) -> @@ -422,7 +446,7 @@ private void should_handle_creation( CqlIdentifier... keyspaces) { if (beforeStatement != null) { - adminSessionRule.session().execute(beforeStatement); + ADMIN_SESSION_RULE.session().execute(beforeStatement); } SchemaChangeListener listener1 = mock(SchemaChangeListener.class); @@ -444,9 +468,9 @@ private void should_handle_creation( try (CqlSession session1 = SessionUtils.newSession( - ccmRule, adminSessionRule.keyspace(), null, listener1, null, loader); + CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); CqlSession session2 = - SessionUtils.newSession(ccmRule, null, null, listener2, null, loader)) { + SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { session1.execute(createStatement); @@ -456,14 +480,16 @@ private void should_handle_creation( verifyListener.accept(listener1, newElement1); // Refreshes on a server event are asynchronous: - ConditionChecker.checkThat( + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( () -> { T newElement2 = extract.apply(session2.getMetadata()).orElseThrow(AssertionError::new); verifyMetadata.accept(newElement2); verifyListener.accept(listener2, newElement2); - }) - .becomesTrue(); + }); } } @@ -475,7 +501,7 @@ private void should_handle_drop( CqlIdentifier... 
keyspaces) { for (String statement : beforeStatements) { - adminSessionRule.session().execute(statement); + ADMIN_SESSION_RULE.session().execute(statement); } SchemaChangeListener listener1 = mock(SchemaChangeListener.class); @@ -493,9 +519,9 @@ private void should_handle_drop( try (CqlSession session1 = SessionUtils.newSession( - ccmRule, adminSessionRule.keyspace(), null, listener1, null, loader); + CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); CqlSession session2 = - SessionUtils.newSession(ccmRule, null, null, listener2, null, loader)) { + SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { T oldElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); assertThat(oldElement).isNotNull(); @@ -505,12 +531,14 @@ private void should_handle_drop( assertThat(extract.apply(session1.getMetadata())).isEmpty(); verifyListener.accept(listener1, oldElement); - ConditionChecker.checkThat( + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( () -> { assertThat(extract.apply(session2.getMetadata())).isEmpty(); verifyListener.accept(listener2, oldElement); - }) - .becomesTrue(); + }); } } @@ -523,7 +551,7 @@ private void should_handle_update( CqlIdentifier... keyspaces) { for (String statement : beforeStatements) { - adminSessionRule.session().execute(statement); + ADMIN_SESSION_RULE.session().execute(statement); } SchemaChangeListener listener1 = mock(SchemaChangeListener.class); @@ -540,9 +568,9 @@ private void should_handle_update( try (CqlSession session1 = SessionUtils.newSession( - ccmRule, adminSessionRule.keyspace(), null, listener1, null, loader); + CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); CqlSession session2 = - SessionUtils.newSession(ccmRule, null, null, listener2, null, loader)) { + SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { T oldElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); assertThat(oldElement).isNotNull(); @@ -553,13 +581,15 @@ private void should_handle_update( verifyNewMetadata.accept(newElement); verifyListener.accept(listener1, oldElement, newElement); - ConditionChecker.checkThat( + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( () -> { verifyNewMetadata.accept( extract.apply(session2.getMetadata()).orElseThrow(AssertionError::new)); verifyListener.accept(listener2, oldElement, newElement); - }) - .becomesTrue(); + }); } } @@ -575,7 +605,7 @@ private void should_handle_update_via_drop_and_recreate( CqlIdentifier... 
keyspaces) { for (String statement : beforeStatements) { - adminSessionRule.session().execute(statement); + ADMIN_SESSION_RULE.session().execute(statement); } SchemaChangeListener listener1 = mock(SchemaChangeListener.class); @@ -591,9 +621,9 @@ private void should_handle_update_via_drop_and_recreate( .build(); try (CqlSession session1 = SessionUtils.newSession( - ccmRule, adminSessionRule.keyspace(), null, listener1, null, loader); + CCM_RULE, ADMIN_SESSION_RULE.keyspace(), null, listener1, null, loader); CqlSession session2 = - SessionUtils.newSession(ccmRule, null, null, listener2, null, loader)) { + SessionUtils.newSession(CCM_RULE, null, null, listener2, null, loader)) { T oldElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); assertThat(oldElement).isNotNull(); @@ -607,23 +637,27 @@ private void should_handle_update_via_drop_and_recreate( session1.setSchemaMetadataEnabled(true); session2.setSchemaMetadataEnabled(true); - ConditionChecker.checkThat( + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( () -> { T newElement = extract.apply(session1.getMetadata()).orElseThrow(AssertionError::new); verifyNewMetadata.accept(newElement); verifyListener.accept(listener1, oldElement, newElement); - }) - .becomesTrue(); + }); - ConditionChecker.checkThat( + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( () -> { T newElement = extract.apply(session2.getMetadata()).orElseThrow(AssertionError::new); verifyNewMetadata.accept(newElement); verifyListener.accept(listener2, oldElement, newElement); - }) - .becomesTrue(); + }); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaIT.java new file mode 100644 index 00000000000..df5571974c1 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/SchemaIT.java @@ -0,0 +1,343 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
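A note on the recurring pattern in the SchemaChangesIT hunks above: the test-infra `ConditionChecker.checkThat(...).becomesTrue()` helper is replaced by Awaitility, because schema refreshes triggered by server-side events are asynchronous and assertions on the second session must be retried until they pass. A minimal, self-contained sketch of that polling pattern (the `extractElement` supplier is a hypothetical stand-in for the metadata lookups used in these tests):

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;

import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

class AwaitMetadataSketch {
  // Polls every 500 ms until the extracted metadata element becomes visible,
  // failing after 60 seconds; untilAsserted() retries as long as the lambda throws.
  static void awaitElement(Supplier<Optional<?>> extractElement) {
    await()
        .pollInterval(500, TimeUnit.MILLISECONDS)
        .atMost(60, TimeUnit.SECONDS)
        .untilAsserted(() -> assertThat(extractElement.get()).isPresent());
  }
}
```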
+ */ +package com.datastax.oss.driver.core.metadata; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.TokenMap; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.protocol.internal.util.Bytes; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import org.junit.AssumptionViolatedException; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class SchemaIT { + + private static final Version DSE_MIN_VIRTUAL_TABLES = + Objects.requireNonNull(Version.parse("6.7.0")); + + private final CcmRule ccmRule = CcmRule.getInstance(); + + private final SessionRule<CqlSession> sessionRule = SessionRule.builder(ccmRule).build(); + + @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @Test + public void should_not_expose_system_and_test_keyspace() { + Map<CqlIdentifier, KeyspaceMetadata> keyspaces = + sessionRule.session().getMetadata().getKeyspaces(); + assertThat(keyspaces) + .doesNotContainKeys( + // Don't test exhaustively because system keyspaces depend on the Cassandra version, and + // keyspaces from other tests might also be present + CqlIdentifier.fromInternal("system"), CqlIdentifier.fromInternal("system_traces")); + } + + @Test + public void should_expose_test_keyspace() { + Map<CqlIdentifier, KeyspaceMetadata> keyspaces = + sessionRule.session().getMetadata().getKeyspaces(); + assertThat(keyspaces).containsKey(sessionRule.keyspace()); + } + + @Test + public void should_filter_by_keyspaces() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, + Collections.singletonList(sessionRule.keyspace().asInternal())) + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + + assertThat(session.getMetadata().getKeyspaces()).containsOnlyKeys(sessionRule.keyspace()); + + CqlIdentifier otherKeyspace = SessionUtils.uniqueKeyspaceId(); + SessionUtils.createKeyspace(session, otherKeyspace); + + assertThat(session.getMetadata().getKeyspaces()).containsOnlyKeys(sessionRule.keyspace()); + } + } + + @Test + public void
should_not_load_schema_if_disabled_in_config() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + + assertThat(session.isSchemaMetadataEnabled()).isFalse(); + assertThat(session.getMetadata().getKeyspaces()).isEmpty(); + } + } + + @Test + public void should_enable_schema_programmatically_when_disabled_in_config() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + + assertThat(session.isSchemaMetadataEnabled()).isFalse(); + assertThat(session.getMetadata().getKeyspaces()).isEmpty(); + + session.setSchemaMetadataEnabled(true); + assertThat(session.isSchemaMetadataEnabled()).isTrue(); + + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted(() -> assertThat(session.getMetadata().getKeyspaces()).isNotEmpty()); + assertThat(session.getMetadata().getKeyspaces()).containsKey(sessionRule.keyspace()); + + session.setSchemaMetadataEnabled(null); + assertThat(session.isSchemaMetadataEnabled()).isFalse(); + } + } + + @Test + public void should_disable_schema_programmatically_when_enabled_in_config() { + CqlSession session = sessionRule.session(); + session.setSchemaMetadataEnabled(false); + assertThat(session.isSchemaMetadataEnabled()).isFalse(); + + // Create a table, metadata should not be updated + DriverExecutionProfile slowProfile = SessionUtils.slowProfile(session); + sessionRule + .session() + .execute( + SimpleStatement.builder("CREATE TABLE foo(k int primary key)") + .setExecutionProfile(slowProfile) + .build()); + assertThat(session.getMetadata().getKeyspace(sessionRule.keyspace()).get().getTables()) + .doesNotContainKey(CqlIdentifier.fromInternal("foo")); + + // Reset to config value (true), should refresh and load the new table + session.setSchemaMetadataEnabled(null); + assertThat(session.isSchemaMetadataEnabled()).isTrue(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .untilAsserted( + () -> + assertThat( + session.getMetadata().getKeyspace(sessionRule.keyspace()).get().getTables()) + .containsKey(CqlIdentifier.fromInternal("foo"))); + } + + @Test + public void should_refresh_schema_manually() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withBoolean(DefaultDriverOption.METADATA_SCHEMA_ENABLED, false) + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + + assertThat(session.isSchemaMetadataEnabled()).isFalse(); + assertThat(session.getMetadata().getKeyspaces()).isEmpty(); + + Metadata newMetadata = session.refreshSchema(); + assertThat(newMetadata.getKeyspaces()).containsKey(sessionRule.keyspace()); + + assertThat(session.getMetadata()).isSameAs(newMetadata); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "4.0", + description = "virtual tables introduced in 4.0") + @Test + public void should_get_virtual_metadata() { + skipIfDse60(); + + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, + Collections.singletonList("system_views")) + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + + Metadata md = session.getMetadata(); + KeyspaceMetadata kmd = 
md.getKeyspace("system_views").get(); + + // Keyspace name should be set, marked as virtual, and have at least sstable_tasks table. + // All other values should be defaulted since they are not defined in the virtual schema + // tables. + assertThat(kmd.getTables().size()).isGreaterThanOrEqualTo(1); + assertThat(kmd.isVirtual()).isTrue(); + assertThat(kmd.isDurableWrites()).isFalse(); + assertThat(kmd.getName().asCql(true)).isEqualTo("system_views"); + + // Virtual tables lack User Types, Functions, Views and Aggregates + assertThat(kmd.getUserDefinedTypes().size()).isEqualTo(0); + assertThat(kmd.getFunctions().size()).isEqualTo(0); + assertThat(kmd.getViews().size()).isEqualTo(0); + assertThat(kmd.getAggregates().size()).isEqualTo(0); + + assertThat(kmd.describe(true)) + .isEqualTo( + "/* VIRTUAL KEYSPACE system_views WITH replication = { 'class' : 'null' } " + + "AND durable_writes = false; */"); + // Table name should be set, marked as virtual, and it should have columns set. + // indexes, views, clustering column, clustering order and id are not defined in the virtual + // schema tables. + TableMetadata tm = kmd.getTable("sstable_tasks").get(); + assertThat(tm).isNotNull(); + assertThat(tm.getName().toString()).isEqualTo("sstable_tasks"); + assertThat(tm.isVirtual()).isTrue(); + // DSE 6.8+ reports 7 columns, Cassandra 4+ reports 8 columns + assertThat(tm.getColumns().size()).isGreaterThanOrEqualTo(7); + assertThat(tm.getIndexes().size()).isEqualTo(0); + assertThat(tm.getPartitionKey().size()).isEqualTo(1); + assertThat(tm.getPartitionKey().get(0).getName().toString()).isEqualTo("keyspace_name"); + assertThat(tm.getClusteringColumns().size()).isEqualTo(2); + assertThat(tm.getId().isPresent()).isFalse(); + assertThat(tm.getOptions().size()).isEqualTo(0); + assertThat(tm.getKeyspace()).isEqualTo(kmd.getName()); + assertThat(tm.describe(true)) + .isIn( + // DSE 6.8+ + "/* VIRTUAL TABLE system_views.sstable_tasks (\n" + + " keyspace_name text,\n" + + " table_name text,\n" + + " task_id uuid,\n" + + " kind text,\n" + + " progress bigint,\n" + + " total bigint,\n" + + " unit text,\n" + + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" + + "); */", + // Cassandra 4.0 + "/* VIRTUAL TABLE system_views.sstable_tasks (\n" + + " keyspace_name text,\n" + + " table_name text,\n" + + " task_id uuid,\n" + + " completion_ratio double,\n" + + " kind text,\n" + + " progress bigint,\n" + + " total bigint,\n" + + " unit text,\n" + + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" + + "); */", + // Cassandra 4.1 + "/* VIRTUAL TABLE system_views.sstable_tasks (\n" + + " keyspace_name text,\n" + + " table_name text,\n" + + " task_id timeuuid,\n" + + " completion_ratio double,\n" + + " kind text,\n" + + " progress bigint,\n" + + " sstables int,\n" + + " total bigint,\n" + + " unit text,\n" + + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" + + "); */", + // Cassandra 5.0 + "/* VIRTUAL TABLE system_views.sstable_tasks (\n" + + " keyspace_name text,\n" + + " table_name text,\n" + + " task_id timeuuid,\n" + + " completion_ratio double,\n" + + " kind text,\n" + + " progress bigint,\n" + + " sstables int,\n" + + " target_directory text,\n" + + " total bigint,\n" + + " unit text,\n" + + " PRIMARY KEY (keyspace_name, table_name, task_id)\n" + + "); */"); + // ColumnMetadata is as expected + ColumnMetadata cm = tm.getColumn("progress").get(); + assertThat(cm).isNotNull(); + assertThat(cm.getParent()).isEqualTo(tm.getName()); + assertThat(cm.getType()).isEqualTo(DataTypes.BIGINT); + 
assertThat(cm.getName().toString()).isEqualTo("progress"); + } + } + + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "4.0", + description = "virtual tables introduced in 4.0") + @Test + public void should_exclude_virtual_keyspaces_from_token_map() { + skipIfDse60(); + + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, + Arrays.asList( + "system_views", "system_virtual_schema", sessionRule.keyspace().asInternal())) + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + Metadata metadata = session.getMetadata(); + Map keyspaces = metadata.getKeyspaces(); + assertThat(keyspaces) + .containsKey(CqlIdentifier.fromCql("system_views")) + .containsKey(CqlIdentifier.fromCql("system_virtual_schema")); + + TokenMap tokenMap = metadata.getTokenMap().orElseThrow(AssertionError::new); + ByteBuffer partitionKey = Bytes.fromHexString("0x00"); // value does not matter + assertThat(tokenMap.getReplicas("system_views", partitionKey)).isEmpty(); + assertThat(tokenMap.getReplicas("system_virtual_schema", partitionKey)).isEmpty(); + // Check that a non-virtual keyspace is present + assertThat(tokenMap.getReplicas(sessionRule.keyspace(), partitionKey)).isNotEmpty(); + } + } + + private void skipIfDse60() { + // Special case: DSE 6.0 reports C* 4.0 but does not support virtual tables + if (!ccmRule.isDistributionOf( + BackendType.DSE, (dist, cass) -> dist.compareTo(DSE_MIN_VIRTUAL_TABLES) >= 0)) { + throw new AssumptionViolatedException("DSE 6.0 does not support virtual tables"); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/TokenITBase.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/TokenITBase.java similarity index 86% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/TokenITBase.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/TokenITBase.java index 2fa682ccc2b..057461a1bd7 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/metadata/TokenITBase.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metadata/TokenITBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
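SchemaIT above gates its virtual-table tests at runtime rather than with annotations alone: `@BackendRequirement` declares the minimum Cassandra version, while `skipIfDse60` throws `AssumptionViolatedException` for the corner case where DSE 6.0 reports a Cassandra 4.0 compatibility version but lacks virtual tables. A sketch of that skip mechanism, with illustrative version values:

```java
import com.datastax.oss.driver.api.core.Version;
import java.util.Objects;
import org.junit.AssumptionViolatedException;

class VersionSkipSketch {
  private static final Version MIN_DSE = Objects.requireNonNull(Version.parse("6.7.0"));

  // Throwing AssumptionViolatedException makes JUnit report the test as skipped
  // rather than failed, like an annotation-based requirement would, but with
  // access to runtime information about the backend distribution.
  static void requireDseAtLeast(Version dseVersion) {
    if (dseVersion.compareTo(MIN_DSE) < 0) {
      throw new AssumptionViolatedException(
          "requires DSE " + MIN_DSE + " or higher, found " + dseVersion);
    }
  }
}
```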
*/ -package com.datastax.oss.driver.api.core.metadata; +package com.datastax.oss.driver.core.metadata; import static org.assertj.core.api.Assertions.assertThat; @@ -25,6 +27,8 @@ import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.TokenMap; import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.api.core.metadata.token.TokenRange; import com.datastax.oss.driver.api.core.session.Session; @@ -100,6 +104,7 @@ public void should_be_consistent_with_range_queries() { int key = 1; ProtocolVersion protocolVersion = session().getContext().getProtocolVersion(); ByteBuffer serializedKey = TypeCodecs.INT.encodePrimitive(key, protocolVersion); + assertThat(serializedKey).isNotNull(); Set<Node> replicas = tokenMap.getReplicas(KS1, serializedKey); assertThat(replicas).hasSize(1); Node replica = replicas.iterator().next(); @@ -131,7 +136,7 @@ private List<Row> rangeQuery(PreparedStatement rangeStatement, TokenRange range) { List<Row> rows = Lists.newArrayList(); for (TokenRange subRange : range.unwrap()) { - Statement statement = rangeStatement.bind(subRange.getStart(), subRange.getEnd()); + Statement<?> statement = rangeStatement.bind(subRange.getStart(), subRange.getEnd()); session().execute(statement).forEach(rows::add); } return rows; @@ -154,10 +159,11 @@ private List<Row> rangeQuery(PreparedStatement rangeStatement, TokenRange range) public void should_get_token_from_row_and_set_token_in_query() { ResultSet rs = session().execute("SELECT token(i) FROM foo WHERE i = 1"); Row row = rs.one(); + assertThat(row).isNotNull(); // Get by index: Token token = row.getToken(0); - assertThat(token).isNotNull().isInstanceOf(expectedTokenType); // Get by name: the generated column name depends on the Cassandra version.
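An aside on the `rangeQuery` helper a few hunks up: it relies on `TokenRange.unwrap()`, because a range that wraps past the ring's minimum token cannot be expressed as a single `token(i) > ? AND token(i) <= ?` predicate and must be split into non-wrapping sub-ranges first. Roughly, assuming a statement prepared from such a query:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
import java.util.ArrayList;
import java.util.List;

class RangeQuerySketch {
  // Fetches all rows in a token range; wraparound ranges are split so that each
  // bound statement covers one contiguous (start, end] slice of the ring.
  static List<Row> fetch(CqlSession session, PreparedStatement rangeStatement, TokenRange range) {
    List<Row> rows = new ArrayList<>();
    for (TokenRange subRange : range.unwrap()) {
      session
          .execute(rangeStatement.bind(subRange.getStart(), subRange.getEnd()))
          .forEach(rows::add);
    }
    return rows;
  }
}
```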
String tokenColumnName = @@ -171,10 +177,12 @@ public void should_get_token_from_row_and_set_token_in_query() { // Bind with setToken by index row = session().execute(pst.bind().setToken(0, token)).one(); + assertThat(row).isNotNull(); assertThat(row.getInt(0)).isEqualTo(1); // Bind with setToken by name row = session().execute(pst.bind().setToken("partition key token", token)).one(); + assertThat(row).isNotNull(); assertThat(row.getInt(0)).isEqualTo(1); } @@ -188,17 +196,20 @@ public void should_get_token_from_row_and_set_token_in_query() { @Test public void should_get_token_from_row_and_set_token_in_query_with_binding_and_aliasing() { Row row = session().execute("SELECT token(i) AS t FROM foo WHERE i = 1").one(); + assertThat(row).isNotNull(); Token token = row.getToken("t"); - assertThat(token).isInstanceOf(expectedTokenType); + assertThat(token).isNotNull().isInstanceOf(expectedTokenType); PreparedStatement pst = session().prepare("SELECT * FROM foo WHERE token(i) = :myToken"); row = session().execute(pst.bind().setToken("myToken", token)).one(); + assertThat(row).isNotNull(); assertThat(row.getInt(0)).isEqualTo(1); row = session() .execute(SimpleStatement.newInstance("SELECT * FROM foo WHERE token(i) = ?", token)) .one(); + assertThat(row).isNotNull(); assertThat(row.getInt(0)).isEqualTo(1); } @@ -214,6 +225,7 @@ public void should_get_token_from_row_and_set_token_in_query_with_binding_and_al @Test(expected = IllegalArgumentException.class) public void should_raise_exception_when_getting_token_on_non_token_column() { Row row = session().execute("SELECT i FROM foo WHERE i = 1").one(); + assertThat(row).isNotNull(); row.getToken(0); } @@ -235,11 +247,13 @@ public void should_expose_consistent_ranges() { } private void checkRanges(Session session) { + assertThat(session.getMetadata().getTokenMap()).isPresent(); TokenMap tokenMap = session.getMetadata().getTokenMap().get(); checkRanges(tokenMap.getTokenRanges()); } private void checkRanges(Session session, CqlIdentifier keyspace, int replicationFactor) { + assertThat(session.getMetadata().getTokenMap()).isPresent(); TokenMap tokenMap = session.getMetadata().getTokenMap().get(); List allRangesWithDuplicates = Lists.newArrayList(); @@ -248,17 +262,27 @@ private void checkRanges(Session session, CqlIdentifier keyspace, int replicatio Set hostRanges = tokenMap.getTokenRanges(keyspace, node); // Special case: When using vnodes the tokens are not evenly assigned to each replica. if (!useVnodes) { - assertThat(hostRanges).hasSize(replicationFactor * tokensPerNode); + assertThat(hostRanges) + .as( + "Node %s: expected %d ranges, got %d", + node, replicationFactor * tokensPerNode, hostRanges.size()) + .hasSize(replicationFactor * tokensPerNode); } allRangesWithDuplicates.addAll(hostRanges); } // Special case check for vnodes to ensure that total number of replicated ranges is correct. - assertThat(allRangesWithDuplicates).hasSize(3 * tokensPerNode * replicationFactor); + assertThat(allRangesWithDuplicates) + .as( + "Expected %d total replicated ranges with duplicates, got %d", + 3 * replicationFactor * tokensPerNode, allRangesWithDuplicates.size()) + .hasSize(3 * replicationFactor * tokensPerNode); // Once we ignore duplicates, the number of ranges should match the number of nodes. 
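The counts asserted in `checkRanges` above follow from basic replication arithmetic: without vnodes, each of the 3 nodes owns `tokensPerNode` primary ranges, and a keyspace with replication factor `rf` maps every range onto `rf` nodes, so each per-node range set has `rf * tokensPerNode` entries and the sets sum to `3 * rf * tokensPerNode` ranges counted with duplicates. A tiny self-check with illustrative numbers:

```java
class RangeCountSketch {
  public static void main(String[] args) {
    int nodes = 3, tokensPerNode = 1, rf = 2;
    int perNode = rf * tokensPerNode; // ranges each node replicates
    int withDuplicates = nodes * perNode; // 6: every range counted rf times
    int distinct = nodes * tokensPerNode; // 3: one primary range per token
    assert withDuplicates == distinct * rf;
    System.out.printf("perNode=%d withDuplicates=%d distinct=%d%n",
        perNode, withDuplicates, distinct);
  }
}
```

The deduplication into a sorted set just below performs exactly that collapse from `withDuplicates` to `distinct`.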
Set allRanges = new TreeSet<>(allRangesWithDuplicates); - assertThat(allRanges).hasSize(3 * tokensPerNode); + assertThat(allRanges) + .as("Expected %d total replicated ranges, got %d", 3 * tokensPerNode, allRanges.size()) + .hasSize(3 * tokensPerNode); // And the ranges should cover the whole ring and no ranges intersect. checkRanges(allRanges); @@ -267,7 +291,7 @@ private void checkRanges(Session session, CqlIdentifier keyspace, int replicatio // Ensures that no ranges intersect and that they cover the entire ring. private void checkRanges(Collection ranges) { // Ensure no ranges intersect. - TokenRange[] rangesArray = ranges.toArray(new TokenRange[ranges.size()]); + TokenRange[] rangesArray = ranges.toArray(new TokenRange[0]); for (int i = 0; i < rangesArray.length; i++) { TokenRange rangeI = rangesArray[i]; for (int j = i + 1; j < rangesArray.length; j++) { @@ -335,6 +359,7 @@ public void should_create_token_from_partition_key() { TokenMap tokenMap = getTokenMap(); Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one(); + assertThat(row).isNotNull(); Token expected = row.getToken(0); ProtocolVersion protocolVersion = session().getContext().getProtocolVersion(); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/DropwizardMetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/DropwizardMetricsIT.java new file mode 100644 index 00000000000..e0184516e21 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/DropwizardMetricsIT.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
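One small idiom in the `checkRanges(Collection ...)` hunk above: `ranges.toArray(new TokenRange[0])` replaces the pre-sized `new TokenRange[ranges.size()]`. On current JVMs the zero-length-array form is at least as fast and avoids reading `size()` separately from the copy:

```java
import java.util.Arrays;
import java.util.Collection;

class ToArraySketch {
  // The zero-length array only serves as a type witness; toArray allocates a
  // correctly sized array internally in a single pass over the collection.
  static String[] snapshot(Collection<String> items) {
    return items.toArray(new String[0]);
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(snapshot(Arrays.asList("a", "b", "c"))));
  }
}
```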
+ */ +package com.datastax.oss.driver.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.codahale.metrics.Counter; +import com.codahale.metrics.Gauge; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Metric; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.Timer; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import java.util.ArrayList; +import java.util.List; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class DropwizardMetricsIT extends MetricsITBase { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); + + @Override + protected SimulacronRule simulacron() { + return SIMULACRON_RULE; + } + + @Override + protected MetricRegistry newMetricRegistry() { + return new MetricRegistry(); + } + + @Override + protected String getMetricsFactoryClass() { + return "DropwizardMetricsFactory"; + } + + @Override + protected void assertMetricsPresent(CqlSession session) { + + MetricRegistry registry = + (MetricRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); + assertThat(registry).isNotNull(); + + assertThat(registry.getMetrics()) + .hasSize(ENABLED_SESSION_METRICS.size() + ENABLED_NODE_METRICS.size() * 3); + + MetricIdGenerator metricIdGenerator = + ((InternalDriverContext) session.getContext()).getMetricIdGenerator(); + + assertThat(session.getMetrics()).isPresent(); + Metrics metrics = session.getMetrics().get(); + + for (DefaultSessionMetric metric : ENABLED_SESSION_METRICS) { + + MetricId id = metricIdGenerator.sessionMetricId(metric); + Metric m = registry.getMetrics().get(id.getName()); + assertThat(m).isNotNull(); + + // assert that the same metric is retrievable through the registry and through the driver API + assertThat(metrics.getSessionMetric(metric)) + .isPresent() + .hasValueSatisfying(v -> assertThat(v).isSameAs(m)); + + switch (metric) { + case CONNECTED_NODES: + assertThat(m).isInstanceOf(Gauge.class); + assertThat((Integer) ((Gauge) m).getValue()).isEqualTo(3); + break; + case CQL_REQUESTS: + assertThat(m).isInstanceOf(Timer.class); + await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(30)); + break; + case CQL_PREPARED_CACHE_SIZE: + assertThat(m).isInstanceOf(Gauge.class); + assertThat((Long) ((Gauge) m).getValue()).isOne(); + break; + case BYTES_SENT: + case BYTES_RECEIVED: + assertThat(m).isInstanceOf(Meter.class); + assertThat(((Meter) m).getCount()).isGreaterThan(0); + break; + case CQL_CLIENT_TIMEOUTS: + case THROTTLING_ERRORS: + assertThat(m).isInstanceOf(Counter.class); + assertThat(((Counter) m).getCount()).isZero(); + break; + case THROTTLING_DELAY: + 
assertThat(m).isInstanceOf(Timer.class); + assertThat(((Timer) m).getCount()).isZero(); + break; + case THROTTLING_QUEUE_SIZE: + assertThat(m).isInstanceOf(Gauge.class); + assertThat((Integer) ((Gauge) m).getValue()).isZero(); + break; + } + } + + for (Node node : session.getMetadata().getNodes().values()) { + + for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { + + MetricId id = metricIdGenerator.nodeMetricId(node, metric); + Metric m = registry.getMetrics().get(id.getName()); + assertThat(m).isNotNull(); + + // assert that the same metric is retrievable through the registry and through the driver + // API + assertThat(metrics.getNodeMetric(node, metric)) + .isPresent() + .hasValueSatisfying(v -> assertThat(v).isSameAs(m)); + + switch (metric) { + case OPEN_CONNECTIONS: + assertThat(m).isInstanceOf(Gauge.class); + // control node has 2 connections + assertThat((Integer) ((Gauge) m).getValue()).isBetween(1, 2); + break; + case CQL_MESSAGES: + assertThat(m).isInstanceOf(Timer.class); + await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(10)); + break; + case READ_TIMEOUTS: + case WRITE_TIMEOUTS: + case UNAVAILABLES: + case OTHER_ERRORS: + case ABORTED_REQUESTS: + case UNSENT_REQUESTS: + case RETRIES: + case IGNORES: + case RETRIES_ON_READ_TIMEOUT: + case RETRIES_ON_WRITE_TIMEOUT: + case RETRIES_ON_UNAVAILABLE: + case RETRIES_ON_OTHER_ERROR: + case RETRIES_ON_ABORTED: + case IGNORES_ON_READ_TIMEOUT: + case IGNORES_ON_WRITE_TIMEOUT: + case IGNORES_ON_UNAVAILABLE: + case IGNORES_ON_OTHER_ERROR: + case IGNORES_ON_ABORTED: + case SPECULATIVE_EXECUTIONS: + case CONNECTION_INIT_ERRORS: + case AUTHENTICATION_ERRORS: + assertThat(m).isInstanceOf(Counter.class); + assertThat(((Counter) m).getCount()).isZero(); + break; + case BYTES_SENT: + case BYTES_RECEIVED: + assertThat(m).isInstanceOf(Meter.class); + assertThat(((Meter) m).getCount()).isGreaterThan(0L); + break; + case AVAILABLE_STREAMS: + case IN_FLIGHT: + case ORPHANED_STREAMS: + assertThat(m).isInstanceOf(Gauge.class); + break; + } + } + } + } + + @Override + protected void assertNodeMetricsNotEvicted(CqlSession session, Node node) { + InternalDriverContext context = (InternalDriverContext) session.getContext(); + MetricRegistry registry = (MetricRegistry) context.getMetricRegistry(); + assertThat(registry).isNotNull(); + for (String id : nodeMetricIds(context, node)) { + assertThat(registry.getMetrics()).containsKey(id); + } + } + + @Override + protected void assertMetricsNotPresent(Object registry) { + MetricRegistry dropwizardRegistry = (MetricRegistry) registry; + assertThat(dropwizardRegistry.getMetrics()).isEmpty(); + } + + @Override + protected void assertNodeMetricsEvicted(CqlSession session, Node node) { + InternalDriverContext context = (InternalDriverContext) session.getContext(); + MetricRegistry registry = (MetricRegistry) context.getMetricRegistry(); + assertThat(registry).isNotNull(); + for (String id : nodeMetricIds(context, node)) { + assertThat(registry.getMetrics()).doesNotContainKey(id); + } + } + + private List nodeMetricIds(InternalDriverContext context, Node node) { + List ids = new ArrayList<>(); + for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { + MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric); + ids.add(id.getName()); + } + return ids; + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/MetricsITBase.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/MetricsITBase.java new file mode 100644 index 
00000000000..e6121217619 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/metrics/MetricsITBase.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.codahale.metrics.MetricRegistry; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.internal.core.context.EventBus; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.DefaultNode; +import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; +import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.DefaultMetricIdGenerator; +import com.datastax.oss.driver.internal.core.metrics.TaggingMetricIdGenerator; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public abstract class MetricsITBase { + + protected static final List ENABLED_SESSION_METRICS = + Arrays.asList(DefaultSessionMetric.values()); + + protected static final List ENABLED_NODE_METRICS = + Arrays.asList(DefaultNodeMetric.values()); + + protected abstract SimulacronRule simulacron(); + + protected abstract Object newMetricRegistry(); + + protected abstract String getMetricsFactoryClass(); + + protected abstract void assertMetricsPresent(CqlSession session); + + protected abstract void assertNodeMetricsEvicted(CqlSession session, Node node) throws Exception; + + protected abstract void 
assertNodeMetricsNotEvicted(CqlSession session, Node node) + throws Exception; + + @Before + public void resetSimulacron() { + simulacron().cluster().clearLogs(); + simulacron().cluster().clearPrimes(true); + } + + @Test + @UseDataProvider("descriptorsAndPrefixes") + public void should_expose_metrics_if_enabled_and_clear_metrics_if_closed( + Class metricIdGenerator, String prefix) { + + Object registry = newMetricRegistry(); + Assume.assumeFalse( + "Cannot use metric tags with Dropwizard", + metricIdGenerator.getSimpleName().contains("Tagging") + && getMetricsFactoryClass().contains("Dropwizard")); + + DriverConfigLoader loader = + allMetricsEnabled() + .withString( + DefaultDriverOption.METRICS_ID_GENERATOR_CLASS, metricIdGenerator.getSimpleName()) + .withString(DefaultDriverOption.METRICS_ID_GENERATOR_PREFIX, prefix) + .build(); + + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(simulacron().getContactPoints()) + .withConfigLoader(loader) + .withMetricRegistry(registry) + .build()) { + + session.prepare("irrelevant"); + queryAllNodes(session); + assertMetricsPresent(session); + } finally { + assertMetricsNotPresent(registry); + } + } + + @DataProvider + public static Object[][] descriptorsAndPrefixes() { + return new Object[][] { + new Object[] {DefaultMetricIdGenerator.class, ""}, + new Object[] {DefaultMetricIdGenerator.class, "cassandra"}, + new Object[] {TaggingMetricIdGenerator.class, ""}, + new Object[] {TaggingMetricIdGenerator.class, "cassandra"}, + }; + } + + @Test + public void should_not_expose_metrics_if_disabled() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withStringList(DefaultDriverOption.METRICS_SESSION_ENABLED, Collections.emptyList()) + .withStringList(DefaultDriverOption.METRICS_NODE_ENABLED, Collections.emptyList()) + .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, getMetricsFactoryClass()) + .build(); + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(simulacron().getContactPoints()) + .withConfigLoader(loader) + .build()) { + queryAllNodes(session); + MetricRegistry registry = + (MetricRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); + assertThat(registry).isNull(); + assertThat(session.getMetrics()).isEmpty(); + } + } + + @Test + public void should_evict_down_node_metrics_when_timeout_fires() throws Exception { + // given + Duration expireAfter = Duration.ofSeconds(1); + DriverConfigLoader loader = + allMetricsEnabled() + .withDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, expireAfter) + .build(); + + AbstractMetricUpdater.MIN_EXPIRE_AFTER = expireAfter; + + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(simulacron().getContactPoints()) + .withConfigLoader(loader) + .withMetricRegistry(newMetricRegistry()) + .build()) { + + queryAllNodes(session); + + DefaultNode node1 = findNode(session, 0); + DefaultNode node2 = findNode(session, 1); + DefaultNode node3 = findNode(session, 2); + + EventBus eventBus = ((InternalDriverContext) session.getContext()).getEventBus(); + + // trigger node1 UP -> DOWN + eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); + + Thread.sleep(expireAfter.toMillis()); + + // then node-level metrics should be evicted from node1, but + // node2 and node3 metrics should not have been evicted + await().untilAsserted(() -> assertNodeMetricsEvicted(session, node1)); + assertNodeMetricsNotEvicted(session, node2); + assertNodeMetricsNotEvicted(session, node3); + + } finally { + 
AbstractMetricUpdater.MIN_EXPIRE_AFTER = Duration.ofMinutes(5); + } + } + + @Test + public void should_not_evict_down_node_metrics_when_node_is_back_up_before_timeout() + throws Exception { + // given + Duration expireAfter = Duration.ofSeconds(2); + DriverConfigLoader loader = + allMetricsEnabled() + .withDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, expireAfter) + .build(); + + AbstractMetricUpdater.MIN_EXPIRE_AFTER = expireAfter; + + try (CqlSession session = + CqlSession.builder() + .addContactEndPoints(simulacron().getContactPoints()) + .withConfigLoader(loader) + .withMetricRegistry(newMetricRegistry()) + .build()) { + + queryAllNodes(session); + + DefaultNode node1 = findNode(session, 0); + DefaultNode node2 = findNode(session, 1); + DefaultNode node3 = findNode(session, 2); + + EventBus eventBus = ((InternalDriverContext) session.getContext()).getEventBus(); + + // trigger nodes UP -> DOWN + eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node1)); + eventBus.fire(NodeStateEvent.changed(NodeState.UP, NodeState.FORCED_DOWN, node2)); + eventBus.fire(NodeStateEvent.removed(node3)); + + Thread.sleep(500); + + // trigger nodes DOWN -> UP, should cancel the timeouts + eventBus.fire(NodeStateEvent.changed(NodeState.DOWN, NodeState.UP, node1)); + eventBus.fire(NodeStateEvent.changed(NodeState.FORCED_DOWN, NodeState.UP, node2)); + eventBus.fire(NodeStateEvent.added(node3)); + + Thread.sleep(expireAfter.toMillis()); + + // then no node-level metrics should be evicted + assertNodeMetricsNotEvicted(session, node1); + assertNodeMetricsNotEvicted(session, node2); + assertNodeMetricsNotEvicted(session, node3); + + } finally { + AbstractMetricUpdater.MIN_EXPIRE_AFTER = Duration.ofMinutes(5); + } + } + + private ProgrammaticDriverConfigLoaderBuilder allMetricsEnabled() { + return SessionUtils.configLoaderBuilder() + .withStringList( + DefaultDriverOption.METRICS_SESSION_ENABLED, + ENABLED_SESSION_METRICS.stream() + .map(DefaultSessionMetric::getPath) + .collect(Collectors.toList())) + .withStringList( + DefaultDriverOption.METRICS_NODE_ENABLED, + ENABLED_NODE_METRICS.stream() + .map(DefaultNodeMetric::getPath) + .collect(Collectors.toList())) + .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, getMetricsFactoryClass()); + } + + private void queryAllNodes(CqlSession session) { + for (Node node : session.getMetadata().getNodes().values()) { + for (int i = 0; i < 10; i++) { + session.execute(SimpleStatement.newInstance("irrelevant").setNode(node)); + } + } + } + + private DefaultNode findNode(CqlSession session, int id) { + InetSocketAddress address1 = simulacron().cluster().node(id).inetSocketAddress(); + return (DefaultNode) + session.getMetadata().findNode(address1).orElseThrow(IllegalStateException::new); + } + + protected abstract void assertMetricsNotPresent(Object registry); +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/ConsistencyDowngradingRetryPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/ConsistencyDowngradingRetryPolicyIT.java new file mode 100644 index 00000000000..0cab12c7fc4 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/ConsistencyDowngradingRetryPolicyIT.java @@ -0,0 +1,1328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
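The MetricsITBase eviction tests above drive node state transitions directly through the driver's internal `EventBus`: a DOWN, FORCED_DOWN, or removed event starts the `METRICS_NODE_EXPIRE_AFTER` timer for that node's metrics, and a subsequent UP or added event cancels it before it fires. In outline, using only the internal APIs already visible in the test:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.NodeState;
import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
import com.datastax.oss.driver.internal.core.metadata.DefaultNode;
import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent;

class EvictionSketch {
  // Marks a node down; unless the node comes back up before the configured
  // expiration delay, its node-level metrics are removed from the registry.
  static void markDown(CqlSession session, DefaultNode node) {
    ((InternalDriverContext) session.getContext())
        .getEventBus()
        .fire(NodeStateEvent.changed(NodeState.UP, NodeState.DOWN, node));
  }
}
```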
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.retry; + +import static com.datastax.oss.simulacron.common.codec.WriteType.BATCH_LOG; +import static com.datastax.oss.simulacron.common.codec.WriteType.UNLOGGED_BATCH; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.closeConnection; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readFailure; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.readTimeout; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.writeFailure; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.writeTimeout; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.after; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Level; +import ch.qos.logback.classic.Logger; +import ch.qos.logback.classic.spi.ILoggingEvent; +import ch.qos.logback.core.Appender; +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.connection.ClosedConnectionException; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.retry.RetryVerdict; +import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType; +import com.datastax.oss.driver.api.core.servererrors.ReadFailureException; +import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException; +import com.datastax.oss.driver.api.core.servererrors.ServerError; +import com.datastax.oss.driver.api.core.servererrors.UnavailableException; +import com.datastax.oss.driver.api.core.servererrors.WriteFailureException; +import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException; +import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import 
com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryPolicy; +import com.datastax.oss.driver.internal.core.retry.ConsistencyDowngradingRetryVerdict; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; +import com.datastax.oss.simulacron.common.codec.WriteType; +import com.datastax.oss.simulacron.common.request.Query; +import com.datastax.oss.simulacron.common.request.Request; +import com.datastax.oss.simulacron.common.stubbing.CloseType; +import com.datastax.oss.simulacron.common.stubbing.DisconnectAction; +import com.datastax.oss.simulacron.server.BoundNode; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import java.net.SocketAddress; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.slf4j.LoggerFactory; +import org.slf4j.helpers.MessageFormatter; + +@RunWith(DataProviderRunner.class) +@Category(ParallelizableTests.class) +public class ConsistencyDowngradingRetryPolicyIT { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); + + public @Rule SessionRule sessionRule = + SessionRule.builder(SIMULACRON_RULE) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true) + .withClass( + DefaultDriverOption.RETRY_POLICY_CLASS, + ConsistencyDowngradingRetryPolicy.class) + .withClass( + DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, + SortingLoadBalancingPolicy.class) + .build()) + .build(); + + private static final String QUERY_STR = "irrelevant"; + + private static final Request QUERY_LOCAL_QUORUM = + new Query(QUERY_STR, ImmutableList.of(ConsistencyLevel.LOCAL_QUORUM), null, null); + + private static final Request QUERY_ONE = + new Query(QUERY_STR, ImmutableList.of(ConsistencyLevel.ONE), null, null); + + private static final Request QUERY_LOCAL_SERIAL = + new Query(QUERY_STR, ImmutableList.of(ConsistencyLevel.LOCAL_SERIAL), null, null); + + private static final SimpleStatement STATEMENT_LOCAL_QUORUM = + SimpleStatement.builder(QUERY_STR) + .setConsistencyLevel(DefaultConsistencyLevel.LOCAL_QUORUM) + .build(); + + private static final SimpleStatement STATEMENT_LOCAL_SERIAL = + SimpleStatement.builder(QUERY_STR) + .setConsistencyLevel(DefaultConsistencyLevel.LOCAL_SERIAL) + .build(); + + private final QueryCounter localQuorumCounter = + QueryCounter.builder(SIMULACRON_RULE.cluster()) + .withFilter( + (l) -> + l.getQuery().equals(QUERY_STR) + && l.getConsistency().equals(ConsistencyLevel.LOCAL_QUORUM)) + .build(); + + private final QueryCounter oneCounter = + QueryCounter.builder(SIMULACRON_RULE.cluster()) + .withFilter( + (l) -> + l.getQuery().equals(QUERY_STR) && l.getConsistency().equals(ConsistencyLevel.ONE)) + .build(); + + private final QueryCounter localSerialCounter = + QueryCounter.builder(SIMULACRON_RULE.cluster()) + .withFilter( + (l) -> + l.getQuery().equals(QUERY_STR) + && l.getConsistency().equals(ConsistencyLevel.LOCAL_SERIAL)) + 
+          .build();
+
+  private ArgumentCaptor<ILoggingEvent> loggingEventCaptor;
+  private Appender<ILoggingEvent> appender;
+  private Logger logger;
+  private Level oldLevel;
+  private String logPrefix;
+  private BoundNode node0;
+  private BoundNode node1;
+
+  @Before
+  public void setup() {
+    loggingEventCaptor = ArgumentCaptor.forClass(ILoggingEvent.class);
+    @SuppressWarnings("unchecked")
+    Appender<ILoggingEvent> appender = (Appender<ILoggingEvent>) mock(Appender.class);
+    this.appender = appender;
+    logger = (Logger) LoggerFactory.getLogger(ConsistencyDowngradingRetryPolicy.class);
+    oldLevel = logger.getLevel();
+    logger.setLevel(Level.TRACE);
+    logger.addAppender(appender);
+    // the log prefix we expect in retry logging messages.
+    logPrefix = sessionRule.session().getName() + "|default";
+    // clear activity logs and primes between tests since simulacron instance is shared.
+    SIMULACRON_RULE.cluster().clearLogs();
+    SIMULACRON_RULE.cluster().clearPrimes(true);
+    node0 = SIMULACRON_RULE.cluster().node(0);
+    node1 = SIMULACRON_RULE.cluster().node(1);
+  }
+
+  @After
+  public void teardown() {
+    logger.detachAppender(appender);
+    logger.setLevel(oldLevel);
+  }
+
+  @Test
+  public void should_rethrow_on_read_timeout_when_enough_responses_and_data_present() {
+    // given a node that will respond to query with a read timeout where data is present and enough
+    // replicas replied.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 2, 2, true)));
+
+    try {
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("expected a ReadTimeoutException");
+    } catch (ReadTimeoutException rte) {
+      // then an exception should have been thrown
+      assertThat(rte)
+          .hasMessageContaining(
+              "Cassandra timeout during read query at consistency LOCAL_QUORUM (timeout while waiting for repair of inconsistent replica)");
+      assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(rte.getReceived()).isEqualTo(2);
+      assertThat(rte.getBlockFor()).isEqualTo(2);
+      assertThat(rte.wasDataPresent()).isTrue();
+      // should not have been retried
+      List<Map.Entry<Node, Throwable>> errors = rte.getExecutionInfo().getErrors();
+      assertThat(errors).isEmpty();
+      // the host that returned the response should be node 0.
+      assertThat(coordinatorAddress(rte.getExecutionInfo())).isEqualTo(node0.getAddress());
+    }
+
+    // there should have been no retry.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // expect 1 message: RETHROW
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(1);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                2,
+                true,
+                0,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_retry_on_same_on_read_timeout_when_enough_responses_but_data_not_present() {
+    // given a node that will respond to query with a read timeout where data is not present.
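+    // (Simulacron's readTimeout(cl, received, blockFor, dataPresent) primes a READ_TIMEOUT error
+    // response with exactly those parameters.)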
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 2, 2, false)));
+
+    try {
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("expected a ReadTimeoutException");
+    } catch (ReadTimeoutException rte) {
+      // then an exception should have been thrown
+      assertThat(rte)
+          .hasMessageContaining(
+              "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)");
+      assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(rte.getReceived()).isEqualTo(2);
+      assertThat(rte.getBlockFor()).isEqualTo(2);
+      assertThat(rte.wasDataPresent()).isFalse();
+      // the host that returned the response should be node 0.
+      assertThat(coordinatorAddress(rte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should have failed at first attempt at LOCAL_QUORUM as well
+      List<Map.Entry<Node, Throwable>> errors = rte.getExecutionInfo().getErrors();
+      assertThat(errors).hasSize(1);
+      Entry<Node, Throwable> error = errors.get(0);
+      assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+      assertThat(error.getValue())
+          .isInstanceOfSatisfying(
+              ReadTimeoutException.class,
+              rte1 -> {
+                assertThat(rte1)
+                    .hasMessageContaining(
+                        "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)");
+                assertThat(rte1.getConsistencyLevel())
+                    .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+                assertThat(rte1.getReceived()).isEqualTo(2);
+                assertThat(rte1.getBlockFor()).isEqualTo(2);
+                assertThat(rte1.wasDataPresent()).isFalse();
+              });
+    }
+
+    // there should have been a retry, and it should have been executed on the same host,
+    // with same consistency.
+    localQuorumCounter.assertTotalCount(2);
+    localQuorumCounter.assertNodeCounts(2, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // expect 2 messages: RETRY_SAME, then RETHROW
+    verify(appender, timeout(2000).times(2)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(2);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                2,
+                false,
+                0,
+                RetryVerdict.RETRY_SAME));
+    assertThat(loggedEvents.get(1).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                2,
+                false,
+                1,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_downgrade_on_read_timeout_when_not_enough_responses() {
+    // given a node that will respond to a query with a read timeout where only 1 of the 2
+    // required responses is received (the data request succeeded, but not enough replicas
+    // replied).
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, true)));
+
+    ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+
+    // the host that returned the response should be node 0.
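+    // (the SortingLoadBalancingPolicy configured on the session rule always orders the query plan
+    // as node 0, 1, 2, which is what makes these coordinator assertions deterministic.)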
+    assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress());
+    // should have failed at first attempt at LOCAL_QUORUM
+    List<Map.Entry<Node, Throwable>> errors = rs.getExecutionInfo().getErrors();
+    assertThat(errors).hasSize(1);
+    Entry<Node, Throwable> error = errors.get(0);
+    assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+    assertThat(error.getValue())
+        .isInstanceOfSatisfying(
+            ReadTimeoutException.class,
+            rte -> {
+              assertThat(rte)
+                  .hasMessageContaining(
+                      "Cassandra timeout during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded)");
+              assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+              assertThat(rte.getReceived()).isEqualTo(1);
+              assertThat(rte.getBlockFor()).isEqualTo(2);
+              assertThat(rte.wasDataPresent()).isTrue();
+            });
+
+    // should have succeeded in second attempt at ONE
+    Statement<?> request = (Statement<?>) rs.getExecutionInfo().getRequest();
+    assertThat(request.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE);
+
+    // there should have been a retry, and it should have been executed on the same host,
+    // but with consistency ONE.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(1);
+    oneCounter.assertNodeCounts(1, 0, 0);
+
+    // expect 1 message: RETRY_SAME with ONE
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(1);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                1,
+                true,
+                0,
+                new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE)));
+  }
+
+  @Test
+  public void should_retry_on_read_timeout_when_enough_responses_and_data_not_present() {
+    // given a node that will respond to a query with a read timeout where 2 out of 2 responses are
+    // received, but data is not present.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 2, 2, false)));
+
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected a ReadTimeoutException");
+    } catch (ReadTimeoutException rte) {
+      // then a read timeout exception is thrown.
+      assertThat(rte)
+          .hasMessageContaining(
+              "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)");
+      assertThat(rte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(rte.getReceived()).isEqualTo(2);
+      assertThat(rte.getBlockFor()).isEqualTo(2);
+      assertThat(rte.wasDataPresent()).isFalse();
+      // the host that returned the response should be node 0.
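+      // (ExecutionInfo.getErrors() records one entry per failed attempt, so a single retry leaves
+      // exactly one error behind.)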
+      assertThat(coordinatorAddress(rte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should have failed at first attempt at LOCAL_QUORUM
+      List<Map.Entry<Node, Throwable>> errors = rte.getExecutionInfo().getErrors();
+      assertThat(errors).hasSize(1);
+      Entry<Node, Throwable> error = errors.get(0);
+      assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+      assertThat(error.getValue())
+          .isInstanceOfSatisfying(
+              ReadTimeoutException.class,
+              rte1 -> {
+                assertThat(rte1)
+                    .hasMessageContaining(
+                        "Cassandra timeout during read query at consistency LOCAL_QUORUM (the replica queried for data didn't respond)");
+                assertThat(rte1.getConsistencyLevel())
+                    .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+                assertThat(rte1.getReceived()).isEqualTo(2);
+                assertThat(rte1.getBlockFor()).isEqualTo(2);
+                assertThat(rte1.wasDataPresent()).isFalse();
+              });
+    }
+
+    // there should have been a retry, and it should have been executed on the same host.
+    localQuorumCounter.assertTotalCount(2);
+    localQuorumCounter.assertNodeCounts(2, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // verify log events were emitted as expected
+    verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(2);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                2,
+                false,
+                0,
+                RetryVerdict.RETRY_SAME));
+    assertThat(loggedEvents.get(1).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                2,
+                false,
+                1,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_only_retry_once_on_read_type() {
+    // given a node that will respond to a query with a read timeout at 2 CLs.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM).then(readTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, true)));
+    node0.prime(when(QUERY_ONE).then(readTimeout(ConsistencyLevel.ONE, 0, 1, false)));
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected a ReadTimeoutException");
+    } catch (ReadTimeoutException wte) {
+      // then a read timeout exception is thrown
+      assertThat(wte)
+          .hasMessageContaining(
+              "Cassandra timeout during read query at consistency ONE (1 responses were required but only 0 replica responded)");
+      assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE);
+      assertThat(wte.getReceived()).isEqualTo(0);
+      assertThat(wte.getBlockFor()).isEqualTo(1);
+      assertThat(wte.wasDataPresent()).isFalse();
+      // the host that returned the response should be node 0.
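+      // (the second prime, at ONE, makes the downgraded retry fail too, showing that the policy
+      // downgrades at most once and then rethrows.)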
+      assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should have failed at first attempt at LOCAL_QUORUM as well
+      List<Map.Entry<Node, Throwable>> errors = wte.getExecutionInfo().getErrors();
+      assertThat(errors).hasSize(1);
+      Entry<Node, Throwable> error = errors.get(0);
+      assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+      assertThat(error.getValue())
+          .isInstanceOfSatisfying(
+              ReadTimeoutException.class,
+              wte1 -> {
+                assertThat(wte1)
+                    .hasMessageContaining(
+                        "Cassandra timeout during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded)");
+                assertThat(wte1.getConsistencyLevel())
+                    .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+                assertThat(wte1.getReceived()).isEqualTo(1);
+                assertThat(wte1.getBlockFor()).isEqualTo(2);
+                assertThat(wte1.wasDataPresent()).isTrue();
+              });
+    }
+
+    // should have been retried on same host, but at consistency ONE.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(1);
+    oneCounter.assertNodeCounts(1, 0, 0);
+
+    // verify log events were emitted as expected
+    verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(2);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                1,
+                true,
+                0,
+                new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE)));
+    assertThat(loggedEvents.get(1).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_READ_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.ONE,
+                1,
+                0,
+                false,
+                1,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_retry_on_write_timeout_if_write_type_batch_log() {
+    // given a node that will respond to query with a write timeout with write type of batch log.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, BATCH_LOG)));
+
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("WriteTimeoutException expected");
+    } catch (WriteTimeoutException wte) {
+      // then a write timeout exception is thrown
+      assertThat(wte)
+          .hasMessageContaining(
+              "Cassandra timeout during BATCH_LOG write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)");
+      assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(wte.getReceived()).isEqualTo(1);
+      assertThat(wte.getBlockFor()).isEqualTo(2);
+      assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG);
+      // the host that returned the response should be node 0.
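+      // (a BATCH_LOG timeout means the coordinator timed out while writing to the batch log,
+      // before any mutation was applied, which is why retrying is considered safe here.)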
+      assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should have failed at first attempt at LOCAL_QUORUM as well
+      List<Map.Entry<Node, Throwable>> errors = wte.getExecutionInfo().getErrors();
+      assertThat(errors).hasSize(1);
+      Entry<Node, Throwable> error = errors.get(0);
+      assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+      assertThat(error.getValue())
+          .isInstanceOfSatisfying(
+              WriteTimeoutException.class,
+              wte1 -> {
+                assertThat(wte1)
+                    .hasMessageContaining(
+                        "Cassandra timeout during BATCH_LOG write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)");
+                assertThat(wte1.getConsistencyLevel())
+                    .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+                assertThat(wte1.getReceived()).isEqualTo(1);
+                assertThat(wte1.getBlockFor()).isEqualTo(2);
+                assertThat(wte1.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG);
+              });
+    }
+
+    // there should have been a retry, and it should have been executed on the same host.
+    localQuorumCounter.assertTotalCount(2);
+    localQuorumCounter.assertNodeCounts(2, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // verify log events were emitted as expected
+    verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(2);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                DefaultWriteType.BATCH_LOG,
+                2,
+                1,
+                0,
+                RetryVerdict.RETRY_SAME));
+    assertThat(loggedEvents.get(1).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                DefaultWriteType.BATCH_LOG,
+                2,
+                1,
+                1,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_not_retry_on_write_timeout_if_write_type_batch_log_but_non_idempotent() {
+    // given a node that will respond to query with a write timeout with write type of batch log.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, BATCH_LOG)));
+
+    try {
+      // when executing a non-idempotent query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false));
+      fail("WriteTimeoutException expected");
+    } catch (WriteTimeoutException wte) {
+      // then a write timeout exception is thrown
+      assertThat(wte)
+          .hasMessageContaining(
+              "Cassandra timeout during BATCH_LOG write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)");
+      assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(wte.getReceived()).isEqualTo(1);
+      assertThat(wte.getBlockFor()).isEqualTo(2);
+      assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.BATCH_LOG);
+      // the host that returned the response should be node 0.
+      assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should not have been retried
+      List<Map.Entry<Node, Throwable>> errors = wte.getExecutionInfo().getErrors();
+      assertThat(errors).isEmpty();
+    }
+
+    // should not have been retried.
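+    // (note: the verify below uses Mockito's after(500), which waits the full delay before
+    // asserting zero interactions, whereas timeout(...) returns as soon as the expected call
+    // happens.)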
+    localQuorumCounter.assertTotalCount(1);
+    oneCounter.assertTotalCount(0);
+
+    // expect no logging messages since there was no retry
+    verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class));
+  }
+
+  @DataProvider({"SIMPLE,SIMPLE", "BATCH,BATCH"})
+  @Test
+  public void should_ignore_on_write_timeout_if_write_type_ignorable_and_at_least_one_ack_received(
+      WriteType writeType, DefaultWriteType driverWriteType) {
+    // given a node that will respond to query with a write timeout with write type that is either
+    // SIMPLE or BATCH.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, writeType)));
+
+    // when executing a query.
+    ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+
+    // should have ignored the write timeout
+    assertThat(rs.all()).isEmpty();
+    // the host that returned the response should be node 0.
+    assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress());
+    assertThat(rs.getExecutionInfo().getErrors()).isEmpty();
+
+    // should not have been retried.
+    localQuorumCounter.assertTotalCount(1);
+    oneCounter.assertTotalCount(0);
+
+    // verify a single log event was emitted as expected
+    verify(appender, after(500)).doAppend(loggingEventCaptor.capture());
+    // the log message should record the IGNORE verdict, with no prior retries
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                driverWriteType,
+                2,
+                1,
+                0,
+                RetryVerdict.IGNORE));
+  }
+
+  @DataProvider({"SIMPLE,SIMPLE", "BATCH,BATCH"})
+  @Test
+  public void should_throw_on_write_timeout_if_write_type_ignorable_but_no_ack_received(
+      WriteType writeType, DefaultWriteType driverWriteType) {
+    // given a node that will respond to query with a write timeout with write type that is either
+    // SIMPLE or BATCH.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 0, 2, writeType)));
+
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("WriteTimeoutException expected");
+    } catch (WriteTimeoutException wte) {
+      // then a write timeout exception is thrown
+      assertThat(wte)
+          .hasMessageContaining(
+              "Cassandra timeout during "
+                  + driverWriteType
+                  + " write query at consistency LOCAL_QUORUM (2 replica were required but only 0 acknowledged the write)");
+      assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(wte.getReceived()).isEqualTo(0);
+      assertThat(wte.getBlockFor()).isEqualTo(2);
+      assertThat(wte.getWriteType()).isEqualTo(driverWriteType);
+      // the host that returned the response should be node 0.
+      assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should not have been retried
+      List<Map.Entry<Node, Throwable>> errors = wte.getExecutionInfo().getErrors();
+      assertThat(errors).isEmpty();
+    }
+
+    // should not have been retried.
+    localQuorumCounter.assertTotalCount(1);
+    oneCounter.assertTotalCount(0);
+
+    // verify a single log event was emitted as expected
+    verify(appender, after(500)).doAppend(loggingEventCaptor.capture());
+    // the log message should record the RETHROW verdict, with no prior retries
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                driverWriteType,
+                2,
+                0,
+                0,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_downgrade_on_write_timeout_if_write_type_unlogged_batch() {
+    // given a node that will respond to query with a write timeout with write type UNLOGGED_BATCH.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, UNLOGGED_BATCH)));
+
+    // when executing a query.
+    ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+
+    // the host that returned the response should be node 0.
+    assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress());
+    // should have failed at first attempt at LOCAL_QUORUM
+    List<Map.Entry<Node, Throwable>> errors = rs.getExecutionInfo().getErrors();
+    assertThat(errors).hasSize(1);
+    Entry<Node, Throwable> error = errors.get(0);
+    assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+    assertThat(error.getValue())
+        .isInstanceOfSatisfying(
+            WriteTimeoutException.class,
+            wte -> {
+              assertThat(wte)
+                  .hasMessageContaining(
+                      "Cassandra timeout during UNLOGGED_BATCH write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)");
+              assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+              assertThat(wte.getReceived()).isEqualTo(1);
+              assertThat(wte.getBlockFor()).isEqualTo(2);
+              assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH);
+            });
+
+    // should have succeeded in second attempt at ONE
+    Statement<?> request = (Statement<?>) rs.getExecutionInfo().getRequest();
+    assertThat(request.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE);
+
+    // there should have been a retry, and it should have been executed on the same host,
+    // but at consistency ONE.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(1);
+    oneCounter.assertNodeCounts(1, 0, 0);
+
+    // verify 1 log event was emitted as expected
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(1);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                DefaultWriteType.UNLOGGED_BATCH,
+                2,
+                1,
+                0,
+                new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE)));
+  }
+
+  @Test
+  public void
+      should_not_downgrade_on_write_timeout_if_write_type_unlogged_batch_and_non_idempotent() {
+    // given a node that will respond to query with a write timeout with write type UNLOGGED_BATCH.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, UNLOGGED_BATCH)));
+
+    try {
+      // when executing a non-idempotent query.
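+      // (setIdempotent(false) overrides the REQUEST_DEFAULT_IDEMPOTENCE=true default configured
+      // on the session rule above.)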
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false));
+      fail("WriteTimeoutException expected");
+    } catch (WriteTimeoutException wte) {
+      // then a write timeout exception is thrown
+      assertThat(wte)
+          .hasMessageContaining(
+              "Cassandra timeout during UNLOGGED_BATCH write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)");
+      assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(wte.getReceived()).isEqualTo(1);
+      assertThat(wte.getBlockFor()).isEqualTo(2);
+      assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH);
+      // the host that returned the response should be node 0.
+      assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should not have been retried
+      List<Map.Entry<Node, Throwable>> errors = wte.getExecutionInfo().getErrors();
+      assertThat(errors).isEmpty();
+    }
+
+    // should not have been retried.
+    localQuorumCounter.assertTotalCount(1);
+    oneCounter.assertTotalCount(0);
+
+    // expect no logging messages since there was no retry
+    verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class));
+  }
+
+  @Test
+  public void should_only_retry_once_on_write_type() {
+    // given a node that will respond to a query with a write timeout at 2 CLs.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(writeTimeout(ConsistencyLevel.LOCAL_QUORUM, 1, 2, UNLOGGED_BATCH)));
+    node0.prime(when(QUERY_ONE).then(writeTimeout(ConsistencyLevel.ONE, 0, 1, UNLOGGED_BATCH)));
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected a WriteTimeoutException");
+    } catch (WriteTimeoutException wte) {
+      // then a write timeout exception is thrown
+      assertThat(wte)
+          .hasMessageContaining(
+              "Cassandra timeout during UNLOGGED_BATCH write query at consistency ONE (1 replica were required but only 0 acknowledged the write)");
+      assertThat(wte.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE);
+      assertThat(wte.getReceived()).isEqualTo(0);
+      assertThat(wte.getBlockFor()).isEqualTo(1);
+      assertThat(wte.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH);
+      // the host that returned the response should be node 0.
+      assertThat(coordinatorAddress(wte.getExecutionInfo())).isEqualTo(node0.getAddress());
+      // should have failed at first attempt at LOCAL_QUORUM as well
+      List<Map.Entry<Node, Throwable>> errors = wte.getExecutionInfo().getErrors();
+      assertThat(errors).hasSize(1);
+      Entry<Node, Throwable> error = errors.get(0);
+      assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+      assertThat(error.getValue())
+          .isInstanceOfSatisfying(
+              WriteTimeoutException.class,
+              wte1 -> {
+                assertThat(wte1)
+                    .hasMessageContaining(
+                        "Cassandra timeout during UNLOGGED_BATCH write query at consistency LOCAL_QUORUM (2 replica were required but only 1 acknowledged the write)");
+                assertThat(wte1.getConsistencyLevel())
+                    .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+                assertThat(wte1.getReceived()).isEqualTo(1);
+                assertThat(wte1.getBlockFor()).isEqualTo(2);
+                assertThat(wte1.getWriteType()).isEqualTo(DefaultWriteType.UNLOGGED_BATCH);
+              });
+    }
+
+    // should have been retried on same host, but at consistency ONE.
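+    // (QueryCounter filters Simulacron's activity logs by query string and consistency level, so
+    // the per-node counts below pinpoint exactly where each attempt landed.)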
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(1);
+    oneCounter.assertNodeCounts(1, 0, 0);
+
+    // verify log events were emitted as expected
+    verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(2);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                DefaultWriteType.UNLOGGED_BATCH,
+                2,
+                1,
+                0,
+                new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE)));
+    assertThat(loggedEvents.get(1).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_WRITE_TIMEOUT,
+                logPrefix,
+                DefaultConsistencyLevel.ONE,
+                DefaultWriteType.UNLOGGED_BATCH,
+                1,
+                0,
+                1,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_retry_on_next_host_on_unavailable_if_LWT() {
+    // given a node that will respond to a query with an unavailable.
+    node0.prime(when(QUERY_LOCAL_SERIAL).then(unavailable(ConsistencyLevel.LOCAL_SERIAL, 2, 1)));
+
+    // when executing a query.
+    ResultSet result = sessionRule.session().execute(STATEMENT_LOCAL_SERIAL);
+    // then we should get a response, and the host that returned the response should be node 1.
+    assertThat(coordinatorAddress(result.getExecutionInfo())).isEqualTo(node1.getAddress());
+    // the execution info on the result set indicates there was
+    // an error on the host that received the query.
+    assertThat(result.getExecutionInfo().getErrors()).hasSize(1);
+    Map.Entry<Node, Throwable> error = result.getExecutionInfo().getErrors().get(0);
+    assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+    assertThat(error.getValue())
+        .isInstanceOfSatisfying(
+            UnavailableException.class,
+            ue -> {
+              assertThat(ue)
+                  .hasMessageContaining(
+                      "Not enough replicas available for query at consistency LOCAL_SERIAL (2 required but only 1 alive)");
+              assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_SERIAL);
+              assertThat(ue.getAlive()).isEqualTo(1);
+              assertThat(ue.getRequired()).isEqualTo(2);
+            });
+
+    // should have been retried on another host.
+    localSerialCounter.assertTotalCount(2);
+    localSerialCounter.assertNodeCounts(1, 1, 0);
+    localQuorumCounter.assertTotalCount(0);
+    oneCounter.assertTotalCount(0);
+
+    // verify log event was emitted as expected
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_SERIAL,
+                2,
+                1,
+                0,
+                RetryVerdict.RETRY_NEXT));
+  }
+
+  @Test
+  public void should_downgrade_on_unavailable() {
+    // given a node that will respond to a query with an unavailable.
+    node0.prime(when(QUERY_LOCAL_QUORUM).then(unavailable(ConsistencyLevel.LOCAL_QUORUM, 2, 1)));
+
+    // when executing a query.
+    ResultSet rs = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+    // then we should get a response, and the host that returned the response should be node 0.
+    assertThat(coordinatorAddress(rs.getExecutionInfo())).isEqualTo(node0.getAddress());
+    // the execution info on the result set indicates there was
+    // an error on the host that received the query.
+    assertThat(rs.getExecutionInfo().getErrors()).hasSize(1);
+    Map.Entry<Node, Throwable> error = rs.getExecutionInfo().getErrors().get(0);
+    assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+    assertThat(error.getValue())
+        .isInstanceOfSatisfying(
+            UnavailableException.class,
+            ue -> {
+              assertThat(ue)
+                  .hasMessageContaining(
+                      "Not enough replicas available for query at consistency LOCAL_QUORUM (2 required but only 1 alive)");
+              assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+              assertThat(ue.getAlive()).isEqualTo(1);
+              assertThat(ue.getRequired()).isEqualTo(2);
+            });
+
+    // should have succeeded in second attempt at ONE
+    Statement<?> request = (Statement<?>) rs.getExecutionInfo().getRequest();
+    assertThat(request.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE);
+
+    // should have been retried on the same host, but at ONE.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(1);
+    oneCounter.assertNodeCounts(1, 0, 0);
+
+    // verify log event was emitted as expected
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                1,
+                0,
+                new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE)));
+  }
+
+  @Test
+  public void should_only_retry_once_on_unavailable() {
+    // given a node that will respond to a query with an unavailable at both consistency levels.
+    node0.prime(when(QUERY_LOCAL_QUORUM).then(unavailable(ConsistencyLevel.LOCAL_QUORUM, 2, 1)));
+    node0.prime(when(QUERY_ONE).then(unavailable(ConsistencyLevel.ONE, 1, 0)));
+
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected an UnavailableException");
+    } catch (UnavailableException ue) {
+      // then we should get an unavailable exception at consistency ONE, since the downgraded
+      // retry on the same host failed as well.
+      assertThat(ue)
+          .hasMessageContaining(
+              "Not enough replicas available for query at consistency ONE (1 required but only 0 alive)");
+      assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.ONE);
+      assertThat(ue.getRequired()).isEqualTo(1);
+      assertThat(ue.getAlive()).isEqualTo(0);
+      assertThat(ue.getExecutionInfo().getErrors()).hasSize(1);
+      Map.Entry<Node, Throwable> error = ue.getExecutionInfo().getErrors().get(0);
+      assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+      assertThat(error.getValue())
+          .isInstanceOfSatisfying(
+              UnavailableException.class,
+              ue1 -> {
+                assertThat(ue1)
+                    .hasMessageContaining(
+                        "Not enough replicas available for query at consistency LOCAL_QUORUM (2 required but only 1 alive)");
+                assertThat(ue1.getConsistencyLevel())
+                    .isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+                assertThat(ue1.getRequired()).isEqualTo(2);
+                assertThat(ue1.getAlive()).isEqualTo(1);
+              });
+    }
+
+    // should have been retried on same host, but at ONE.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(1);
+    oneCounter.assertNodeCounts(1, 0, 0);
+
+    // verify log events were emitted as expected
+    verify(appender, timeout(500).times(2)).doAppend(loggingEventCaptor.capture());
+    List<ILoggingEvent> loggedEvents = loggingEventCaptor.getAllValues();
+    assertThat(loggedEvents).hasSize(2);
+    assertThat(loggedEvents.get(0).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE,
+                logPrefix,
+                DefaultConsistencyLevel.LOCAL_QUORUM,
+                2,
+                1,
+                0,
+                new ConsistencyDowngradingRetryVerdict(DefaultConsistencyLevel.ONE)));
+    assertThat(loggedEvents.get(1).getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_UNAVAILABLE,
+                logPrefix,
+                DefaultConsistencyLevel.ONE,
+                1,
+                0,
+                1,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_retry_on_next_host_on_connection_error_if_idempotent() {
+    // given a node that will close its connection as result of receiving a query.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT)));
+
+    // when executing a query.
+    ResultSet result = sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+    // then we should get a response, and the execution info on the result set indicates there was
+    // an error on the host that received the query.
+    assertThat(result.getExecutionInfo().getErrors()).hasSize(1);
+    Map.Entry<Node, Throwable> error = result.getExecutionInfo().getErrors().get(0);
+    assertThat(error.getKey().getEndPoint().resolve()).isEqualTo(node0.getAddress());
+    assertThat(error.getValue()).isInstanceOf(ClosedConnectionException.class);
+    // the host that returned the response should be node 1.
+    assertThat(coordinatorAddress(result.getExecutionInfo())).isEqualTo(node1.getAddress());
+
+    // should have been retried.
+    localQuorumCounter.assertTotalCount(2);
+    // expected query on node 0, and retry on node 1.
+    localQuorumCounter.assertNodeCounts(1, 1, 0);
+    oneCounter.assertTotalCount(0);
+
+    // verify log event was emitted as expected
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_ABORTED,
+                logPrefix,
+                ClosedConnectionException.class.getSimpleName(),
+                error.getValue().getMessage(),
+                0,
+                RetryVerdict.RETRY_NEXT));
+  }
+
+  @Test
+  public void should_keep_retrying_on_next_host_on_connection_error() {
+    // given a request for which every node will close its connection upon receiving it.
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(QUERY_LOCAL_QUORUM)
+                .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT)));
+
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("AllNodesFailedException expected");
+    } catch (AllNodesFailedException ex) {
+      // then an AllNodesFailedException should be raised indicating that all nodes failed the
+      // request.
+      assertThat(ex.getAllErrors()).hasSize(3);
+    }
+
+    // should have been tried on all nodes.
+    localQuorumCounter.assertTotalCount(3);
+    // expected query on node 0, and retries on nodes 1 and 2.
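+    // (each RETRY_NEXT verdict moves to the next node in the query plan; once the plan is
+    // exhausted, the driver surfaces an AllNodesFailedException.)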
+    localQuorumCounter.assertNodeCounts(1, 1, 1);
+    oneCounter.assertTotalCount(0);
+
+    // verify log event was emitted for each host as expected
+    verify(appender, after(500).times(3)).doAppend(loggingEventCaptor.capture());
+    // final log message should have 2 retries
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_ABORTED,
+                logPrefix,
+                ClosedConnectionException.class.getSimpleName(),
+                "Lost connection to remote peer",
+                2,
+                RetryVerdict.RETRY_NEXT));
+  }
+
+  @Test
+  public void should_not_retry_on_connection_error_if_non_idempotent() {
+    // given a node that will close its connection as result of receiving a query.
+    node0.prime(
+        when(QUERY_LOCAL_QUORUM)
+            .then(closeConnection(DisconnectAction.Scope.CONNECTION, CloseType.DISCONNECT)));
+
+    try {
+      // when executing a non-idempotent query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false));
+      fail("ClosedConnectionException expected");
+    } catch (ClosedConnectionException ex) {
+      // then a ClosedConnectionException should be raised, indicating that the connection closed
+      // while handling the request on that node.
+      // this clearly indicates that the request wasn't retried.
+      // Exception should indicate that node 0 was the failing node.
+      // FIXME JAVA-2908
+      // Node coordinator = ex.getExecutionInfo().getCoordinator();
+      // assertThat(coordinator).isNotNull();
+      // assertThat(coordinator.getEndPoint().resolve())
+      //     .isEqualTo(SIMULACRON_RULE.cluster().node(0).getAddress());
+    }
+
+    // should not have been retried.
+    localQuorumCounter.assertTotalCount(1);
+    oneCounter.assertTotalCount(0);
+
+    // expect no logging messages since there was no retry
+    verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class));
+  }
+
+  @Test
+  public void should_keep_retrying_on_next_host_on_error_response() {
+    // given every node responding with a server error.
+    SIMULACRON_RULE
+        .cluster()
+        .prime(when(QUERY_LOCAL_QUORUM).then(serverError("this is a server error")));
+
+    try {
+      // when executing a query.
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected an AllNodesFailedException");
+    } catch (AllNodesFailedException e) {
+      // then we should get an all nodes failed exception, indicating the query was tried on each
+      // node.
+      assertThat(e.getAllErrors()).hasSize(3);
+      for (List<Throwable> nodeErrors : e.getAllErrors().values()) {
+        for (Throwable nodeError : nodeErrors) {
+          assertThat(nodeError).isInstanceOf(ServerError.class);
+          assertThat(nodeError).hasMessage("this is a server error");
+        }
+      }
+    }
+
+    // should have been tried on all nodes.
+    localQuorumCounter.assertTotalCount(3);
+    localQuorumCounter.assertNodeCounts(1, 1, 1);
+
+    // verify log event was emitted for each host as expected
+    verify(appender, after(500).times(3)).doAppend(loggingEventCaptor.capture());
+    // final log message should have 2 retries
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_ERROR,
+                logPrefix,
+                ServerError.class.getSimpleName(),
+                "this is a server error",
+                2,
+                RetryVerdict.RETRY_NEXT));
+  }
+
+  @Test
+  public void should_not_retry_on_next_host_on_error_response_if_write_failure() {
+    // given every node responding with a write failure.
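+    // (unlike timeouts, read/write failures mean replicas actively errored, a non-transient
+    // condition, so the policy rethrows instead of retrying.)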
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(QUERY_LOCAL_QUORUM)
+                .then(
+                    writeFailure(
+                        ConsistencyLevel.LOCAL_QUORUM, 1, 2, ImmutableMap.of(), WriteType.SIMPLE)));
+    try {
+      // when executing a query
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected a WriteFailureException");
+    } catch (WriteFailureException wfe) {
+      // then we should get a write failure exception from node 0, since write failures are
+      // rethrown without any retry.
+      assertThat(wfe)
+          .hasMessageContaining(
+              "Cassandra failure during write query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)");
+      assertThat(wfe.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(wfe.getBlockFor()).isEqualTo(2);
+      assertThat(wfe.getReceived()).isEqualTo(1);
+      assertThat(wfe.getWriteType()).isEqualTo(DefaultWriteType.SIMPLE);
+      assertThat(wfe.getReasonMap()).isEmpty();
+    }
+
+    // should only have been tried on first node.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // verify log event was emitted as expected
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_ERROR,
+                logPrefix,
+                WriteFailureException.class.getSimpleName(),
+                "Cassandra failure during write query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)",
+                0,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_not_retry_on_next_host_on_error_response_if_read_failure() {
+    // given every node responding with a read failure.
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(QUERY_LOCAL_QUORUM)
+                .then(readFailure(ConsistencyLevel.LOCAL_QUORUM, 1, 2, ImmutableMap.of(), true)));
+    try {
+      // when executing a query
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM);
+      fail("Expected a ReadFailureException");
+    } catch (ReadFailureException rfe) {
+      // then we should get a read failure exception from node 0, since read failures are
+      // rethrown without any retry.
+      assertThat(rfe)
+          .hasMessageContaining(
+              "Cassandra failure during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)");
+      assertThat(rfe.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
+      assertThat(rfe.getBlockFor()).isEqualTo(2);
+      assertThat(rfe.getReceived()).isEqualTo(1);
+      assertThat(rfe.wasDataPresent()).isTrue();
+      assertThat(rfe.getReasonMap()).isEmpty();
+    }
+
+    // should only have been tried on first node.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // verify log event was emitted as expected
+    verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture());
+    assertThat(loggingEventCaptor.getValue().getFormattedMessage())
+        .isEqualTo(
+            expectedMessage(
+                ConsistencyDowngradingRetryPolicy.VERDICT_ON_ERROR,
+                logPrefix,
+                ReadFailureException.class.getSimpleName(),
+                "Cassandra failure during read query at consistency LOCAL_QUORUM (2 responses were required but only 1 replica responded, 0 failed)",
+                0,
+                RetryVerdict.RETHROW));
+  }
+
+  @Test
+  public void should_not_retry_on_next_host_on_error_response_if_non_idempotent() {
+    // given every node responding with a server error.
+    SIMULACRON_RULE
+        .cluster()
+        .prime(when(QUERY_LOCAL_QUORUM).then(serverError("this is a server error")));
+
+    try {
+      // when executing a query that is not idempotent
+      sessionRule.session().execute(STATEMENT_LOCAL_QUORUM.setIdempotent(false));
+      fail("Expected a ServerError");
+    } catch (ServerError e) {
+      // then should get a server error from first host.
+      assertThat(e.getMessage()).isEqualTo("this is a server error");
+    }
+
+    // should only have been tried on first node.
+    localQuorumCounter.assertTotalCount(1);
+    localQuorumCounter.assertNodeCounts(1, 0, 0);
+    oneCounter.assertTotalCount(0);
+
+    // expect no logging messages since there was no retry
+    verify(appender, after(500).times(0)).doAppend(any(ILoggingEvent.class));
+  }
+
+  private String expectedMessage(String template, Object... args) {
+    return MessageFormatter.arrayFormat(template, args).getMessage();
+  }
+
+  private SocketAddress coordinatorAddress(ExecutionInfo executionInfo) {
+    Node coordinator = executionInfo.getCoordinator();
+    assertThat(coordinator).isNotNull();
+    return coordinator.getEndPoint().resolve();
+  }
+}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/DefaultRetryPolicyIT.java
similarity index 88%
rename from integration-tests/src/test/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicyIT.java
rename to integration-tests/src/test/java/com/datastax/oss/driver/core/retry/DefaultRetryPolicyIT.java
index 4527de4edf6..4a3cebf914f 100644
--- a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/retry/DefaultRetryPolicyIT.java
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/DefaultRetryPolicyIT.java
@@ -1,11 +1,13 @@
 /*
- * Copyright DataStax, Inc.
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-package com.datastax.oss.driver.internal.core.retry;
+package com.datastax.oss.driver.core.retry;

 import static com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_QUORUM;
 import static com.datastax.oss.simulacron.common.codec.WriteType.BATCH_LOG;
@@ -53,6 +55,7 @@
 import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
 import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter;
 import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule;
+import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy;
 import com.datastax.oss.simulacron.common.cluster.ClusterSpec;
 import com.datastax.oss.simulacron.common.stubbing.CloseType;
 import com.datastax.oss.simulacron.common.stubbing.DisconnectAction;
@@ -60,6 +63,7 @@
 import com.tngtech.java.junit.dataprovider.DataProviderRunner;
 import com.tngtech.java.junit.dataprovider.UseDataProvider;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 import org.junit.After;
 import org.junit.Before;
@@ -73,12 +77,12 @@
 @RunWith(DataProviderRunner.class)
 public class DefaultRetryPolicyIT {
-
- public static @ClassRule SimulacronRule simulacron =
+ @ClassRule
+ public static final SimulacronRule SIMULACRON_RULE =
     new SimulacronRule(ClusterSpec.builder().withNodes(3));

 public @Rule SessionRule<CqlSession> sessionRule =
-    SessionRule.builder(simulacron)
+    SessionRule.builder(SIMULACRON_RULE)
        .withConfigLoader(
            SessionUtils.configLoaderBuilder()
                .withBoolean(DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE, true)
@@ -101,9 +105,8 @@ public class DefaultRetryPolicyIT {
 private Level oldLevel;
 private String logPrefix;

- @SuppressWarnings("deprecation")
 private final QueryCounter counter =
-    QueryCounter.builder(simulacron.cluster())
+    QueryCounter.builder(SIMULACRON_RULE.cluster())
        .withFilter((l) -> l.getQuery().equals(queryStr))
        .build();

@@ -116,8 +119,8 @@ public void setup() {
   // the log prefix we expect in retry logging messages.
   logPrefix = sessionRule.session().getName() + "|default";
   // clear activity logs and primes between tests since simulacron instance is shared.
-  simulacron.cluster().clearLogs();
-  simulacron.cluster().clearPrimes(true);
+  SIMULACRON_RULE.cluster().clearLogs();
+  SIMULACRON_RULE.cluster().clearPrimes(true);
 }

 @After
@@ -129,7 +132,10 @@ public void teardown() {
 @Test
 public void should_not_retry_on_read_timeout_when_data_present() {
   // given a node that will respond to query with a read timeout where data is present.
-  simulacron.cluster().node(0).prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 1, 3, true)));
+  SIMULACRON_RULE
+      .cluster()
+      .node(0)
+      .prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 1, 3, true)));

   try {
     // when executing a query
@@ -155,7 +161,10 @@ public void should_not_retry_on_read_timeout_when_less_than_blockFor_received()
   // given a node that will respond to a query with a read timeout where 2 out of 3 responses are
   // received.
   // in this case, digest requests succeeded, but not the data request.
-  simulacron.cluster().node(0).prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 2, 3, false)));
+  SIMULACRON_RULE
+      .cluster()
+      .node(0)
+      .prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 2, 3, false)));

   try {
     // when executing a query
@@ -181,7 +190,10 @@ public void should_retry_on_read_timeout_when_enough_responses_and_data_not_pres
   // given a node that will respond to a query with a read timeout where 3 out of 3 responses are
   // received,
   // but data is not present.
-  simulacron.cluster().node(0).prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 3, 3, false)));
+  SIMULACRON_RULE
+      .cluster()
+      .node(0)
+      .prime(when(queryStr).then(readTimeout(LOCAL_QUORUM, 3, 3, false)));

   try {
     // when executing a query.
@@ -216,7 +228,7 @@ public void should_retry_on_read_timeout_when_enough_responses_and_data_not_pres
 @Test
 public void should_retry_on_next_host_on_connection_error_if_idempotent() {
   // given a node that will close its connection as result of receiving a query.
-  simulacron
+  SIMULACRON_RULE
      .cluster()
      .node(0)
      .prime(
@@ -231,11 +243,11 @@ public void should_retry_on_next_host_on_connection_error_if_idempotent() {
   assertThat(result.getExecutionInfo().getErrors()).hasSize(1);
   Map.Entry<Node, Throwable> error = result.getExecutionInfo().getErrors().get(0);
   assertThat(error.getKey().getEndPoint().resolve())
-      .isEqualTo(simulacron.cluster().node(0).inetSocketAddress());
+      .isEqualTo(SIMULACRON_RULE.cluster().node(0).inetSocketAddress());
   assertThat(error.getValue()).isInstanceOf(ClosedConnectionException.class);
   // the host that returned the response should be node 1.
   assertThat(result.getExecutionInfo().getCoordinator().getEndPoint().resolve())
-      .isEqualTo(simulacron.cluster().node(1).inetSocketAddress());
+      .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress());

   // should have been retried.
   counter.assertTotalCount(2);
@@ -251,7 +263,7 @@ public void should_retry_on_next_host_on_connection_error_if_idempotent() {
 @Test
 public void should_keep_retrying_on_next_host_on_connection_error() {
   // given a request for which every node will close its connection upon receiving it.
-  simulacron
+  SIMULACRON_RULE
      .cluster()
      .prime(
          when(queryStr)
@@ -264,7 +276,7 @@ public void should_keep_retrying_on_next_host_on_connection_error() {
   } catch (AllNodesFailedException ex) {
     // then an AllNodesFailedException should be raised indicating that all nodes failed the
     // request.
-    assertThat(ex.getErrors()).hasSize(3);
+    assertThat(ex.getAllErrors()).hasSize(3);
   }

   // should have been tried on all nodes.
@@ -283,7 +295,7 @@ public void should_keep_retrying_on_next_host_on_connection_error() {
 @Test
 public void should_not_retry_on_connection_error_if_non_idempotent() {
   // given a node that will close its connection as result of receiving a query.
-  simulacron
+  SIMULACRON_RULE
      .cluster()
      .node(0)
      .prime(
@@ -315,7 +327,7 @@ public void should_not_retry_on_connection_error_if_non_idempotent() {
 @Test
 public void should_retry_on_write_timeout_if_write_type_batch_log() {
   // given a node that will respond to query with a write timeout with write type of batch log.
-  simulacron
+  SIMULACRON_RULE
      .cluster()
      .node(0)
      .prime(when(queryStr).then(writeTimeout(LOCAL_QUORUM, 1, 3, BATCH_LOG)));
@@ -367,7 +379,7 @@ public void should_not_retry_on_write_timeout_if_write_type_non_batch_log(
     com.datastax.oss.simulacron.common.codec.WriteType writeType) {
   // given a node that will respond to query with a write timeout with write type that is not
   // batch log.
-  simulacron
+  SIMULACRON_RULE
      .cluster()
      .node(0)
      .prime(when(queryStr).then(writeTimeout(LOCAL_QUORUM, 1, 3, writeType)));
@@ -393,7 +405,7 @@ public void should_not_retry_on_write_timeout_if_write_type_non_batch_log(
 @Test
 public void should_not_retry_on_write_timeout_if_write_type_batch_log_but_non_idempotent() {
   // given a node that will respond to query with a write timeout with write type of batch log.
-  simulacron
+  SIMULACRON_RULE
      .cluster()
      .node(0)
      .prime(when(queryStr).then(writeTimeout(LOCAL_QUORUM, 1, 3, BATCH_LOG)));
@@ -422,7 +434,7 @@ public void should_not_retry_on_write_timeout_if_write_type_batch_log_but_non_id
 @Test
 public void should_retry_on_next_host_on_unavailable() {
   // given a node that will respond to a query with an unavailable.
-  simulacron.cluster().node(0).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0)));
+  SIMULACRON_RULE.cluster().node(0).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0)));

   // when executing a query.
   ResultSet result = sessionRule.session().execute(queryStr);
@@ -432,11 +444,11 @@ public void should_retry_on_next_host_on_unavailable() {
   assertThat(result.getExecutionInfo().getErrors()).hasSize(1);
   Map.Entry<Node, Throwable> error = result.getExecutionInfo().getErrors().get(0);
   assertThat(error.getKey().getEndPoint().resolve())
-      .isEqualTo(simulacron.cluster().node(0).inetSocketAddress());
+      .isEqualTo(SIMULACRON_RULE.cluster().node(0).inetSocketAddress());
   assertThat(error.getValue()).isInstanceOf(UnavailableException.class);
   // the host that returned the response should be node 1.
   assertThat(result.getExecutionInfo().getCoordinator().getEndPoint().resolve())
-      .isEqualTo(simulacron.cluster().node(1).inetSocketAddress());
+      .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress());

   // should have been retried on another host.
   counter.assertTotalCount(2);
@@ -453,8 +465,8 @@ public void should_retry_on_next_host_on_unavailable() {
 @Test
 public void should_only_retry_once_on_unavailable() {
   // given two nodes that will respond to a query with an unavailable.
-  simulacron.cluster().node(0).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0)));
-  simulacron.cluster().node(1).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0)));
+  SIMULACRON_RULE.cluster().node(0).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0)));
+  SIMULACRON_RULE.cluster().node(1).prime(when(queryStr).then(unavailable(LOCAL_QUORUM, 3, 0)));

   try {
     // when executing a query.
@@ -464,7 +476,7 @@ public void should_only_retry_once_on_unavailable() {
     // then we should get an unavailable exception with the host being node 1 (since it was second
     // tried).
     assertThat(ue.getCoordinator().getEndPoint().resolve())
-        .isEqualTo(simulacron.cluster().node(1).inetSocketAddress());
+        .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress());
     assertThat(ue.getConsistencyLevel()).isEqualTo(DefaultConsistencyLevel.LOCAL_QUORUM);
     assertThat(ue.getRequired()).isEqualTo(3);
     assertThat(ue.getAlive()).isEqualTo(0);
@@ -478,7 +490,7 @@ public void should_only_retry_once_on_unavailable() {
 @Test
 public void should_keep_retrying_on_next_host_on_error_response() {
   // given every node responding with a server error.
-  simulacron.cluster().prime(when(queryStr).then(serverError("this is a server error")));
+  SIMULACRON_RULE.cluster().prime(when(queryStr).then(serverError("this is a server error")));

   try {
     // when executing a query.
@@ -486,9 +498,11 @@ public void should_keep_retrying_on_next_host_on_error_response() {
     fail("Expected an AllNodesFailedException");
   } catch (AllNodesFailedException e) {
     // then we should get an all nodes failed exception, indicating the query was tried each node.
- assertThat(e.getErrors()).hasSize(3); - for (Throwable t : e.getErrors().values()) { - assertThat(t).isInstanceOf(ServerError.class); + assertThat(e.getAllErrors()).hasSize(3); + for (List nodeErrors : e.getAllErrors().values()) { + for (Throwable nodeError : nodeErrors) { + assertThat(nodeError).isInstanceOf(ServerError.class); + } } } @@ -506,7 +520,7 @@ public void should_keep_retrying_on_next_host_on_error_response() { @Test public void should_not_retry_on_next_host_on_error_response_if_non_idempotent() { // given every node responding with a server error. - simulacron.cluster().prime(when(queryStr).then(serverError("this is a server error"))); + SIMULACRON_RULE.cluster().prime(when(queryStr).then(serverError("this is a server error"))); try { // when executing a query that is not idempotent diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/retry/PerProfileRetryPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/PerProfileRetryPolicyIT.java similarity index 81% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/retry/PerProfileRetryPolicyIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/retry/PerProfileRetryPolicyIT.java index 69d0631e767..b2e53bb09d0 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/retry/PerProfileRetryPolicyIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/retry/PerProfileRetryPolicyIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.retry; +package com.datastax.oss.driver.core.retry; import static com.datastax.oss.driver.assertions.Assertions.assertThat; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; @@ -29,6 +31,8 @@ import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.retry.RetryDecision; +import com.datastax.oss.driver.api.core.retry.RetryPolicy; import com.datastax.oss.driver.api.core.servererrors.CoordinatorException; import com.datastax.oss.driver.api.core.servererrors.UnavailableException; import com.datastax.oss.driver.api.core.servererrors.WriteType; @@ -56,10 +60,11 @@ public class PerProfileRetryPolicyIT { // Shared across all tests methods. 
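PerProfileRetryPolicyIT, whose diff continues below, verifies that a retry policy can be chosen per execution profile. For orientation, a hedged sketch of how an application might declare such profiles programmatically; the profile names mirror the test, and `com.example.NoRetryPolicy` is a placeholder class name:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

class PerProfileRetryExample {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            // profile1 plugs in a custom policy (placeholder FQCN)...
            .startProfile("profile1")
            .withString(DefaultDriverOption.RETRY_POLICY_CLASS, "com.example.NoRetryPolicy")
            .endProfile()
            // ...profile2 overrides another option and inherits the default retry policy.
            .startProfile("profile2")
            .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE")
            .endProfile()
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // The profile is selected per request:
      session.execute(
          SimpleStatement.newInstance("select * from foo").setExecutionProfileName("profile1"));
    }
  }
}
```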
- private static SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(2)); + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(2)); - private static SessionRule sessionRule = - SessionRule.builder(simulacron) + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withClass( @@ -73,26 +78,26 @@ public class PerProfileRetryPolicyIT { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); private static String QUERY_STRING = "select * from foo"; private static final SimpleStatement QUERY = SimpleStatement.newInstance(QUERY_STRING); - @SuppressWarnings("deprecation") private final QueryCounter counter = - QueryCounter.builder(simulacron.cluster()) + QueryCounter.builder(SIMULACRON_RULE.cluster()) .withFilter((l) -> l.getQuery().equals(QUERY_STRING)) .build(); @Before public void clear() { - simulacron.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearLogs(); } @BeforeClass public static void setup() { // node 0 will return an unavailable error to the query. - simulacron + SIMULACRON_RULE .cluster() .node(0) .prime( @@ -101,10 +106,10 @@ public static void setup() { unavailable( com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, 1, 0))); // node 1 will return a valid empty rows response. - simulacron.cluster().node(1).prime(when(QUERY_STRING).then(noRows())); + SIMULACRON_RULE.cluster().node(1).prime(when(QUERY_STRING).then(noRows())); // sanity checks - DriverContext context = sessionRule.session().getContext(); + DriverContext context = SESSION_RULE.session().getContext(); DriverConfig config = context.getConfig(); assertThat(config.getProfiles()).containsKeys("profile1", "profile2"); @@ -125,14 +130,14 @@ public static void setup() { @Test(expected = UnavailableException.class) public void should_use_policy_from_request_profile() { // since profile1 uses a NoRetryPolicy, UnavailableException should surface to the client. - sessionRule.session().execute(QUERY.setExecutionProfileName("profile1")); + SESSION_RULE.session().execute(QUERY.setExecutionProfileName("profile1")); } @Test public void should_use_policy_from_config_when_not_configured_in_request_profile() { // since profile2 has no configured retry policy, it should defer to configuration which uses // DefaultRetryPolicy, which should try the request on the next host (host 1). - ResultSet result = sessionRule.session().execute(QUERY.setExecutionProfileName("profile2")); + ResultSet result = SESSION_RULE.session().execute(QUERY.setExecutionProfileName("profile2")); // expect an unavailable exception to be present in errors. List> errors = result.getExecutionInfo().getErrors(); @@ -145,9 +150,11 @@ public void should_use_policy_from_config_when_not_configured_in_request_profile // A policy that simply rethrows always.
public static class NoRetryPolicy implements RetryPolicy { + @SuppressWarnings("unused") public NoRetryPolicy(DriverContext context, String profileName) {} @Override + @Deprecated public RetryDecision onReadTimeout( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -159,6 +166,7 @@ public RetryDecision onReadTimeout( } @Override + @Deprecated public RetryDecision onWriteTimeout( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -170,6 +178,7 @@ public RetryDecision onWriteTimeout( } @Override + @Deprecated public RetryDecision onUnavailable( @NonNull Request request, @NonNull ConsistencyLevel cl, @@ -180,12 +189,14 @@ public RetryDecision onUnavailable( } @Override + @Deprecated public RetryDecision onRequestAborted( @NonNull Request request, @NonNull Throwable error, int retryCount) { return RetryDecision.RETHROW; } @Override + @Deprecated public RetryDecision onErrorResponse( @NonNull Request request, @NonNull CoordinatorException error, int retryCount) { return RetryDecision.RETHROW; diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/AddedNodeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/AddedNodeIT.java new file mode 100644 index 00000000000..1ce3fd1ca0e --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/AddedNodeIT.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
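The `@SuppressWarnings("unused")` added to the `NoRetryPolicy` constructor above exists because the driver instantiates pluggable components reflectively: anything registered through a `*_CLASS` config option is expected to expose a `(DriverContext, String profileName)` constructor, which static analysis cannot see being called. A sketch of the shape such a component must have; the class name and body are illustrative:

```java
import com.datastax.oss.driver.api.core.context.DriverContext;

// Instantiated reflectively by the driver when referenced from configuration,
// e.g. via DefaultDriverOption.RETRY_POLICY_CLASS; the constructor is never
// called directly from application code, hence the "unused" warnings.
public class MyPluggableComponent {
  public MyPluggableComponent(DriverContext context, String profileName) {
    // Custom options can be read from context.getConfig() here if needed.
  }
}
```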
+ */ +package com.datastax.oss.driver.core.session; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; +import com.datastax.oss.driver.api.core.metadata.token.TokenRange; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.ClassRule; +import org.junit.Test; + +public class AddedNodeIT { + + @ClassRule + public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withNodes(3).build(); + + @Test + public void should_signal_and_create_pool_when_node_gets_added() { + AddListener addListener = new AddListener(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, null, addListener, null, null)) { + assertThat(session.getMetadata().getTokenMap()).isPresent(); + Set tokenRanges = session.getMetadata().getTokenMap().get().getTokenRanges(); + assertThat(tokenRanges).hasSize(3); + CCM_RULE.getCcmBridge().add(4, "dc1"); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> addListener.addedNode != null); + Map pools = ((DefaultSession) session).getPools(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> pools.containsKey(addListener.addedNode)); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> session.getMetadata().getTokenMap().get().getTokenRanges().size() == 4); + } + } + + static class AddListener implements NodeStateListener { + + volatile Node addedNode; + + @Override + public void onRemove(@NonNull Node node) {} + + @Override + public void onAdd(@NonNull Node node) { + addedNode = node; + } + + @Override + public void onUp(@NonNull Node node) {} + + @Override + public void onDown(@NonNull Node node) {} + + @Override + public void close() throws Exception {} + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/ExceptionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ExceptionIT.java similarity index 77% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/ExceptionIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/session/ExceptionIT.java index e0a26b93a2f..b3a96dde3b9 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/ExceptionIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ExceptionIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.session; +package com.datastax.oss.driver.core.session; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.unavailable; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; @@ -47,10 +49,11 @@ @Category(ParallelizableTests.class) public class ExceptionIT { - private static SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(2)); + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(2)); - private static SessionRule sessionRule = - SessionRule.builder(simulacron) + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE) .withConfigLoader( SessionUtils.configLoaderBuilder() .withClass( @@ -60,19 +63,20 @@ public class ExceptionIT { .build()) .build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(simulacron).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); private static String QUERY_STRING = "select * from foo"; @Before public void clear() { - simulacron.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearLogs(); } @Test public void should_expose_execution_info_on_exceptions() { // Given - simulacron + SIMULACRON_RULE .cluster() .node(0) .prime( @@ -80,22 +84,21 @@ public void should_expose_execution_info_on_exceptions() { .then( unavailable( com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, 1, 0))); - simulacron + SIMULACRON_RULE .cluster() .node(1) .prime(when(QUERY_STRING).then(PrimeDsl.invalid("Mock error message"))); // Then - assertThatThrownBy(() -> sessionRule.session().execute(QUERY_STRING)) + assertThatThrownBy(() -> SESSION_RULE.session().execute(QUERY_STRING)) .isInstanceOf(InvalidQueryException.class) .satisfies( exception -> { ExecutionInfo info = ((InvalidQueryException) exception).getExecutionInfo(); assertThat(info).isNotNull(); assertThat(info.getCoordinator().getEndPoint().resolve()) - .isEqualTo(simulacron.cluster().node(1).inetSocketAddress()); - assertThat(((SimpleStatement) info.getStatement()).getQuery()) - .isEqualTo(QUERY_STRING); + .isEqualTo(SIMULACRON_RULE.cluster().node(1).inetSocketAddress()); + assertThat(((SimpleStatement) info.getRequest()).getQuery()).isEqualTo(QUERY_STRING); // specex disabled => the initial execution completed the response assertThat(info.getSpeculativeExecutionCount()).isEqualTo(0); @@ -114,7 +117,7 @@ public void should_expose_execution_info_on_exceptions() { assertThat(errors).hasSize(1); Map.Entry entry0 = errors.get(0); assertThat(entry0.getKey().getEndPoint().resolve()) - .isEqualTo(simulacron.cluster().node(0).inetSocketAddress()); + .isEqualTo(SIMULACRON_RULE.cluster().node(0).inetSocketAddress()); Throwable node0Exception = entry0.getValue(); assertThat(node0Exception).isInstanceOf(UnavailableException.class); // ExecutionInfo is not exposed for 
retried errors diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ListenersIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ListenersIT.java new file mode 100644 index 00000000000..0fa089483fd --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ListenersIT.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.session; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; +import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase; +import com.datastax.oss.driver.api.core.metadata.SafeInitNodeStateListener; +import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListenerBase; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.tracker.RequestTracker; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@Category(ParallelizableTests.class) +@RunWith(MockitoJUnitRunner.class) +public class ListenersIT { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + @Mock private NodeStateListener nodeListener1; + @Mock private NodeStateListener nodeListener2; + @Mock private SchemaChangeListener schemaListener1; + @Mock private SchemaChangeListener schemaListener2; + @Mock private RequestTracker requestTracker1; + @Mock private RequestTracker requestTracker2; + + @Captor 
private ArgumentCaptor nodeCaptor1; + @Captor private ArgumentCaptor nodeCaptor2; + + @Test + public void should_inject_session_in_listeners() throws Exception { + try (CqlSession session = + (CqlSession) + SessionUtils.baseBuilder() + .addContactEndPoints(SIMULACRON_RULE.getContactPoints()) + .addNodeStateListener(new SafeInitNodeStateListener(nodeListener1, true)) + .addNodeStateListener(new SafeInitNodeStateListener(nodeListener2, true)) + .addSchemaChangeListener(schemaListener1) + .addSchemaChangeListener(schemaListener2) + .addRequestTracker(requestTracker1) + .addRequestTracker(requestTracker2) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .withClassList( + DefaultDriverOption.METADATA_NODE_STATE_LISTENER_CLASSES, + Collections.singletonList(MyNodeStateListener.class)) + .withClassList( + DefaultDriverOption.METADATA_SCHEMA_CHANGE_LISTENER_CLASSES, + Collections.singletonList(MySchemaChangeListener.class)) + .withClassList( + DefaultDriverOption.REQUEST_TRACKER_CLASSES, + Collections.singletonList(MyRequestTracker.class)) + .build()) + .build()) { + + // These NodeStateListeners are wrapped with SafeInitNodeStateListener which delays #onUp + // callbacks until #onSessionReady is called, these will all happen during session + // initialization + InOrder inOrder1 = inOrder(nodeListener1); + inOrder1.verify(nodeListener1).onSessionReady(session); + inOrder1.verify(nodeListener1).onUp(nodeCaptor1.capture()); + + InOrder inOrder2 = inOrder(nodeListener2); + inOrder2.verify(nodeListener2).onSessionReady(session); + inOrder2.verify(nodeListener2).onUp(nodeCaptor2.capture()); + + assertThat(nodeCaptor1.getValue().getEndPoint()) + .isEqualTo(SIMULACRON_RULE.getContactPoints().iterator().next()); + + assertThat(nodeCaptor2.getValue().getEndPoint()) + .isEqualTo(SIMULACRON_RULE.getContactPoints().iterator().next()); + + // SchemaChangeListener#onSessionReady is called asynchronously from AdminExecutor so we may + // have to wait a little + verify(schemaListener1, timeout(500).times(1)).onSessionReady(session); + verify(schemaListener2, timeout(500).times(1)).onSessionReady(session); + + // Request tracker #onSessionReady is called synchronously during session initialization + verify(requestTracker1).onSessionReady(session); + verify(requestTracker2).onSessionReady(session); + + assertThat(MyNodeStateListener.onSessionReadyCalled).isTrue(); + assertThat(MyNodeStateListener.onUpCalled).isTrue(); + + // SchemaChangeListener#onSessionReady is called asynchronously from AdminExecutor so we may + // have to wait a little + assertThat( + Uninterruptibles.awaitUninterruptibly( + MySchemaChangeListener.onSessionReadyLatch, 500, TimeUnit.MILLISECONDS)) + .isTrue(); + + assertThat(MyRequestTracker.onSessionReadyCalled).isTrue(); + } + + // CqlSession#close waits for all listener close methods to be called + verify(nodeListener1).close(); + verify(nodeListener2).close(); + + verify(schemaListener1).close(); + verify(schemaListener2).close(); + + verify(requestTracker1).close(); + verify(requestTracker2).close(); + + assertThat(MyNodeStateListener.closeCalled).isTrue(); + assertThat(MySchemaChangeListener.closeCalled).isTrue(); + assertThat(MyRequestTracker.closeCalled).isTrue(); + } + + public static class MyNodeStateListener extends SafeInitNodeStateListener { + + private static volatile boolean onSessionReadyCalled = false; + private static volatile boolean onUpCalled = false; + private static volatile boolean closeCalled = false; + + public MyNodeStateListener(@SuppressWarnings("unused") 
DriverContext ignored) { + super( + new NodeStateListenerBase() { + + @Override + public void onSessionReady(@NonNull Session session) { + onSessionReadyCalled = true; + } + + @Override + public void onUp(@NonNull Node node) { + onUpCalled = true; + } + + @Override + public void close() { + closeCalled = true; + } + }, + true); + } + } + + public static class MySchemaChangeListener extends SchemaChangeListenerBase { + + private static CountDownLatch onSessionReadyLatch = new CountDownLatch(1); + private static volatile boolean closeCalled = false; + + public MySchemaChangeListener(@SuppressWarnings("unused") DriverContext ignored) {} + + @Override + public void onSessionReady(@NonNull Session session) { + onSessionReadyLatch.countDown(); + } + + @Override + public void close() throws Exception { + closeCalled = true; + } + } + + public static class MyRequestTracker implements RequestTracker { + + private static volatile boolean onSessionReadyCalled = false; + private static volatile boolean closeCalled = false; + + public MyRequestTracker(@SuppressWarnings("unused") DriverContext ignored) {} + + @Override + public void onSessionReady(@NonNull Session session) { + onSessionReadyCalled = true; + } + + @Override + public void close() throws Exception { + closeCalled = true; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RemovedNodeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RemovedNodeIT.java new file mode 100644 index 00000000000..e0f33291544 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RemovedNodeIT.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
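ListenersIT above wraps its mock listeners in `SafeInitNodeStateListener(delegate, true)`; that decorator buffers node events fired during session initialization and replays them to the delegate only after `onSessionReady`, so the delegate never observes a half-built session. A minimal registration sketch, with the logging delegate purely illustrative:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.metadata.NodeStateListenerBase;
import com.datastax.oss.driver.api.core.metadata.SafeInitNodeStateListener;
import edu.umd.cs.findbugs.annotations.NonNull;

class SafeListenerExample {
  public static void main(String[] args) {
    NodeStateListenerBase delegate =
        new NodeStateListenerBase() {
          @Override
          public void onUp(@NonNull Node node) {
            System.out.println("UP: " + node.getEndPoint());
          }
        };
    try (CqlSession session =
        CqlSession.builder()
            // true = replay events received before onSessionReady
            .addNodeStateListener(new SafeInitNodeStateListener(delegate, true))
            .build()) {
      // use the session; onUp calls arrive only after onSessionReady
    }
  }
}
```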
+ */ +package com.datastax.oss.driver.core.session; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeStateListener; +import com.datastax.oss.driver.api.core.metadata.token.TokenRange; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.internal.core.pool.ChannelPool; +import com.datastax.oss.driver.internal.core.session.DefaultSession; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.ClassRule; +import org.junit.Test; + +public class RemovedNodeIT { + + @ClassRule + public static final CustomCcmRule CCM_RULE = + CustomCcmRule.builder() + // We need 4 nodes to run this test against DSE, because it requires at least 3 nodes to + // maintain RF=3 for keyspace system_distributed + .withNodes(4) + .build(); + + @Test + public void should_signal_and_destroy_pool_when_node_gets_removed() { + RemovalListener removalListener = new RemovalListener(); + try (CqlSession session = + SessionUtils.newSession(CCM_RULE, null, removalListener, null, null)) { + assertThat(session.getMetadata().getTokenMap()).isPresent(); + Set tokenRanges = session.getMetadata().getTokenMap().get().getTokenRanges(); + assertThat(tokenRanges).hasSize(4); + CCM_RULE.getCcmBridge().decommission(2); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> removalListener.removedNode != null); + Map pools = ((DefaultSession) session).getPools(); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> !pools.containsKey(removalListener.removedNode)); + await() + .pollInterval(500, TimeUnit.MILLISECONDS) + .atMost(60, TimeUnit.SECONDS) + .until(() -> session.getMetadata().getTokenMap().get().getTokenRanges().size() == 3); + } + } + + static class RemovalListener implements NodeStateListener { + + volatile Node removedNode; + + @Override + public void onRemove(@NonNull Node node) { + removedNode = node; + } + + @Override + public void onAdd(@NonNull Node node) {} + + @Override + public void onUp(@NonNull Node node) {} + + @Override + public void onDown(@NonNull Node node) {} + + @Override + public void close() throws Exception {} + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/RequestProcessorIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java similarity index 78% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/RequestProcessorIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java index e695e5a616a..e2b3caeb1f4 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/RequestProcessorIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.session; +package com.datastax.oss.driver.core.session; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -37,10 +40,8 @@ import com.google.common.util.concurrent.Uninterruptibles; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @@ -65,28 +66,27 @@ @Category(ParallelizableTests.class) public class RequestProcessorIT { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @Rule public ExpectedException thrown = ExpectedException.none(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); public static final String KEY = "test"; @BeforeClass public static void setupSchema() { // table with clustering key where v1 == v0 * 2. 
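RequestProcessorIT, continued below, also drops JUnit 4's deprecated `ExpectedException` rule in favor of AssertJ's `catchThrowable`, which keeps the expectation next to the call that should fail. The pattern in isolation; the throwing method is a stand-in:

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.catchThrowable;

import org.junit.Test;

public class CatchThrowableExample {
  @Test
  public void should_fail_with_illegal_argument() {
    // catchThrowable returns null if nothing was thrown, so the assertion
    // below also guards against the call unexpectedly succeeding.
    Throwable t = catchThrowable(() -> methodExpectedToThrow());
    assertThat(t).isInstanceOf(IllegalArgumentException.class);
  }

  private void methodExpectedToThrow() {
    throw new IllegalArgumentException("no matching processor");
  }
}
```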
- sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder( "CREATE TABLE IF NOT EXISTS test (k text, v0 int, v1 int, PRIMARY KEY(k, v0))") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); for (int i = 0; i < 100; i++) { - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder("INSERT INTO test (k, v0, v1) VALUES (?, ?, ?)") @@ -97,14 +97,14 @@ public static void setupSchema() { private GuavaSession newSession(CqlIdentifier keyspace) { return GuavaSessionUtils.builder() - .addContactEndPoints(ccm.getContactPoints()) + .addContactEndPoints(CCM_RULE.getContactPoints()) .withKeyspace(keyspace) .build(); } @Test public void should_use_custom_request_processor_for_prepareAsync() throws Exception { - try (GuavaSession session = newSession(sessionRule.keyspace())) { + try (GuavaSession session = newSession(SESSION_RULE.keyspace())) { ListenableFuture preparedFuture = session.prepareAsync("select * from test"); @@ -123,7 +123,7 @@ public void should_use_custom_request_processor_for_prepareAsync() throws Except @Test public void should_use_custom_request_processor_for_handling_special_request_type() throws Exception { - try (GuavaSession session = newSession(sessionRule.keyspace())) { + try (GuavaSession session = newSession(SESSION_RULE.keyspace())) { // RequestProcessor executes "select v from test where k = " and returns v as Integer. int v1 = session.execute(new KeyRequest(5), KeyRequestProcessor.INT_TYPE); assertThat(v1).isEqualTo(10); // v1 = v0 * 2 @@ -136,7 +136,7 @@ public void should_use_custom_request_processor_for_handling_special_request_typ @Test public void should_use_custom_request_processor_for_executeAsync() throws Exception { - try (GuavaSession session = newSession(sessionRule.keyspace())) { + try (GuavaSession session = newSession(SESSION_RULE.keyspace())) { ListenableFuture future = session.executeAsync("select * from test"); AsyncResultSet result = Uninterruptibles.getUninterruptibly(future); assertThat(Iterables.size(result.currentPage())).isEqualTo(100); @@ -149,9 +149,14 @@ public void should_throw_illegal_argument_exception_if_no_matching_processor_fou // Since cluster does not have a processor registered for returning ListenableFuture, an // IllegalArgumentException // should be thrown. - thrown.expect(IllegalArgumentException.class); - sessionRule - .session() - .execute(SimpleStatement.newInstance("select * from test"), GuavaSession.ASYNC); + Throwable t = + catchThrowable( + () -> + SESSION_RULE + .session() + .execute( + SimpleStatement.newInstance("select * from test"), GuavaSession.ASYNC)); + + assertThat(t).isInstanceOf(IllegalArgumentException.class); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/ShutdownIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ShutdownIT.java similarity index 83% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/ShutdownIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/session/ShutdownIT.java index 7e24fac779e..7763f1ba866 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/session/ShutdownIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/session/ShutdownIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.session; +package com.datastax.oss.driver.core.session; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; @@ -42,7 +44,7 @@ public class ShutdownIT { @ClassRule - public static SimulacronRule simulacronRule = + public static final SimulacronRule SIMULACRON_RULE = new SimulacronRule(ClusterSpec.builder().withNodes(1)); private static final String QUERY_STRING = "select * from foo"; @@ -52,10 +54,10 @@ public void should_fail_requests_when_session_is_closed() throws Exception { // Given // Prime with a bit of delay to increase the chance that a query will be aborted in flight when // we force-close the session - simulacronRule + SIMULACRON_RULE .cluster() .prime(when(QUERY_STRING).then(noRows()).delay(20, TimeUnit.MILLISECONDS)); - CqlSession session = SessionUtils.newSession(simulacronRule); + CqlSession session = SessionUtils.newSession(SIMULACRON_RULE); // When // Max out the in-flight requests on the connection (from a separate thread pool to get a bit of @@ -99,9 +101,9 @@ public void should_fail_requests_when_session_is_closed() throws Exception { AllNodesFailedException anfe = (AllNodesFailedException) error; // if there were 0 errors, it's a NoNodeAvailableException which is // acceptable.
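The branch that follows separates an `AllNodesFailedException` carrying per-node errors from the empty case, which driver 4 models as the `NoNodeAvailableException` subclass. A compact sketch of that unwrapping, assuming driver 4.10+ where `getAllErrors()` is available:

```java
import com.datastax.oss.driver.api.core.AllNodesFailedException;
import com.datastax.oss.driver.api.core.NoNodeAvailableException;

class ShutdownErrorHandling {
  static String describe(AllNodesFailedException anfe) {
    if (anfe instanceof NoNodeAvailableException || anfe.getAllErrors().isEmpty()) {
      // No node was even tried, e.g. every pool was already closed.
      return "no node was available";
    }
    // Otherwise unwrap the first attempt's error on the first node tried.
    Throwable first = anfe.getAllErrors().values().iterator().next().get(0);
    return "first failure: " + first;
  }
}
```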
- if (anfe.getErrors().size() > 0) { - assertThat(anfe.getErrors()).hasSize(1); - error = anfe.getErrors().values().iterator().next(); + if (anfe.getAllErrors().size() > 0) { + assertThat(anfe.getAllErrors()).hasSize(1); + error = anfe.getAllErrors().values().iterator().next().get(0); if (!(error instanceof IllegalStateException) && !error.getMessage().endsWith("is closing")) { unexpectedErrors.add(error.toString()); @@ -118,9 +120,9 @@ public void should_fail_requests_when_session_is_closed() throws Exception { } }); } - TimeUnit.MILLISECONDS.sleep(100); + TimeUnit.MILLISECONDS.sleep(1000); session.forceCloseAsync(); - assertThat(gotSessionClosedError.await(1, TimeUnit.SECONDS)) + assertThat(gotSessionClosedError.await(10, TimeUnit.SECONDS)) .as("Expected to get the 'Session is closed' error shortly after shutting down") .isTrue(); requestExecutor.shutdownNow(); @@ -131,7 +133,7 @@ public void should_fail_requests_when_session_is_closed() throws Exception { @Test public void should_handle_getting_closed_twice() { - CqlSession session = SessionUtils.newSession(simulacronRule); + CqlSession session = SessionUtils.newSession(SIMULACRON_RULE); session.close(); session.close(); } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/specex/SpeculativeExecutionIT.java similarity index 94% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/specex/SpeculativeExecutionIT.java index fa36c143b42..cc13c821b9e 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/specex/SpeculativeExecutionIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.specex; +package com.datastax.oss.driver.core.specex; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.isBootstrapping; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; @@ -29,6 +31,7 @@ import com.datastax.oss.driver.api.core.context.DriverContext; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter; @@ -51,22 +54,22 @@ public class SpeculativeExecutionIT { // Note: it looks like shorter delays cause precision issues with Netty timers private static final long SPECULATIVE_DELAY = 1000; - private static String QUERY_STRING = "select * from foo"; + private static final String QUERY_STRING = "select * from foo"; private static final SimpleStatement QUERY = SimpleStatement.newInstance(QUERY_STRING); // Shared across all tests methods. - public static @ClassRule SimulacronRule simulacron = + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = new SimulacronRule(ClusterSpec.builder().withNodes(3)); - @SuppressWarnings("deprecation") private final QueryCounter counter = - QueryCounter.builder(simulacron.cluster()) + QueryCounter.builder(SIMULACRON_RULE.cluster()) .withFilter((l) -> l.getQuery().equals(QUERY_STRING)) .build(); @Before public void clear() { - simulacron.cluster().clearPrimes(true); + SIMULACRON_RULE.cluster().clearPrimes(true); } @Test @@ -307,7 +310,7 @@ public void should_not_speculatively_execute_when_defined_in_profile() { // Build a new Cluster instance for each test, because we need different configurations private CqlSession buildSession(int maxSpeculativeExecutions, long speculativeDelayMs) { return SessionUtils.newSession( - simulacron, + SIMULACRON_RULE, SessionUtils.configLoaderBuilder() .withDuration( DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(SPECULATIVE_DELAY * 10)) @@ -389,7 +392,7 @@ private CqlSession buildSessionWithProfile( builder = builder.startProfile("profile2").withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE"); - CqlSession session = SessionUtils.newSession(simulacron, builder.build()); + CqlSession session = SessionUtils.newSession(SIMULACRON_RULE, builder.build()); // validate profile data DriverContext context = session.getContext(); @@ -428,6 +431,6 @@ private CqlSession buildSessionWithProfile( } private void primeNode(int id, PrimeDsl.PrimeBuilder primeBuilder) { - simulacron.cluster().node(id).prime(primeBuilder); + SIMULACRON_RULE.cluster().node(id).prime(primeBuilder); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java similarity index 75% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java index eb594fcb22f..e2e39be5cd6 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java +++ 
b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryHostnameValidationIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.ssl; +package com.datastax.oss.driver.core.ssl; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -27,7 +29,8 @@ public class DefaultSslEngineFactoryHostnameValidationIT { - @ClassRule public static CustomCcmRule ccm = CustomCcmRule.builder().withSslLocalhostCn().build(); + @ClassRule + public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslLocalhostCn().build(); /** * Ensures that SSL connectivity can be established with hostname validation enabled when the @@ -48,7 +51,7 @@ public void should_connect_if_hostname_validation_enabled_and_hostname_matches() DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { session.execute("select * from system.local"); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryIT.java new file mode 100644 index 00000000000..a2afeade3ce --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryIT.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.core.ssl; + +import com.datastax.oss.driver.api.core.AllNodesFailedException; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.assertions.Assertions; +import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; +import java.net.InetSocketAddress; +import org.junit.ClassRule; +import org.junit.Test; + +public class DefaultSslEngineFactoryIT { + + @ClassRule public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSsl().build(); + + @Test + public void should_connect_with_ssl() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) + .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PATH, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) + .build(); + + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { + session.execute("select * from system.local"); + } + } + + @Test(expected = AllNodesFailedException.class) + public void should_not_connect_if_hostname_validation_enabled_and_hostname_does_not_match() { + // should not succeed as the certificate does not have a CN that would match the hostname, + // (unless the hostname is node1).
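Beyond the tests, the options exercised here (including `SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN`, added further below for `should_respect_config_for_san_resolution`) are the same knobs an application sets to enable the default SSL factory. A sketch with placeholder paths and password:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory;

class SslConfigExample {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withClass(
                DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class)
            .withString(DefaultDriverOption.SSL_TRUSTSTORE_PATH, "/path/to/client.truststore")
            .withString(DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, "changeit")
            // Match certificate SANs against the raw address, skipping DNS reverse lookup:
            .withBoolean(DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, false)
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      session.execute("select * from system.local");
    }
  }
}
```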
+ DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PATH, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { + session.execute("select * from system.local"); + } + } + + @Test(expected = AllNodesFailedException.class) + public void should_not_connect_if_truststore_not_provided() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) + .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { + session.execute("select * from system.local"); + } + } + + @Test(expected = AllNodesFailedException.class) + public void should_not_connect_if_not_using_ssl() { + try (CqlSession session = SessionUtils.newSession(CCM_RULE)) { + session.execute("select * from system.local"); + } + } + + public static class InstrumentedSslEngineFactory extends DefaultSslEngineFactory { + int countReverseLookups = 0; + int countNoLookups = 0; + + public InstrumentedSslEngineFactory(DriverContext driverContext) { + super(driverContext); + } + + @Override + protected String hostMaybeFromDnsReverseLookup(InetSocketAddress addr) { + countReverseLookups++; + return super.hostMaybeFromDnsReverseLookup(addr); + } + + @Override + protected String hostNoLookup(InetSocketAddress addr) { + countNoLookups++; + return super.hostNoLookup(addr); + } + }; + + @Test + public void should_respect_config_for_san_resolution() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withClass( + DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, InstrumentedSslEngineFactory.class) + .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PATH, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { + InstrumentedSslEngineFactory ssl = + (InstrumentedSslEngineFactory) session.getContext().getSslEngineFactory().get(); + Assertions.assertThat(ssl.countReverseLookups).isGreaterThan(0); + Assertions.assertThat(ssl.countNoLookups).isEqualTo(0); + } + + loader = + SessionUtils.configLoaderBuilder() + .withClass( + DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, InstrumentedSslEngineFactory.class) + .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PATH, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()) + .withString( + DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, + CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) + .withBoolean(DefaultDriverOption.SSL_ALLOW_DNS_REVERSE_LOOKUP_SAN, false) + .build(); + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { + InstrumentedSslEngineFactory ssl = + (InstrumentedSslEngineFactory) session.getContext().getSslEngineFactory().get(); + Assertions.assertThat(ssl.countReverseLookups).isEqualTo(0); + Assertions.assertThat(ssl.countNoLookups).isGreaterThan(0); + } + } +} diff --git 
a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java similarity index 70% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java index 98eeff1cab8..fc6c67c0307 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.ssl; +package com.datastax.oss.driver.core.ssl; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -30,7 +32,8 @@ @Category(IsolatedTests.class) public class DefaultSslEngineFactoryPropertyBasedIT { - @ClassRule public static CustomCcmRule ccm = CustomCcmRule.builder().withSslLocalhostCn().build(); + @ClassRule + public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslLocalhostCn().build(); @Test public void should_connect_with_ssl() { @@ -42,7 +45,7 @@ public void should_connect_with_ssl() { SessionUtils.configLoaderBuilder() .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { session.execute("select * from system.local"); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java similarity index 74% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java index e0fcdb81503..43f2b9d5a99 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryPropertyBasedWithClientAuthIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
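The `PropertyBased` variants on either side of this header configure `DefaultSslEngineFactory` with no explicit truststore options, so the factory falls back to the JVM-default `SSLContext`; presumably that context is primed through the standard JSSE system properties, which would also explain the `IsolatedTests` category, since those properties are process-wide. The property names below are standard JSSE, but the fallback behavior is an assumption worth verifying against the driver source:

```java
class JssePropertiesExample {
  public static void main(String[] args) {
    // Standard JSSE properties read by the JVM-default SSLContext.
    // Being process-wide, they cannot coexist with other SSL setups in parallel tests.
    System.setProperty("javax.net.ssl.trustStore", "/path/to/client.truststore");
    System.setProperty("javax.net.ssl.trustStorePassword", "changeit");
  }
}
```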
*/ -package com.datastax.oss.driver.api.core.ssl; +package com.datastax.oss.driver.core.ssl; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; @@ -30,7 +32,8 @@ @Category(IsolatedTests.class) public class DefaultSslEngineFactoryPropertyBasedWithClientAuthIT { - @ClassRule public static CustomCcmRule ccm = CustomCcmRule.builder().withSslAuth().build(); + @ClassRule + public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslAuth().build(); @Test public void should_connect_with_ssl_using_client_auth() { @@ -47,7 +50,7 @@ public void should_connect_with_ssl_using_client_auth() { .withClass(DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class) .withBoolean(DefaultDriverOption.SSL_HOSTNAME_VALIDATION, false) .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { session.execute("select * from system.local"); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java similarity index 78% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java index b0fd67b91ec..ab98dcc953d 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/DefaultSslEngineFactoryWithClientAuthIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.ssl; +package com.datastax.oss.driver.core.ssl; import com.datastax.oss.driver.api.core.AllNodesFailedException; import com.datastax.oss.driver.api.core.CqlSession; @@ -28,7 +30,8 @@ public class DefaultSslEngineFactoryWithClientAuthIT { - @ClassRule public static CustomCcmRule ccm = CustomCcmRule.builder().withSslAuth().build(); + @ClassRule + public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSslAuth().build(); @Test public void should_connect_with_ssl_using_client_auth() { @@ -49,7 +52,7 @@ public void should_connect_with_ssl_using_client_auth() { DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { session.execute("select * from system.local"); } } @@ -67,7 +70,7 @@ public void should_not_connect_with_ssl_using_client_auth_if_keystore_not_set() DefaultDriverOption.SSL_TRUSTSTORE_PASSWORD, CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD) .build(); - try (CqlSession session = SessionUtils.newSession(ccm, loader)) { + try (CqlSession session = SessionUtils.newSession(CCM_RULE, loader)) { session.execute("select * from system.local"); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/ProgrammaticSslIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/ProgrammaticSslIT.java new file mode 100644 index 00000000000..148c7c91baa --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/ssl/ProgrammaticSslIT.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.core.ssl; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.ssl.ProgrammaticSslEngineFactory; +import com.datastax.oss.driver.api.core.ssl.SslEngineFactory; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.security.KeyStore; +import java.security.SecureRandom; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import org.junit.ClassRule; +import org.junit.Test; + +public class ProgrammaticSslIT { + + @ClassRule public static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().withSsl().build(); + + @Test + public void should_connect_with_programmatic_factory() { + SslEngineFactory factory = new ProgrammaticSslEngineFactory(createSslContext()); + try (CqlSession session = + (CqlSession) + SessionUtils.baseBuilder() + .addContactEndPoints(CCM_RULE.getContactPoints()) + .withSslEngineFactory(factory) + .build()) { + session.execute("select * from system.local"); + } + } + + @Test + public void should_connect_with_programmatic_ssl_context() { + SSLContext sslContext = createSslContext(); + try (CqlSession session = + (CqlSession) + SessionUtils.baseBuilder() + .addContactEndPoints(CCM_RULE.getContactPoints()) + .withSslContext(sslContext) + .build()) { + session.execute("select * from system.local"); + } + } + + private static SSLContext createSslContext() { + try { + SSLContext context = SSLContext.getInstance("SSL"); + TrustManagerFactory tmf = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + try (InputStream tsf = + Files.newInputStream( + Paths.get(CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_FILE.getAbsolutePath()))) { + KeyStore ts = KeyStore.getInstance("JKS"); + char[] password = CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD.toCharArray(); + ts.load(tsf, password); + tmf.init(ts); + } + KeyManagerFactory kmf = + KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + try (InputStream ksf = + Files.newInputStream( + Paths.get(CcmBridge.DEFAULT_CLIENT_KEYSTORE_FILE.getAbsolutePath()))) { + KeyStore ks = KeyStore.getInstance("JKS"); + char[] password = CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD.toCharArray(); + ks.load(ksf, password); + kmf.init(ks, password); + } + context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + return context; + } catch (Exception e) { + throw new AssertionError("Unexpected error while creating SSL context", e); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/throttling/ThrottlingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/throttling/ThrottlingIT.java similarity index 54% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/throttling/ThrottlingIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/throttling/ThrottlingIT.java index 5e2acc06fd2..6fa1a37355b 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/throttling/ThrottlingIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/throttling/ThrottlingIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
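The new ProgrammaticSslIT above exercises the two programmatic SSL entry points side by side: wrapping an SSLContext in a ProgrammaticSslEngineFactory, and handing the bare SSLContext to the builder, which the test treats as equivalent. For reference, a minimal application-side sketch of the same API; the contact point, datacenter name, and the use of the JVM's default SSLContext are illustrative assumptions, not part of this change:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import java.net.InetSocketAddress;
import javax.net.ssl.SSLContext;

public class ProgrammaticSslExample {
  public static void main(String[] args) throws Exception {
    // A real application would load its own trust/key material,
    // as the test's createSslContext() does.
    SSLContext sslContext = SSLContext.getDefault();
    try (CqlSession session =
        CqlSession.builder()
            .addContactPoint(new InetSocketAddress("127.0.0.1", 9042)) // placeholder
            .withLocalDatacenter("datacenter1") // placeholder
            // Same effect as .withSslEngineFactory(new ProgrammaticSslEngineFactory(sslContext))
            .withSslContext(sslContext)
            .build()) {
      session.execute("select * from system.local");
    }
  }
}
```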
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,41 +15,47 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.datastax.oss.driver.api.core.throttling; +package com.datastax.oss.driver.core.throttling; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.RequestThrottlingException; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; +import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; +import org.junit.Before; import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.experimental.categories.Category; +@Category(ParallelizableTests.class) public class ThrottlingIT { private static final String QUERY = "select * from foo"; + private static final int maxConcurrentRequests = 10; + private static final int maxQueueSize = 10; @Rule public SimulacronRule simulacron = new SimulacronRule(ClusterSpec.builder().withNodes(1)); - @Rule public ExpectedException thrown = ExpectedException.none(); - @Test - public void should_reject_request_when_throttling_by_concurrency() { + private DriverConfigLoader loader = null; + @Before + public void setUp() { // Add a delay so that requests don't complete during the test simulacron .cluster() .prime(PrimeDsl.when(QUERY).then(PrimeDsl.noRows()).delay(5, TimeUnit.SECONDS)); - - int maxConcurrentRequests = 10; - int maxQueueSize = 10; - - DriverConfigLoader loader = + loader = SessionUtils.configLoaderBuilder() .withClass( DefaultDriverOption.REQUEST_THROTTLER_CLASS, @@ -57,7 +65,10 @@ public void should_reject_request_when_throttling_by_concurrency() { maxConcurrentRequests) .withInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE, maxQueueSize) .build(); + } + @Test + public void should_reject_request_when_throttling_by_concurrency() { try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { // Saturate the session and fill the queue @@ -66,11 +77,27 @@ public void 
should_reject_request_when_throttling_by_concurrency() { } // The next query should be rejected - thrown.expect(RequestThrottlingException.class); - thrown.expectMessage( - "The session has reached its maximum capacity " - + "(concurrent requests: 10, queue size: 10)"); + Throwable t = catchThrowable(() -> session.execute(QUERY)); + + assertThat(t) + .isInstanceOf(RequestThrottlingException.class) + .hasMessage( + "The session has reached its maximum capacity " + + "(concurrent requests: 10, queue size: 10)"); + } + } + + @Test + public void should_propagate_cancel_to_throttler() { + try (CqlSession session = SessionUtils.newSession(simulacron, loader)) { + + // Try to saturate the session and fill the queue + for (int i = 0; i < maxConcurrentRequests + maxQueueSize; i++) { + CompletionStage future = session.executeAsync(QUERY); + future.toCompletableFuture().cancel(true); + } + // The next query should be successful, because the previous queries were cancelled session.execute(QUERY); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestIdGeneratorIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestIdGeneratorIT.java new file mode 100644 index 00000000000..516a62bb1f7 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestIdGeneratorIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
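The reworked ThrottlingIT above moves the throttler configuration into setUp() and adds should_propagate_cancel_to_throttler, which depends on cancelled requests releasing their throttler slots. A sketch of the same throttler configured outside the test harness through the programmatic config loader; the limits come from the test, the rest is an illustrative assumption:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.internal.core.session.throttling.ConcurrencyLimitingRequestThrottler;

public class ThrottlingExample {
  public static void main(String[] args) {
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withClass(
                DefaultDriverOption.REQUEST_THROTTLER_CLASS,
                ConcurrencyLimitingRequestThrottler.class)
            .withInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_CONCURRENT_REQUESTS, 10)
            .withInt(DefaultDriverOption.REQUEST_THROTTLER_MAX_QUEUE_SIZE, 10)
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      // Cancelling the future must release its throttler slot;
      // that is exactly what should_propagate_cancel_to_throttler asserts.
      session
          .executeAsync("select * from system.local")
          .toCompletableFuture()
          .cancel(true);
    }
  }
}
```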
+ */ +package com.datastax.oss.driver.core.tracker; + +import static com.datastax.oss.driver.Assertions.assertThatStage; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class RequestIdGeneratorIT { + private CcmRule ccmRule = CcmRule.getInstance(); + + @Rule public TestRule chain = RuleChain.outerRule(ccmRule); + + @Test + public void should_write_uuid_to_custom_payload_with_key() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString(DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, "UuidRequestIdGenerator") + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + String query = "SELECT * FROM system.local"; + ResultSet rs = session.execute(query); + ByteBuffer id = rs.getExecutionInfo().getRequest().getCustomPayload().get("request-id"); + assertThat(id.remaining()).isEqualTo(73); + } + } + + @Test + public void should_write_default_request_id_to_custom_payload_with_key() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString( + DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, "W3CContextRequestIdGenerator") + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + String query = "SELECT * FROM system.local"; + ResultSet rs = session.execute(query); + ByteBuffer id = rs.getExecutionInfo().getRequest().getCustomPayload().get("request-id"); + assertThat(id.remaining()).isEqualTo(55); + } + } + + @Test + public void should_use_customized_request_id_generator() { + RequestIdGenerator myRequestIdGenerator = + new RequestIdGenerator() { + @Override + public String getSessionRequestId() { + return "123"; + } + + @Override + public String getNodeRequestId(@NonNull Request statement, @NonNull String parentId) { + return "456"; + } + + @Override + public Statement getDecoratedStatement( + @NonNull Statement statement, @NonNull String requestId) { + Map customPayload = + NullAllowingImmutableMap.builder() + .putAll(statement.getCustomPayload()) + .put("trace_key", ByteBuffer.wrap(requestId.getBytes(StandardCharsets.UTF_8))) + .build(); + return statement.setCustomPayload(customPayload); + } + }; + try (CqlSession session = + (CqlSession) + SessionUtils.baseBuilder() + .addContactEndPoints(ccmRule.getContactPoints()) + .withRequestIdGenerator(myRequestIdGenerator) + .build()) { + String query = "SELECT * FROM system.local"; + ResultSet rs = session.execute(query); + 
ByteBuffer id = rs.getExecutionInfo().getRequest().getCustomPayload().get("trace_key"); + assertThat(id).isEqualTo(ByteBuffer.wrap("456".getBytes(StandardCharsets.UTF_8))); + } + } + + @Test + public void should_not_write_id_to_custom_payload_when_key_is_not_set() { + DriverConfigLoader loader = SessionUtils.configLoaderBuilder().build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + String query = "SELECT * FROM system.local"; + ResultSet rs = session.execute(query); + assertThat(rs.getExecutionInfo().getRequest().getCustomPayload().get("request-id")).isNull(); + } + } + + @Test + public void should_succeed_with_null_value_in_custom_payload() { + DriverConfigLoader loader = + SessionUtils.configLoaderBuilder() + .withString( + DefaultDriverOption.REQUEST_ID_GENERATOR_CLASS, "W3CContextRequestIdGenerator") + .build(); + try (CqlSession session = SessionUtils.newSession(ccmRule, loader)) { + String query = "SELECT * FROM system.local"; + Map customPayload = + new NullAllowingImmutableMap.Builder(1).put("my_key", null).build(); + SimpleStatement statement = + SimpleStatement.newInstance(query).setCustomPayload(customPayload); + assertThatStage(session.executeAsync(statement)).isSuccess(); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/tracker/RequestLoggerIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestLoggerIT.java similarity index 86% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/tracker/RequestLoggerIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestLoggerIT.java index 6ab1010c7ea..ae2c46fe4a0 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/tracker/RequestLoggerIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestLoggerIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
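RequestIdGeneratorIT above drives the new RequestIdGenerator extension point three ways: the UuidRequestIdGenerator and W3CContextRequestIdGenerator classes via REQUEST_ID_GENERATOR_CLASS, and a custom implementation via withRequestIdGenerator(...). A standalone sketch of such an implementation, modeled on the anonymous class in the test; the payload key and id scheme are assumptions, and the generics are spelled out here even though this diff's rendering elides them:

```java
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.session.Request;
import com.datastax.oss.driver.api.core.tracker.RequestIdGenerator;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

/** Hypothetical generator that tags every request with a UUID under the "request-id" key. */
public class UuidPayloadRequestIdGenerator implements RequestIdGenerator {

  @Override
  public String getSessionRequestId() {
    return UUID.randomUUID().toString();
  }

  @Override
  public String getNodeRequestId(@NonNull Request request, @NonNull String parentId) {
    // Derive each per-node attempt id from the session-level id.
    return parentId + "-" + UUID.randomUUID();
  }

  @Override
  public Statement<?> getDecoratedStatement(
      @NonNull Statement<?> statement, @NonNull String requestId) {
    // Merge into the existing payload instead of replacing it, as the test does with putAll.
    Map<String, ByteBuffer> payload = new HashMap<>(statement.getCustomPayload());
    payload.put("request-id", ByteBuffer.wrap(requestId.getBytes(StandardCharsets.UTF_8)));
    return statement.setCustomPayload(payload);
  }
}
```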
*/ -package com.datastax.oss.driver.api.core.tracker; +package com.datastax.oss.driver.core.tracker; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError; @@ -34,17 +36,18 @@ import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.servererrors.ServerError; import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.tracker.RequestLogger; import com.datastax.oss.simulacron.common.cluster.ClusterSpec; import com.datastax.oss.simulacron.common.codec.ConsistencyLevel; import java.time.Duration; +import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -53,6 +56,7 @@ import org.junit.Before; import org.junit.Rule; import org.junit.Test; +import org.junit.experimental.categories.Category; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; import org.junit.runner.RunWith; @@ -65,22 +69,31 @@ import org.slf4j.LoggerFactory; @RunWith(MockitoJUnitRunner.class) +@Category(ParallelizableTests.class) public class RequestLoggerIT { private static final Pattern LOG_PREFIX_PER_REQUEST = Pattern.compile("\\[s\\d*\\|\\d*]"); + + @SuppressWarnings("UnnecessaryLambda") private static final Predicate WITH_PER_REQUEST_PREFIX = log -> LOG_PREFIX_PER_REQUEST.matcher(log).lookingAt(); + private static final Pattern LOG_PREFIX_WITH_EXECUTION_NUMBER = Pattern.compile("\\[s\\d*\\|\\d*\\|\\d*]"); + + @SuppressWarnings("UnnecessaryLambda") private static final Predicate WITH_EXECUTION_PREFIX = log -> LOG_PREFIX_WITH_EXECUTION_NUMBER.matcher(log).lookingAt(); private static final String QUERY = "SELECT release_version FROM system.local"; - private SimulacronRule simulacronRule = new SimulacronRule(ClusterSpec.builder().withNodes(3)); + private final SimulacronRule simulacronRule = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); private final DriverConfigLoader requestLoader = SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.REQUEST_TRACKER_CLASS, RequestLogger.class) + .withClassList( + DefaultDriverOption.REQUEST_TRACKER_CLASSES, + Collections.singletonList(RequestLogger.class)) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, true) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, true) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, true) @@ -107,12 +120,14 @@ public class RequestLoggerIT { .withBoolean(DefaultDriverOption.REQUEST_LOGGER_STACK_TRACES, false) .build(); - private SessionRule sessionRuleRequest = + private final SessionRule sessionRuleRequest = SessionRule.builder(simulacronRule).withConfigLoader(requestLoader).build(); private final DriverConfigLoader nodeLoader = SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.REQUEST_TRACKER_CLASS, RequestNodeLoggerExample.class) + .withClassList( + 
DefaultDriverOption.REQUEST_TRACKER_CLASSES, + Collections.singletonList(RequestNodeLoggerExample.class)) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, true) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SLOW_ENABLED, true) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, true) @@ -142,14 +157,16 @@ public class RequestLoggerIT { DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, SortingLoadBalancingPolicy.class) .build(); - private SessionRule sessionRuleNode = + private final SessionRule sessionRuleNode = SessionRule.builder(simulacronRule).withConfigLoader(nodeLoader).build(); - private SessionRule sessionRuleDefaults = + private final SessionRule sessionRuleDefaults = SessionRule.builder(simulacronRule) .withConfigLoader( SessionUtils.configLoaderBuilder() - .withClass(DefaultDriverOption.REQUEST_TRACKER_CLASS, RequestLogger.class) + .withClassList( + DefaultDriverOption.REQUEST_TRACKER_CLASSES, + Collections.singletonList(RequestLogger.class)) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED, true) .withBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED, true) .startProfile("low-threshold") @@ -199,7 +216,7 @@ public void should_log_successful_request() { sessionRuleRequest.session().execute(QUERY); // Then - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); + verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); assertThat(loggingEventCaptor.getValue().getFormattedMessage()) .contains("Success", "[0 values]", QUERY) .matches(WITH_PER_REQUEST_PREFIX); @@ -214,7 +231,7 @@ public void should_log_successful_request_with_defaults() { sessionRuleDefaults.session().execute(QUERY); // Then - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); + verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); assertThat(loggingEventCaptor.getValue().getFormattedMessage()) .contains("Success", "[0 values]", QUERY) .matches(WITH_PER_REQUEST_PREFIX); @@ -234,7 +251,7 @@ public void should_log_failed_request_with_stack_trace() { } // Then - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); + verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); ILoggingEvent log = loggingEventCaptor.getValue(); assertThat(log.getFormattedMessage()) .contains("Error", "[0 values]", QUERY) @@ -257,7 +274,7 @@ public void should_log_failed_request_with_stack_trace_with_defaults() { } // Then - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); + verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); ILoggingEvent log = loggingEventCaptor.getValue(); assertThat(log.getFormattedMessage()) .contains("Error", "[0 values]", QUERY, ServerError.class.getName()) @@ -280,7 +297,7 @@ public void should_log_failed_request_without_stack_trace() { } // Then - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); + verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); ILoggingEvent log = loggingEventCaptor.getValue(); assertThat(log.getFormattedMessage()) .contains("Error", "[0 values]", QUERY, ServerError.class.getName()) @@ -299,7 +316,7 @@ public void should_log_slow_request() { .execute(SimpleStatement.builder(QUERY).setExecutionProfileName("low-threshold").build()); // Then - verify(appender, timeout(500)).doAppend(loggingEventCaptor.capture()); + verify(appender, timeout(5000)).doAppend(loggingEventCaptor.capture()); assertThat(loggingEventCaptor.getValue().getFormattedMessage()) .contains("Slow", 
"[0 values]", QUERY) .matches(WITH_PER_REQUEST_PREFIX); @@ -344,7 +361,7 @@ public void should_log_failed_nodes_on_successful_request() { .execute(SimpleStatement.builder(QUERY).setExecutionProfileName("sorting-lbp").build()); // Then - verify(appender, new Timeout(500, VerificationModeFactory.times(3))) + verify(appender, new Timeout(5000, VerificationModeFactory.times(3))) .doAppend(loggingEventCaptor.capture()); List events = loggingEventCaptor.getAllValues(); assertThat(events.get(0).getFormattedMessage()) @@ -374,10 +391,10 @@ public void should_log_successful_nodes_on_successful_request() { .prime(when(QUERY).then(rows().row("release_version", "3.0.0"))); // When - ResultSet set = sessionRuleNode.session().execute(QUERY); + sessionRuleNode.session().execute(QUERY); // Then - verify(appender, new Timeout(500, VerificationModeFactory.times(2))) + verify(appender, new Timeout(5000, VerificationModeFactory.times(2))) .doAppend(loggingEventCaptor.capture()); List events = loggingEventCaptor.getAllValues(); assertThat(events.get(0).getFormattedMessage()) diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/tracker/RequestNodeLoggerExample.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestNodeLoggerExample.java similarity index 84% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/tracker/RequestNodeLoggerExample.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestNodeLoggerExample.java index 77441987700..8eb2fb80a73 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/tracker/RequestNodeLoggerExample.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/tracker/RequestNodeLoggerExample.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.datastax.oss.driver.api.core.tracker; +package com.datastax.oss.driver.core.tracker; import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; @@ -37,7 +39,7 @@ public void onNodeError( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String logPrefix) { + @NonNull String nodeRequestLogPrefix) { if (!executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_ERROR_ENABLED)) { return; } @@ -64,7 +66,7 @@ public void onNodeError( maxValues, maxValueLength, showStackTraces, - logPrefix); + nodeRequestLogPrefix); } @Override @@ -73,7 +75,7 @@ public void onNodeSuccess( long latencyNanos, @NonNull DriverExecutionProfile executionProfile, @NonNull Node node, - @NonNull String logPrefix) { + @NonNull String nodeRequestLogPrefix) { boolean successEnabled = executionProfile.getBoolean(DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED); boolean slowEnabled = @@ -112,6 +114,6 @@ public void onNodeSuccess( showValues, maxValues, maxValueLength, - logPrefix); + nodeRequestLogPrefix); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/CqlIntToStringCodec.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/CqlIntToStringCodec.java new file mode 100644 index 00000000000..f509439fe35 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/CqlIntToStringCodec.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.type.codec; + +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * A sample user codec implementation that we use in our tests. + * + *
It maps a CQL int to a Java string containing its textual representation. + */ +public class CqlIntToStringCodec extends MappingCodec<Integer, String> { + + public CqlIntToStringCodec() { + super(TypeCodecs.INT, GenericType.STRING); + } + + @Nullable + @Override + protected String innerToOuter(@Nullable Integer value) { + return value == null ? null : value.toString(); + } + + @Nullable + @Override + protected Integer outerToInner(@Nullable String value) { + return value == null ? null : Integer.parseInt(value); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/ExtraTypeCodecsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/ExtraTypeCodecsIT.java new file mode 100644 index 00000000000..c5db0376efb --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/ExtraTypeCodecsIT.java @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.core.type.codec; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import java.util.stream.Stream; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class ExtraTypeCodecsIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); +
private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private enum TableField { + cql_text("text_value", "text"), + cql_int("integer_value", "int"), + cql_vector("vector_value", "vector<float, 3>"), + cql_list_of_text("list_of_text_value", "list<text>"), + cql_timestamp("timestamp_value", "timestamp"), + cql_boolean("boolean_value", "boolean"), + ; + + final String name; + final String ty; + + TableField(String name, String ty) { + this.name = name; + this.ty = ty; + } + + private String definition() { + return String.format("%s %s", name, ty); + } + } + + @BeforeClass + public static void setupSchema() { + List<String> fieldDefinitions = new ArrayList<>(); + fieldDefinitions.add("key uuid PRIMARY KEY"); + Stream.of(TableField.values()) + .forEach( + tf -> { + // TODO: Move this check to BackendRequirementRule once JAVA-3069 is resolved. + if (tf == TableField.cql_vector + && CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) < 0) { + // don't add vector type before cassandra version 5.0 + return; + } + fieldDefinitions.add(tf.definition()); + }); + SESSION_RULE + .session() + .execute( + SimpleStatement.builder( + String.format( + "CREATE TABLE IF NOT EXISTS extra_type_codecs_it (%s)", + String.join(", ", fieldDefinitions))) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + + private <T> void insertAndRead(TableField field, T value, TypeCodec<T> codec) { + CqlSession session = SESSION_RULE.session(); + // write value under new key using provided codec + UUID key = UUID.randomUUID(); + + PreparedStatement preparedInsert = + session.prepare( + SimpleStatement.builder( + String.format( + "INSERT INTO extra_type_codecs_it (key, %s) VALUES (?, ?)", field.name)) + .build()); + BoundStatement boundInsert = + preparedInsert + .boundStatementBuilder() + .setUuid("key", key) + .set(field.name, value, codec) + .build(); + session.execute(boundInsert); + + // read value using provided codec and assert result + PreparedStatement preparedSelect = + session.prepare( + SimpleStatement.builder( + String.format("SELECT %s FROM extra_type_codecs_it WHERE key = ?", field.name)) + .build()); + BoundStatement boundSelect = preparedSelect.boundStatementBuilder().setUuid("key", key).build(); + assertThat(session.execute(boundSelect).one().get(field.name, codec)).isEqualTo(value); + } + + @Test + public void enum_names_of() { + insertAndRead( + TableField.cql_text, TestEnum.value1, ExtraTypeCodecs.enumNamesOf(TestEnum.class)); + } + + @Test + public void enum_ordinals_of() { + insertAndRead( + TableField.cql_int, TestEnum.value1, ExtraTypeCodecs.enumOrdinalsOf(TestEnum.class)); + } + + // Also requires -Dccm.branch=vsearch and the ability to build that branch locally + @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "5.0.0") + @Test + public void float_to_vector_array() { + // @BackendRequirement on test methods that use @ClassRule to configure CcmRule requires the + // @Rule BackendRequirementRule included with fix JAVA-3069. Until then we will ignore this test + // with an assume.
+ Assume.assumeTrue( + "Requires Cassandra 5.0 or greater", + CCM_RULE.getCassandraVersion().compareTo(Version.parse("5.0")) >= 0); + insertAndRead( + TableField.cql_vector, + new float[] {1.1f, 0f, Float.NaN}, + ExtraTypeCodecs.floatVectorToArray(3)); + } + + @Test + public void json_java_class() { + insertAndRead( + TableField.cql_text, + new TestJsonAnnotatedPojo("example", Arrays.asList(1, 2, 3)), + ExtraTypeCodecs.json(TestJsonAnnotatedPojo.class)); + } + + @Test + public void json_java_class_and_object_mapper() { + insertAndRead( + TableField.cql_text, + TestPojo.create(1, "abc", "def"), + ExtraTypeCodecs.json(TestPojo.class, new ObjectMapper())); + } + + @Test + public void list_to_array_of() { + insertAndRead( + TableField.cql_list_of_text, + new String[] {"hello", "kitty"}, + ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT)); + } + + @Test + public void local_timestamp_at() { + ZoneId systemZoneId = ZoneId.systemDefault(); + insertAndRead( + TableField.cql_timestamp, + LocalDateTime.now(systemZoneId).truncatedTo(ChronoUnit.MILLIS), + ExtraTypeCodecs.localTimestampAt(systemZoneId)); + } + + @Test + public void optional_of() { + insertAndRead( + TableField.cql_boolean, Optional.empty(), ExtraTypeCodecs.optionalOf(TypeCodecs.BOOLEAN)); + insertAndRead( + TableField.cql_boolean, Optional.of(true), ExtraTypeCodecs.optionalOf(TypeCodecs.BOOLEAN)); + } + + @Test + public void timestamp_at() { + ZoneId systemZoneId = ZoneId.systemDefault(); + insertAndRead( + TableField.cql_timestamp, + Instant.now().truncatedTo(ChronoUnit.MILLIS), + ExtraTypeCodecs.timestampAt(systemZoneId)); + } + + @Test + public void timestamp_millis_at() { + ZoneId systemZoneId = ZoneId.systemDefault(); + insertAndRead( + TableField.cql_timestamp, + Instant.now().toEpochMilli(), + ExtraTypeCodecs.timestampMillisAt(systemZoneId)); + } + + @Test + public void zoned_timestamp_at() { + ZoneId systemZoneId = ZoneId.systemDefault(); + insertAndRead( + TableField.cql_timestamp, + ZonedDateTime.now(systemZoneId).truncatedTo(ChronoUnit.MILLIS), + ExtraTypeCodecs.zonedTimestampAt(systemZoneId)); + } + + private enum TestEnum { + value1, + value2, + value3, + } + + // Public for JSON serialization + public static final class TestJsonAnnotatedPojo { + public final String info; + public final List values; + + @JsonCreator + public TestJsonAnnotatedPojo( + @JsonProperty("info") String info, @JsonProperty("values") List values) { + this.info = info; + this.values = values; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestJsonAnnotatedPojo testJsonAnnotatedPojo = (TestJsonAnnotatedPojo) o; + return Objects.equals(info, testJsonAnnotatedPojo.info) + && Objects.equals(values, testJsonAnnotatedPojo.values); + } + + @Override + public int hashCode() { + return Objects.hash(info, values); + } + } + + public static final class TestPojo { + public int id; + public String[] messages; + + public static TestPojo create(int id, String... 
messages) { + TestPojo obj = new TestPojo(); + obj.id = id; + obj.messages = messages; + return obj; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestPojo testPojo = (TestPojo) o; + return id == testPojo.id && Arrays.equals(messages, testPojo.messages); + } + + @Override + public int hashCode() { + int result = Objects.hash(id); + result = 31 * result + Arrays.hashCode(messages); + return result; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistryIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/registry/CodecRegistryIT.java similarity index 54% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistryIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/registry/CodecRegistryIT.java index e10dc9135cb..74472e8bab9 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistryIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/core/type/codec/registry/CodecRegistryIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
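ExtraTypeCodecsIT above round-trips each ExtraTypeCodecs factory through a live table via the insertAndRead helper. In application code, the same codecs are registered once when the session is built; a short sketch using enumNamesOf, where the keyspace, table, and Mood enum are placeholders:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;

public class ExtraCodecsExample {
  enum Mood { HAPPY, SAD }

  public static void main(String[] args) {
    try (CqlSession session =
        CqlSession.builder()
            // Maps Mood to a CQL text column through Enum#name(), as enum_names_of() exercises.
            .addTypeCodecs(ExtraTypeCodecs.enumNamesOf(Mood.class))
            .build()) {
      Row row = session.execute("SELECT mood FROM ks.users WHERE id = 1").one(); // placeholder
      if (row != null) {
        Mood mood = row.get("mood", Mood.class); // resolved through the registered codec
        System.out.println(mood);
      }
    }
  }
}
```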
*/ -package com.datastax.oss.driver.api.core.type.codec.registry; +package com.datastax.oss.driver.core.type.codec.registry; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.ProtocolVersion; @@ -24,34 +27,40 @@ import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.UdtValue; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; +import com.datastax.oss.driver.api.core.type.codec.MappingCodec; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import com.datastax.oss.driver.api.core.type.reflect.GenericTypeParameter; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.session.SessionUtils; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.type.codec.IntCodec; +import com.datastax.oss.driver.internal.core.type.codec.UdtCodec; +import com.datastax.oss.driver.internal.core.type.codec.extras.OptionalCodec; +import com.google.common.collect.ImmutableMap; import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; import java.nio.ByteBuffer; -import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; -import java.util.function.Predicate; import org.assertj.core.util.Maps; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestName; import org.junit.rules.TestRule; @@ -59,33 +68,50 @@ @Category(ParallelizableTests.class) public class CodecRegistryIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); @Rule public TestName name = new TestName(); - @Rule public ExpectedException thrown = ExpectedException.none(); - @BeforeClass public static void createSchema() { - // table with simple primary key, single cell. 
- sessionRule - .session() - .execute( - SimpleStatement.builder("CREATE TABLE IF NOT EXISTS test (k text primary key, v int)") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - // table with map value - sessionRule - .session() - .execute( - SimpleStatement.builder( - "CREATE TABLE IF NOT EXISTS test2 (k0 text, k1 int, v map<int, text>, primary key (k0, k1))") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); + SchemaChangeSynchronizer.withLock( + () -> { + // table with simple primary key, single cell. + SESSION_RULE + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test (k text primary key, v int)") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + // table with map value + SESSION_RULE + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test2 (k0 text, k1 int, v map<int, text>, primary key (k0, k1))") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + // table with UDT + SESSION_RULE + .session() + .execute( + SimpleStatement.builder("CREATE TYPE IF NOT EXISTS coordinates (x int, y int)") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + SESSION_RULE + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE IF NOT EXISTS test3 (k0 text, k1 int, v map<text, frozen<coordinates>>, primary key (k0, k1))") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + }); } // A simple codec that allows float values to be used for cassandra int column type. @@ -130,25 +156,32 @@ public Float parse(String value) { @Test public void should_throw_exception_if_no_codec_registered_for_type_set() { PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO test (k, v) values (?, ?)"); - - thrown.expect(CodecNotFoundException.class); + SESSION_RULE.session().prepare("INSERT INTO test (k, v) values (?, ?)"); // float value for int column should not work since no applicable codec. - prepared.boundStatementBuilder().setString(0, name.getMethodName()).setFloat(1, 3.14f).build(); + Throwable t = + catchThrowable( + () -> + prepared + .boundStatementBuilder() + .setString(0, name.getMethodName()) + .setFloat(1, 3.14f) + .build()); + + assertThat(t).isInstanceOf(CodecNotFoundException.class); } @Test public void should_throw_exception_if_no_codec_registered_for_type_get() { PreparedStatement prepared = - sessionRule.session().prepare("INSERT INTO test (k, v) values (?, ?)"); + SESSION_RULE.session().prepare("INSERT INTO test (k, v) values (?, ?)"); BoundStatement insert = prepared.boundStatementBuilder().setString(0, name.getMethodName()).setInt(1, 2).build(); - sessionRule.session().execute(insert); + SESSION_RULE.session().execute(insert); ResultSet result = - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.builder("SELECT v from test where k = ?") @@ -161,9 +194,9 @@ public void should_throw_exception_if_no_codec_registered_for_type_get() { // should not be able to access int column as float as no codec is registered to handle that.
Row row = rows.iterator().next(); - thrown.expect(CodecNotFoundException.class); + Throwable t = catchThrowable(() -> assertThat(row.getFloat("v")).isEqualTo(3.0f)); - assertThat(row.getFloat("v")).isEqualTo(3.0f); + assertThat(t).isInstanceOf(CodecNotFoundException.class); } @Test @@ -173,8 +206,8 @@ public void should_be_able_to_register_and_use_custom_codec() { (CqlSession) SessionUtils.baseBuilder() .addTypeCodecs(new FloatCIntCodec()) - .addContactEndPoints(ccm.getContactPoints()) - .withKeyspace(sessionRule.keyspace()) + .addContactEndPoints(CCM_RULE.getContactPoints()) + .withKeyspace(SESSION_RULE.keyspace()) .build()) { PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (?, ?)"); @@ -204,81 +237,40 @@ public void should_be_able_to_register_and_use_custom_codec() { } } - // TODO: consider moving this into source as it could be generally useful. - private abstract static class MappingCodec implements TypeCodec { - - private final GenericType javaType; - private final TypeCodec innerCodec; - - MappingCodec(TypeCodec innerCodec, GenericType javaType) { - this.innerCodec = innerCodec; - this.javaType = javaType; - } - - @NonNull - @Override - public GenericType getJavaType() { - return javaType; - } - - @NonNull - @Override - public DataType getCqlType() { - return innerCodec.getCqlType(); - } - - @Override - public ByteBuffer encode(O value, @NonNull ProtocolVersion protocolVersion) { - return innerCodec.encode(encode(value), protocolVersion); - } - - @Override - public O decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return decode(innerCodec.decode(bytes, protocolVersion)); - } - - @NonNull - @Override - public String format(O value) { - return value == null ? null : innerCodec.format(encode(value)); - } - - @Override - public O parse(String value) { - return value == null || value.isEmpty() || value.equalsIgnoreCase("NULL") - ? null - : decode(innerCodec.parse(value)); - } - - protected abstract O decode(I value); + @Test + public void should_register_custom_codec_at_runtime() { + // Still create a separate session because we don't want to interfere with other tests + try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace())) { - protected abstract I encode(O value); - } + MutableCodecRegistry registry = + (MutableCodecRegistry) session.getContext().getCodecRegistry(); + registry.register(new FloatCIntCodec()); - private static class OptionalCodec extends MappingCodec, T> { + PreparedStatement prepared = session.prepare("INSERT INTO test (k, v) values (?, ?)"); - // in cassandra, empty collections are considered null and vise versa. - Predicate isAbsent = - (i) -> - i == null - || ((i instanceof Collection && ((Collection) i).isEmpty())) - || ((i instanceof Map) && ((Map) i).isEmpty()); + // float value for int column should work. + BoundStatement insert = + prepared + .boundStatementBuilder() + .setString(0, name.getMethodName()) + .setFloat(1, 3.14f) + .build(); + session.execute(insert); - OptionalCodec(TypeCodec innerCodec) { - super( - innerCodec, - new GenericType>() {}.where( - new GenericTypeParameter() {}, innerCodec.getJavaType())); - } + ResultSet result = + session.execute( + SimpleStatement.builder("SELECT v from test where k = ?") + .addPositionalValue(name.getMethodName()) + .build()); - @Override - protected Optional decode(T value) { - return isAbsent.test(value) ? 
Optional.empty() : Optional.of(value); - } + List rows = result.all(); + assertThat(rows).hasSize(1); - @Override - protected T encode(Optional value) { - return value.orElse(null); + // should be able to retrieve value back as float, some precision is lost due to going from + // int -> float. + Row row = rows.iterator().next(); + assertThat(row.getFloat("v")).isEqualTo(3.0f); + assertThat(row.getFloat(0)).isEqualTo(3.0f); } } @@ -294,8 +286,8 @@ public void should_be_able_to_register_and_use_custom_codec_with_generic_type() (CqlSession) SessionUtils.baseBuilder() .addTypeCodecs(optionalMapCodec, mapWithOptionalValueCodec) - .addContactEndPoints(ccm.getContactPoints()) - .withKeyspace(sessionRule.keyspace()) + .addContactEndPoints(CCM_RULE.getContactPoints()) + .withKeyspace(SESSION_RULE.keyspace()) .build()) { PreparedStatement prepared = session.prepare("INSERT INTO test2 (k0, k1, v) values (?, ?, ?)"); @@ -381,8 +373,8 @@ public void should_be_able_to_handle_empty_collections() { try (CqlSession session = (CqlSession) SessionUtils.baseBuilder() - .addContactEndPoints(ccm.getContactPoints()) - .withKeyspace(sessionRule.keyspace()) + .addContactEndPoints(CCM_RULE.getContactPoints()) + .withKeyspace(SESSION_RULE.keyspace()) .build()) { // Using prepared statements (CQL type is known) @@ -424,4 +416,120 @@ public void should_be_able_to_handle_empty_collections() { assertThat(row2.getMap(0, Integer.class, String.class)).isEmpty(); } } + + private static final class Coordinates { + + public final int x; + public final int y; + + public Coordinates(int x, int y) { + this.x = x; + this.y = y; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Coordinates that = (Coordinates) o; + return this.x == that.x && this.y == that.y; + } + + @Override + public int hashCode() { + return Objects.hash(x, y); + } + } + + private static class CoordinatesCodec extends MappingCodec { + + public CoordinatesCodec(@NonNull TypeCodec innerCodec) { + super(innerCodec, GenericType.of(Coordinates.class)); + } + + @NonNull + @Override + public UserDefinedType getCqlType() { + return (UserDefinedType) super.getCqlType(); + } + + @Nullable + @Override + protected Coordinates innerToOuter(@Nullable UdtValue value) { + return value == null ? null : new Coordinates(value.getInt("x"), value.getInt("y")); + } + + @Nullable + @Override + protected UdtValue outerToInner(@Nullable Coordinates value) { + return value == null + ? 
null + : getCqlType().newValue().setInt("x", value.x).setInt("y", value.y); + } + } + + @Test + public void should_register_and_use_custom_codec_for_user_defined_type() { + + Map coordinatesMap = ImmutableMap.of("home", new Coordinates(12, 34)); + GenericType> coordinatesMapType = + GenericType.mapOf(String.class, Coordinates.class); + + // Still create a separate session because we don't want to interfere with other tests + try (CqlSession session = SessionUtils.newSession(CCM_RULE, SESSION_RULE.keyspace())) { + + // register the mapping codec for UDT coordinates + UserDefinedType coordinatesUdt = + session + .getMetadata() + .getKeyspace(SESSION_RULE.keyspace()) + .flatMap(ks -> ks.getUserDefinedType("coordinates")) + .orElseThrow(IllegalStateException::new); + MutableCodecRegistry codecRegistry = + (MutableCodecRegistry) session.getContext().getCodecRegistry(); + + // Retrieve the inner codec + TypeCodec innerCodec = codecRegistry.codecFor(coordinatesUdt); + assertThat(innerCodec).isInstanceOf(UdtCodec.class); + + // Create the "outer" codec and register it + CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec); + codecRegistry.register(coordinatesCodec); + + // Test that the codec will be used to create on-the-fly codecs + assertThat(codecRegistry.codecFor(Coordinates.class)).isSameAs(coordinatesCodec); + assertThat(codecRegistry.codecFor(coordinatesMapType).accepts(coordinatesMap)).isTrue(); + + // test insertion + PreparedStatement prepared = + session.prepare("INSERT INTO test3 (k0, k1, v) values (?, ?, ?)"); + BoundStatement insert = + prepared + .boundStatementBuilder() + .setString(0, name.getMethodName()) + .setInt(1, 0) + .set( + 2, + coordinatesMap, + coordinatesMapType) // use java type so has to be looked up in registry. + .build(); + session.execute(insert); + + // test retrieval + ResultSet result = + session.execute( + SimpleStatement.builder("SELECT v from test3 where k0 = ? AND k1 = ?") + .addPositionalValues(name.getMethodName(), 0) + .build()); + List rows = result.all(); + assertThat(rows).hasSize(1); + Row row = rows.get(0); + assertThat(row.get(0, coordinatesMapType)).isEqualTo(coordinatesMap); + assertThat(row.getMap(0, String.class, Coordinates.class)).isEqualTo(coordinatesMap); + } + } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java index 48f74fcb8df..082858803af 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
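The UDT test just added follows the standard recipe for mapping a UDT to a Java class with MappingCodec: look up the UserDefinedType in the schema metadata, ask the registry for the driver's built-in UdtValue codec, wrap it, and register the wrapper. Distilled into a helper below; it assumes the CoordinatesCodec defined in the test above is visible to the caller:

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.data.UdtValue;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;

public class UdtCodecRegistration {

  /** Wires a CoordinatesCodec (as defined in the test above) into an existing session. */
  public static void register(CqlSession session, CqlIdentifier keyspace) {
    // 1. Look up the UDT in the schema metadata.
    UserDefinedType coordinatesUdt =
        session
            .getMetadata()
            .getKeyspace(keyspace)
            .flatMap(ks -> ks.getUserDefinedType("coordinates"))
            .orElseThrow(IllegalStateException::new);
    // 2. Fetch the driver's built-in codec for that UDT.
    MutableCodecRegistry registry =
        (MutableCodecRegistry) session.getContext().getCodecRegistry();
    TypeCodec<UdtValue> innerCodec = registry.codecFor(coordinatesUdt);
    // 3. Wrap and register; Coordinates now resolves automatically in get()/set().
    registry.register(new CoordinatesCodec(innerCodec));
  }
}
```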
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java index 1fe041fcfe7..fe20d0fdc8a 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,40 +20,18 @@ import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; +import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; import com.datastax.oss.driver.api.core.session.SessionBuilder; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.example.guava.internal.DefaultGuavaSession; import com.datastax.oss.driver.example.guava.internal.GuavaDriverContext; import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.List; -import java.util.Map; -import java.util.function.Predicate; public class GuavaSessionBuilder extends SessionBuilder { @Override protected DriverContext buildContext( - DriverConfigLoader configLoader, - List> typeCodecs, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - RequestTracker requestTracker, - Map localDatacenters, - Map> nodeFilters, - ClassLoader classLoader) { - return new GuavaDriverContext( - configLoader, - typeCodecs, - nodeStateListener, - schemaChangeListener, - requestTracker, - localDatacenters, - nodeFilters, - classLoader); + DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { + return new GuavaDriverContext(configLoader, programmaticArguments); } @Override diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java index c379d313572..afa8439c487 100644 --- 
a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java index a63da30d69b..7418526520b 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java index 3ecf6a1b128..692ad951187 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,11 +20,7 @@ import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.cql.PrepareRequest; import com.datastax.oss.driver.api.core.cql.Statement; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.NodeStateListener; -import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; -import com.datastax.oss.driver.api.core.tracker.RequestTracker; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.core.session.ProgrammaticArguments; import com.datastax.oss.driver.example.guava.api.GuavaSession; import com.datastax.oss.driver.internal.core.context.DefaultDriverContext; import com.datastax.oss.driver.internal.core.cql.CqlPrepareAsyncProcessor; @@ -30,9 +28,7 @@ import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; import com.datastax.oss.driver.internal.core.cql.CqlRequestSyncProcessor; import com.datastax.oss.driver.internal.core.session.RequestProcessorRegistry; -import java.util.List; -import java.util.Map; -import java.util.function.Predicate; +import java.util.Optional; /** * A Custom {@link DefaultDriverContext} that overrides {@link #getRequestProcessorRegistry()} to @@ -41,23 +37,8 @@ public class GuavaDriverContext extends DefaultDriverContext { public GuavaDriverContext( - DriverConfigLoader configLoader, - List> typeCodecs, - NodeStateListener nodeStateListener, - SchemaChangeListener schemaChangeListener, - RequestTracker requestTracker, - Map localDatacenters, - Map> nodeFilters, - ClassLoader classLoader) { - super( - configLoader, - typeCodecs, - nodeStateListener, - schemaChangeListener, - requestTracker, - localDatacenters, - nodeFilters, - classLoader); + DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) { + super(configLoader, programmaticArguments); } @Override @@ -66,7 +47,8 @@ public RequestProcessorRegistry buildRequestProcessorRegistry() { // use GuavaRequestAsyncProcessor to return ListenableFutures in async methods. CqlRequestAsyncProcessor cqlRequestAsyncProcessor = new CqlRequestAsyncProcessor(); - CqlPrepareAsyncProcessor cqlPrepareAsyncProcessor = new CqlPrepareAsyncProcessor(); + CqlPrepareAsyncProcessor cqlPrepareAsyncProcessor = + new CqlPrepareAsyncProcessor(Optional.of(this)); CqlRequestSyncProcessor cqlRequestSyncProcessor = new CqlRequestSyncProcessor(cqlRequestAsyncProcessor); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java index 4f9a6484731..20cb60323e9 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java index 7ab96a5a8b0..ef582cce1b9 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java index 949db88e389..1fcfb9dd3b2 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/KeyRequestProcessor.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +20,8 @@ import com.datastax.oss.driver.api.core.cql.AsyncResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.session.Request; -import com.datastax.oss.driver.api.core.session.RequestProcessorIT; import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.core.session.RequestProcessorIT; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; import com.datastax.oss.driver.internal.core.cql.CqlRequestAsyncProcessor; import com.datastax.oss.driver.internal.core.session.DefaultSession; diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java deleted file mode 100644 index f52d139f1b4..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/CqlIntToStringCodec.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.internal.core.type.codec; - -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.type.DataType; -import com.datastax.oss.driver.api.core.type.DataTypes; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import edu.umd.cs.findbugs.annotations.NonNull; -import java.nio.ByteBuffer; - -/** - * A sample user codec implementation that we use in our tests. - * - *

        It maps a CQL string to a Java string containing its textual representation. - */ -public class CqlIntToStringCodec implements TypeCodec { - - @NonNull - @Override - public GenericType getJavaType() { - return GenericType.STRING; - } - - @NonNull - @Override - public DataType getCqlType() { - return DataTypes.INT; - } - - @Override - public ByteBuffer encode(String value, @NonNull ProtocolVersion protocolVersion) { - if (value == null) { - return null; - } else { - return TypeCodecs.INT.encode(Integer.parseInt(value), protocolVersion); - } - } - - @Override - public String decode(ByteBuffer bytes, @NonNull ProtocolVersion protocolVersion) { - return TypeCodecs.INT.decode(bytes, protocolVersion).toString(); - } - - @NonNull - @Override - public String format(String value) { - throw new UnsupportedOperationException("Not implemented for this test"); - } - - @Override - public String parse(String value) { - throw new UnsupportedOperationException("Not implemented for this test"); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecIT.java new file mode 100644 index 00000000000..804a078bbe0 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/type/codec/UdtCodecIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.core.type.codec; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.core.type.codec.TypeCodec; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import java.util.Objects; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class UdtCodecIT { + + private CcmRule ccmRule = CcmRule.getInstance(); + + private SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + @Test + public void should_decoding_udt_be_backward_compatible() { + CqlSession session = sessionRule.session(); + session.execute("CREATE TYPE test_type_1 (a text, b int)"); + session.execute("CREATE TABLE test_table_1 (e int primary key, f frozen)"); + // insert a row using version 1 of the UDT schema + session.execute("INSERT INTO test_table_1(e, f) VALUES(1, {a: 'a', b: 1})"); + UserDefinedType udt = + session + .getMetadata() + .getKeyspace(sessionRule.keyspace()) + .flatMap(ks -> ks.getUserDefinedType("test_type_1")) + .orElseThrow(IllegalStateException::new); + TypeCodec oldCodec = session.getContext().getCodecRegistry().codecFor(udt); + // update UDT schema + session.execute("ALTER TYPE test_type_1 add i text"); + // insert a row using version 2 of the UDT schema + session.execute("INSERT INTO test_table_1(e, f) VALUES(2, {a: 'b', b: 2, i: 'b'})"); + Row row = + Objects.requireNonNull(session.execute("SELECT f FROM test_table_1 WHERE e = ?", 2).one()); + // Try to read new row with old codec. Using row.getUdtValue() would not cause any issues, + // because new codec will be automatically registered (using all 3 attributes). + // If application leverages generic row.get(String, Codec) method, data reading with old codec + // should + // be backward-compatible. + UdtValue value = Objects.requireNonNull((UdtValue) row.get("f", oldCodec)); + assertThat(value.getString("a")).isEqualTo("b"); + assertThat(value.getInt("b")).isEqualTo(2); + assertThatThrownBy(() -> value.getString("i")).hasMessage("i is not a field in this UDT"); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationCcmIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationCcmIT.java new file mode 100644 index 00000000000..e0f058b00d0 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationCcmIT.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.concurrent; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Fail.fail; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousAsyncResultSet; +import com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingITBase; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DriverTimeoutException; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.IsolatedTests; +import java.time.Duration; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutionException; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.blockhound.BlockHound; +import reactor.core.publisher.Flux; +import reactor.core.scheduler.Schedulers; +import reactor.test.StepVerifier; + +/** + * This test exercises the driver with BlockHound installed and tests that the rules defined in + * {@link DriverBlockHoundIntegration} are being applied, and especially when continuous paging is + * used. + */ +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.1.0", + description = "Continuous paging is only available from 5.1.0 onwards") +@Category(IsolatedTests.class) +public class DriverBlockHoundIntegrationCcmIT extends ContinuousPagingITBase { + + private static final Logger LOGGER = + LoggerFactory.getLogger(DriverBlockHoundIntegrationCcmIT.class); + + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); + + // Note: Insights monitoring will be detected by BlockHound, but the error is swallowed and + // logged by DefaultSession.SingleThreaded.notifyListeners, so it's not necessary to explicitly + // disable Insights here. 
+ private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + @BeforeClass + public static void setUp() { + try { + BlockHound.install(); + } catch (Throwable t) { + LOGGER.error("BlockHound could not be installed", t); + fail("BlockHound could not be installed", t); + } + initialize(SESSION_RULE.session(), SESSION_RULE.slowProfile()); + } + + @Test + public void should_not_detect_blocking_call_with_continuous_paging() { + CqlSession session = SESSION_RULE.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + Flux rows = + Flux.range(0, 10) + .flatMap( + i -> + Flux.fromIterable(session.executeContinuously(statement)) + .subscribeOn(Schedulers.parallel())); + StepVerifier.create(rows).expectNextCount(1000).expectComplete().verify(); + } + + /** Copied from com.datastax.dse.driver.api.core.cql.continuous.ContinuousPagingIT. */ + @Test + public void should_not_detect_blocking_call_with_continuous_paging_when_timeout() + throws Exception { + CqlSession session = SESSION_RULE.session(); + SimpleStatement statement = SimpleStatement.newInstance("SELECT v from test where k=?", KEY); + // Throttle server at a page per second and set client timeout much lower so that the client + // will experience a timeout. + // Note that this might not be perfect if there are pauses in the JVM and the timeout + // doesn't fire soon enough. + DriverExecutionProfile profile = + session + .getContext() + .getConfig() + .getDefaultProfile() + .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 10) + .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 1) + .withDuration( + DseDriverOption.CONTINUOUS_PAGING_TIMEOUT_OTHER_PAGES, Duration.ofMillis(100)); + CompletionStage future = + session.executeContinuouslyAsync(statement.setExecutionProfile(profile)); + ContinuousAsyncResultSet pagingResult = CompletableFutures.getUninterruptibly(future); + try { + pagingResult.fetchNextPage().toCompletableFuture().get(); + fail("Expected a timeout"); + } catch (ExecutionException e) { + assertThat(e.getCause()) + .isInstanceOf(DriverTimeoutException.class) + .hasMessageContaining("Timed out waiting for page 2"); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationIT.java new file mode 100644 index 00000000000..278e3081ea1 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/internal/core/util/concurrent/DriverBlockHoundIntegrationIT.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.core.util.concurrent; + +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.rows; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Fail.fail; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.uuid.Uuids; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.IsolatedTests; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import java.util.UUID; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.blockhound.BlockHound; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; +import reactor.test.StepVerifier; + +/** + * This test exercises the driver with BlockHound installed and tests that the rules defined in + * {@link DriverBlockHoundIntegration} are being applied. + */ +@Category(IsolatedTests.class) +public class DriverBlockHoundIntegrationIT { + + private static final Logger LOGGER = LoggerFactory.getLogger(DriverBlockHoundIntegrationIT.class); + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + @BeforeClass + public static void setUp() { + try { + BlockHound.install(); + } catch (Throwable t) { + LOGGER.error("BlockHound could not be installed", t); + fail("BlockHound could not be installed", t); + } + } + + @Before + public void setup() { + SIMULACRON_RULE.cluster().prime(when("SELECT c1, c2 FROM ks.t1").then(rows().row("foo", 42))); + } + + @Test + @SuppressWarnings("BlockingMethodInNonBlockingContext") + public void should_detect_blocking_call() { + // this is just to make sure the detection mechanism is properly installed + Mono blockingPublisher = + Mono.fromCallable( + () -> { + Thread.sleep(1); + return 0; + }) + .subscribeOn(Schedulers.parallel()); + StepVerifier.create(blockingPublisher) + .expectErrorMatches(e -> e instanceof Error && e.getMessage().contains("Blocking call!")) + .verify(); + } + + @Test + public void should_not_detect_blocking_call_on_asynchronous_execution() { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { + Flux rows = + Flux.range(0, 1000) + .flatMap( + i -> + Flux.from(session.executeReactive("SELECT c1, c2 FROM ks.t1")) + .subscribeOn(Schedulers.parallel())); + StepVerifier.create(rows).expectNextCount(1000).expectComplete().verify(); + } + } + + @Test + public void should_not_detect_blocking_call_on_asynchronous_execution_prepared() { + try (CqlSession session = SessionUtils.newSession(SIMULACRON_RULE)) { + Flux rows = + Mono.fromCompletionStage(() -> session.prepareAsync("SELECT c1, c2 FROM ks.t1")) + .flatMapMany( + ps -> + Flux.range(0, 1000) + .map(i -> ps.bind()) + .flatMap( + bs -> + Flux.from(session.executeReactive(bs)) + .subscribeOn(Schedulers.parallel()))); + StepVerifier.create(rows).expectNextCount(1000).expectComplete().verify(); + } + } + + @Test + public void 
should_not_detect_blocking_call_on_random_uuid_generation() { + Flux uuids = + Flux.create( + sink -> { + for (int i = 0; i < 1_000_000; ++i) { + sink.next(Uuids.random()); + } + sink.complete(); + }) + .subscribeOn(Schedulers.parallel()); + StepVerifier.create(uuids).expectNextCount(1_000_000).expectComplete().verify(); + } + + @Test + public void should_not_detect_blocking_call_on_time_based_uuid_generation() { + Flux uuids = + Flux.create( + sink -> { + for (int i = 0; i < 1_000_000; ++i) { + sink.next(Uuids.timeBased()); + } + sink.complete(); + }) + .subscribeOn(Schedulers.parallel()); + StepVerifier.create(uuids).expectNextCount(1_000_000).expectComplete().verify(); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java index 6ce28ec2419..5ef66f15bfb 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ComputedIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,8 @@ package com.datastax.oss.driver.mapper; import static com.datastax.oss.driver.assertions.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.assertj.core.data.Offset.offset; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -49,23 +53,20 @@ import java.util.concurrent.atomic.AtomicInteger; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @Category(ParallelizableTests.class) public class ComputedIT { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @Rule public ExpectedException thrown = ExpectedException.none(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static TestMapper mapper; @@ -73,13 +74,13 @@ public class ComputedIT { @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); for (String query : ImmutableList.of( "CREATE TABLE computed_entity(id 
int, c_id int, v int, primary key (id, c_id))")) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } mapper = new ComputedIT_TestMapperBuilder(session).build(); @@ -87,7 +88,7 @@ public static void setup() { @Test public void should_not_include_computed_values_in_insert() { - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); ComputedEntity entity = new ComputedEntity(key, 1, 2); @@ -101,7 +102,7 @@ public void should_not_include_computed_values_in_insert() { @Test public void should_return_computed_values_in_select() { - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); long time = System.currentTimeMillis() - 1000; @@ -112,14 +113,14 @@ public void should_return_computed_values_in_select() { assertThat(retrievedValue.getId()).isEqualTo(key); assertThat(retrievedValue.getcId()).isEqualTo(1); assertThat(retrievedValue.getV()).isEqualTo(2); - assertThat(retrievedValue.getTtl()).isEqualTo(3600); + assertThat(retrievedValue.getTtl()).isCloseTo(3600, offset(10)); assertThat(retrievedValue.getWritetime()).isEqualTo(time); } @Test public void should_not_include_computed_values_in_delete() { // should not be the case since delete operates on primary key.. - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); ComputedEntity entity = new ComputedEntity(key, 1, 2); @@ -135,10 +136,10 @@ public void should_not_include_computed_values_in_delete() { @Test public void should_not_include_computed_values_in_SetEntity() { - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); PreparedStatement preparedStatement = session.prepare("INSERT INTO computed_entity (id, c_id, v) VALUES (?, ?, ?)"); BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); @@ -156,14 +157,14 @@ public void should_not_include_computed_values_in_SetEntity() { @Test public void should_return_computed_values_in_GetEntity() { - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); long time = System.currentTimeMillis() - 1000; ComputedEntity entity = new ComputedEntity(key, 1, 2); computedDao.saveWithTime(entity, 3600, time); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); /* * Query with the computed values included. 
@@ -191,20 +192,20 @@ public void should_return_computed_values_in_GetEntity() { assertThat(retrievedValue.getV()).isEqualTo(2); // these should be set - assertThat(retrievedValue.getTtl()).isEqualTo(3600); + assertThat(retrievedValue.getTtl()).isCloseTo(3600, offset(10)); assertThat(retrievedValue.getWritetime()).isEqualTo(time); } @Test public void should_fail_if_alias_does_not_match_cqlName() { - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); long time = System.currentTimeMillis() - 1000; ComputedEntity entity = new ComputedEntity(key, 1, 2); computedDao.saveWithTime(entity, 3600, time); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); /* * Query with the computed values included. @@ -223,14 +224,16 @@ public void should_fail_if_alias_does_not_match_cqlName() { 1)); // should raise an exception as 'writetime' is not found in result set. - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("writetime is not a column in this row"); - computedDao.get(result.one()); + Throwable t = catchThrowable(() -> computedDao.get(result.one())); + + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("writetime is not a column in this row"); } @Test public void should_return_computed_values_in_query() { - ComputedDao computedDao = mapper.computedDao(sessionRule.keyspace()); + ComputedDao computedDao = mapper.computedDao(SESSION_RULE.keyspace()); int key = keyProvider.incrementAndGet(); long time = System.currentTimeMillis() - 1000; @@ -243,7 +246,7 @@ public void should_return_computed_values_in_query() { assertThat(retrievedValue.getV()).isEqualTo(2); // these should be set - assertThat(retrievedValue.getTtl()).isEqualTo(3600); + assertThat(retrievedValue.getTtl()).isCloseTo(3600, offset(10)); assertThat(retrievedValue.getWritetime()).isEqualTo(time); } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/CustomResultTypeIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/CustomResultTypeIT.java new file mode 100644 index 00000000000..c218dcfcc86 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/CustomResultTypeIT.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Delete; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.Query; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.annotations.Update; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ListenableFuture; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class CustomResultTypeIT extends InventoryITBase { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static ProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + + for (String query : createStatements(CCM_RULE)) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); + } + + InventoryMapper mapper = InventoryMapper.builder(SESSION_RULE.session()).build(); + dao = mapper.productDao(SESSION_RULE.keyspace()); + } + + @Test + public void should_use_custom_result_for_insert_method() + throws ExecutionException, InterruptedException { + + ListenableFuture insertFuture = dao.insert(FLAMETHROWER); + insertFuture.get(); + + Row row = SESSION_RULE.session().execute("SELECT id FROM product").one(); + UUID insertedId = row.getUuid(0); + assertThat(insertedId).isEqualTo(FLAMETHROWER.getId()); + } + + @Test + public void should_use_custom_result_for_select_method() + throws ExecutionException, InterruptedException { + + dao.insert(FLAMETHROWER).get(); + + ListenableFuture selectFuture = dao.select(FLAMETHROWER.getId()); + Product selectedProduct = selectFuture.get(); + assertThat(selectedProduct).isEqualTo(FLAMETHROWER); + } + + @Test + public void should_use_custom_result_for_update_method() + throws ExecutionException, InterruptedException { + + dao.insert(FLAMETHROWER).get(); + + Product productToUpdate = dao.select(FLAMETHROWER.getId()).get(); + productToUpdate.setDescription("changed description"); + ListenableFuture updateFuture = dao.update(productToUpdate); + 
updateFuture.get(); + + Product selectedProduct = dao.select(FLAMETHROWER.getId()).get(); + assertThat(selectedProduct.getDescription()).isEqualTo("changed description"); + } + + @Test + public void should_use_custom_result_for_delete_method() + throws ExecutionException, InterruptedException { + dao.insert(FLAMETHROWER).get(); + + ListenableFuture deleteFuture = dao.delete(FLAMETHROWER); + deleteFuture.get(); + + Product selectedProduct = dao.select(FLAMETHROWER.getId()).get(); + assertThat(selectedProduct).isNull(); + } + + @Test + public void should_use_custom_result_for_query_method() + throws ExecutionException, InterruptedException { + dao.insert(FLAMETHROWER).get(); + + ListenableFuture deleteFuture = dao.deleteById(FLAMETHROWER.getId()); + deleteFuture.get(); + + Product selectedProduct = dao.select(FLAMETHROWER.getId()).get(); + assertThat(selectedProduct).isNull(); + } + + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface ListenableFutureDao { + + @Select + ListenableFuture select(UUID id); + + @Update + ListenableFuture update(EntityT entity); + + @Insert + ListenableFuture insert(EntityT entity); + + @Delete + ListenableFuture delete(EntityT entity); + } + + @Dao + public interface ProductDao extends ListenableFutureDao { + + // We could do this easier with @Delete, but the goal here is to test @Query + @Query("DELETE FROM ${keyspaceId}.product WHERE id = :id") + ListenableFuture deleteById(UUID id); + } + + @Mapper + public interface InventoryMapper { + + @DaoFactory + ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + + static MapperBuilder builder(CqlSession session) { + return new CustomResultTypeIT_InventoryMapperBuilder(session); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java index 9f3c7238919..30a808e87a9 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultKeyspaceIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +26,7 @@ import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; +import com.datastax.oss.driver.api.mapper.MapperBuilder; import com.datastax.oss.driver.api.mapper.MapperException; import com.datastax.oss.driver.api.mapper.annotations.Dao; import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; @@ -53,28 +56,28 @@ @Category(ParallelizableTests.class) public class DefaultKeyspaceIT { private static final String DEFAULT_KEYSPACE = "default_keyspace"; - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionWithNoKeyspaceRule = - SessionRule.builder(ccm).withKeyspace(false).build(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - private static InventoryMapper mapper; + private static final SessionRule SESSION_WITH_NO_KEYSPACE_RULE = + SessionRule.builder(CCM_RULE).withKeyspace(false).build(); @ClassRule - public static TestRule chain = - RuleChain.outerRule(ccm).around(sessionRule).around(sessionWithNoKeyspaceRule); + public static final TestRule chain = + RuleChain.outerRule(CCM_RULE).around(SESSION_RULE).around(SESSION_WITH_NO_KEYSPACE_RULE); + + private static InventoryMapper mapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder( String.format( "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", DEFAULT_KEYSPACE)) - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); session.execute( @@ -82,19 +85,19 @@ public static void setup() { String.format( "CREATE TABLE %s.product_simple_default_ks(id uuid PRIMARY KEY, description text)", DEFAULT_KEYSPACE)) - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); session.execute( SimpleStatement.builder( "CREATE TABLE product_simple_without_ks(id uuid PRIMARY KEY, description text)") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); session.execute( SimpleStatement.builder( "CREATE TABLE product_simple_default_ks(id uuid PRIMARY KEY, description text)") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); mapper = new DefaultKeyspaceIT_InventoryMapperBuilder(session).build(); @@ -120,12 +123,13 @@ public void should_fail_to_insert_if_default_ks_and_dao_ks_not_provided() { assertThatThrownBy( () -> { InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder(sessionRule.session()) + new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder(SESSION_RULE.session()) + .withCustomState(MapperBuilder.SCHEMA_VALIDATION_ENABLED_SETTING, false) .build(); mapper.productDaoDefaultKsNotSet(); }) - .isInstanceOf(InvalidQueryException.class) - .hasMessageMatching("unconfigured 
(columnfamily|table) product_simple_default_ks_not_set"); + .isInstanceOf(InvalidQueryException.class); + // don't check the error message, as it's not consistent across Cassandra/DSE versions } @Test @@ -147,7 +151,7 @@ public void should_insert_preferring_dao_factory_ks_over_entity_default_ks() { // Given ProductSimpleDefaultKs product = new ProductSimpleDefaultKs(UUID.randomUUID(), "desc_1"); ProductSimpleDaoDefaultKs dao = - mapper.productDaoEntityDefaultOverridden(sessionRule.keyspace()); + mapper.productDaoEntityDefaultOverridden(SESSION_RULE.keyspace()); assertThat(dao.findById(product.id)).isNull(); // When @@ -167,7 +171,7 @@ public void should_fail_dao_initialization_if_keyspace_not_specified() { // entity has no keyspace InventoryMapperKsNotSet mapper = new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder( - sessionWithNoKeyspaceRule.session()) + SESSION_WITH_NO_KEYSPACE_RULE.session()) .build(); mapper.productDaoDefaultKsNotSet(); }) @@ -186,7 +190,8 @@ public void should_initialize_dao_if_keyspace_not_specified_but_not_needed() { // entity has no keyspace // but dao methods don't require keyspace (GetEntity, SetEntity) InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder(sessionWithNoKeyspaceRule.session()) + new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder( + SESSION_WITH_NO_KEYSPACE_RULE.session()) .build(); mapper.productDaoGetAndSetOnly(); } @@ -194,20 +199,22 @@ public void should_initialize_dao_if_keyspace_not_specified_but_not_needed() { @Test public void should_initialize_dao_if_default_ks_provided() { InventoryMapper mapper = - new DefaultKeyspaceIT_InventoryMapperBuilder(sessionWithNoKeyspaceRule.session()).build(); + new DefaultKeyspaceIT_InventoryMapperBuilder(SESSION_WITH_NO_KEYSPACE_RULE.session()) + .build(); // session has no keyspace, but entity does mapper.productDaoDefaultKs(); - mapper.productDaoEntityDefaultOverridden(sessionRule.keyspace()); + mapper.productDaoEntityDefaultOverridden(SESSION_RULE.keyspace()); } @Test public void should_initialize_dao_if_dao_ks_provided() { InventoryMapperKsNotSet mapper = - new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder(sessionWithNoKeyspaceRule.session()) + new DefaultKeyspaceIT_InventoryMapperKsNotSetBuilder( + SESSION_WITH_NO_KEYSPACE_RULE.session()) .build(); // session has no keyspace, but dao has parameter mapper.productDaoDefaultKsNotSetOverridden( - sessionRule.keyspace(), CqlIdentifier.fromCql("product_simple_default_ks")); + SESSION_RULE.keyspace(), CqlIdentifier.fromCql("product_simple_default_ks")); } @Mapper @@ -294,12 +301,16 @@ public void setDescription(String description) { } @Override - public boolean equals(Object o) { - - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProductSimpleDefaultKs that = (ProductSimpleDefaultKs) o; - return Objects.equals(id, that.id) && Objects.equals(description, that.description); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof ProductSimpleDefaultKs) { + ProductSimpleDefaultKs that = (ProductSimpleDefaultKs) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.description, that.description); + } else { + return false; + } } @Override @@ -342,12 +353,16 @@ public void setDescription(String description) { } @Override - public boolean equals(Object o) { - - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProductSimpleDefaultKsNotSet that = 
(ProductSimpleDefaultKsNotSet) o; - return Objects.equals(id, that.id) && Objects.equals(description, that.description); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof ProductSimpleDefaultKsNotSet) { + ProductSimpleDefaultKsNotSet that = (ProductSimpleDefaultKsNotSet) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.description, that.description); + } else { + return false; + } } @Override @@ -390,12 +405,16 @@ public void setDescription(String description) { } @Override - public boolean equals(Object o) { - - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProductSimpleWithoutKs that = (ProductSimpleWithoutKs) o; - return Objects.equals(id, that.id) && Objects.equals(description, that.description); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof ProductSimpleWithoutKs) { + ProductSimpleWithoutKs that = (ProductSimpleWithoutKs) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.description, that.description); + } else { + return false; + } } @Override diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java index dc2b707e3d6..fc88b5bbf7f 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DefaultNullSavingStrategyIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,8 +37,9 @@ import com.datastax.oss.driver.api.mapper.annotations.SetEntity; import com.datastax.oss.driver.api.mapper.annotations.Update; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import java.util.function.BiConsumer; @@ -53,27 +56,32 @@ * DefaultNullSavingStrategy} annotation. 
*/ @Category(ParallelizableTests.class) -@CassandraRequirement(min = "2.2", description = "support for unset values") +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.2", + description = "support for unset values") public class DefaultNullSavingStrategyIT { - private static CcmRule ccm = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static TestMapper mapper; private static PreparedStatement prepared; @BeforeClass public static void createSchema() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder("CREATE TABLE foo(k int PRIMARY KEY, v int)") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); mapper = new DefaultNullSavingStrategyIT_TestMapperBuilder(session).build(); - prepared = sessionRule.session().prepare("INSERT INTO foo (k, v) values (:k, :v)"); + prepared = SESSION_RULE.session().prepare("INSERT INTO foo (k, v) values (:k, :v)"); } @Test @@ -142,17 +150,17 @@ private void assertSetEntityStrategy( Foo foo = new Foo(1, null); BoundStatementBuilder builder = prepared.boundStatementBuilder(); daoMethod.accept(builder, foo); - sessionRule.session().execute(builder.build()); + SESSION_RULE.session().execute(builder.build()); validateData(expectedStrategy); } private void reset() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute("INSERT INTO foo (k, v) VALUES (1, 1)"); } private void validateData(NullSavingStrategy expectedStrategy) { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session.execute("SELECT v FROM foo WHERE k = 1").one(); switch (expectedStrategy) { case DO_NOT_SET: diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java index 087d1f5ae50..03e3597501c 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,6 +23,7 @@ import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.PagingIterable; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; @@ -35,9 +38,10 @@ import com.datastax.oss.driver.api.mapper.annotations.Mapper; import com.datastax.oss.driver.api.mapper.annotations.Select; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; -import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -46,18 +50,22 @@ import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; -import org.junit.experimental.categories.Category; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -@Category(ParallelizableTests.class) +// Do not run LWT tests in parallel because they may interfere. Tests operate on the same row. 
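+// (The conditional deletes exercised below, ifExists and customIfClause, are lightweight +// transactions; tests racing on the same row could flip each other's expected wasApplied() +// result.)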
+@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.0", + description = ">= in WHERE clause not supported in legacy versions") public class DeleteIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CustomCcmRule CCM_RULE = CustomCcmRule.builder().build(); - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; @@ -65,16 +73,16 @@ public class DeleteIT extends InventoryITBase { @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); - for (String query : createStatements(ccm)) { + for (String query : createStatements(CCM_RULE)) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } InventoryMapper inventoryMapper = new DeleteIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); - saleDao = inventoryMapper.productSaleDao(sessionRule.keyspace()); + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); + saleDao = inventoryMapper.productSaleDao(SESSION_RULE.keyspace()); } @Before @@ -169,6 +177,21 @@ public void should_delete_with_condition() { assertThat(dao.findById(id)).isNull(); } + @Test + public void should_delete_with_condition_statement() { + UUID id = FLAMETHROWER.getId(); + assertThat(dao.findById(id)).isNotNull(); + + BoundStatement bs = dao.deleteIfDescriptionMatchesStatement(id, "foo"); + ResultSet rs = SESSION_RULE.session().execute(bs); + assertThat(rs.wasApplied()).isFalse(); + assertThat(rs.one().getString("description")).isEqualTo(FLAMETHROWER.getDescription()); + + rs = dao.deleteIfDescriptionMatches(id, FLAMETHROWER.getDescription()); + assertThat(rs.wasApplied()).isTrue(); + assertThat(dao.findById(id)).isNull(); + } + @Test public void should_delete_with_condition_asynchronously() { UUID id = FLAMETHROWER.getId(); @@ -193,6 +216,13 @@ public void should_delete_by_partition_key() { assertThat(saleDao.all().all()).containsOnly(FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); } + @Test + public void should_delete_by_partition_key_statement() { + // should delete FLAMETHROWER_SALE_[1-4] + SESSION_RULE.session().execute(saleDao.deleteByIdForDayStatement(FLAMETHROWER.getId(), DATE_1)); + assertThat(saleDao.all().all()).containsOnly(FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); + } + @Test public void should_delete_by_partition_key_and_partial_clustering() { // should delete FLAMETHROWER_SALE_{1,3,4} @@ -201,6 +231,16 @@ public void should_delete_by_partition_key_and_partial_clustering() { assertThat(saleDao.all().all()) .containsOnly(FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); } + @Test + public void should_delete_by_partition_key_and_partial_clustering_statement() { + // should delete FLAMETHROWER_SALE_{1,3,4} + SESSION_RULE + .session() + .execute(saleDao.deleteByIdForCustomerStatement(FLAMETHROWER.getId(), DATE_1, 1)); + assertThat(saleDao.all().all()) + .containsOnly(FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); + } + @Test public void should_delete_by_primary_key_sales() { // should delete
FLAMETHROWER_SALE_2 @@ -215,6 +255,23 @@ public void should_delete_by_primary_key_sales() { MP3_DOWNLOAD_SALE_1); } + @Test + public void should_delete_by_primary_key_sales_statement() { + // should delete FLAMETHROWER_SALE_2 + SESSION_RULE + .session() + .execute( + saleDao.deleteByIdForCustomerAtTimeStatement( + FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs())); + assertThat(saleDao.all().all()) + .containsOnly( + FLAMETHROWER_SALE_1, + FLAMETHROWER_SALE_3, + FLAMETHROWER_SALE_4, + FLAMETHROWER_SALE_5, + MP3_DOWNLOAD_SALE_1); + } + @Test public void should_delete_if_price_matches() { ResultSet result = @@ -233,6 +290,26 @@ public void should_delete_if_price_matches() { assertThat(result.wasApplied()).isTrue(); } + @Test + public void should_delete_if_price_matchesStatement() { + BoundStatement bs = + saleDao.deleteIfPriceMatchesStatement( + FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs(), 250.0); + ResultSet result = SESSION_RULE.session().execute(bs); + + assertThat(result.wasApplied()).isFalse(); + Row row = result.one(); + assertThat(row).isNotNull(); + assertThat(row.getDouble("price")).isEqualTo(500.0); + + bs = + saleDao.deleteIfPriceMatchesStatement( + FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs(), 500.0); + result = SESSION_RULE.session().execute(bs); + + assertThat(result.wasApplied()).isTrue(); + } + @Test public void should_delete_if_exists_sales() { assertThat(saleDao.deleteIfExists(FLAMETHROWER.getId(), DATE_1, 2, FLAMETHROWER_SALE_2.getTs())) @@ -257,6 +334,24 @@ public void should_delete_within_time_range() { FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); } + @Test + public void should_delete_within_time_range_statement() { + // should delete FLAMETHROWER_SALE_{1,3}, but not 4 because range ends before + SESSION_RULE + .session() + .execute( + saleDao.deleteInTimeRangeStatement( + FLAMETHROWER.getId(), + DATE_1, + 1, + FLAMETHROWER_SALE_1.getTs(), + Uuids.startOf(Uuids.unixTimestamp(FLAMETHROWER_SALE_4.getTs()) - 1000))); + + assertThat(saleDao.all().all()) + .containsOnly( + FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4, FLAMETHROWER_SALE_5, MP3_DOWNLOAD_SALE_1); + } + @Test public void should_delete_if_price_matches_custom_where() { ResultSet result = @@ -275,6 +370,26 @@ public void should_delete_if_price_matches_custom_where() { assertThat(result.wasApplied()).isTrue(); } + @Test + public void should_delete_if_price_matches_custom_where_statement() { + BoundStatement bs = + saleDao.deleteCustomWhereCustomIfStatement( + 2, FLAMETHROWER.getId(), DATE_1, FLAMETHROWER_SALE_2.getTs(), 250.0); + ResultSet result = SESSION_RULE.session().execute(bs); + + assertThat(result.wasApplied()).isFalse(); + Row row = result.one(); + assertThat(row).isNotNull(); + assertThat(row.getDouble("price")).isEqualTo(500.0); + + bs = + saleDao.deleteCustomWhereCustomIfStatement( + 2, FLAMETHROWER.getId(), DATE_1, FLAMETHROWER_SALE_2.getTs(), 500.0); + result = SESSION_RULE.session().execute(bs); + + assertThat(result.wasApplied()).isTrue(); + } + @Mapper public interface InventoryMapper { @DaoFactory @@ -300,6 +415,9 @@ public interface ProductDao { @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription); + @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") + BoundStatement deleteIfDescriptionMatchesStatement(UUID productId, String expectedDescription); + @Delete 
CompletionStage deleteAsync(Product product); @@ -330,18 +448,35 @@ public interface ProductSaleDao { @Delete(entityClass = ProductSale.class) ResultSet deleteByIdForDay(UUID id, String day); + // delete all rows in partition + @Delete(entityClass = ProductSale.class) + BoundStatement deleteByIdForDayStatement(UUID id, String day); + // delete by partition key and partial clustering key @Delete(entityClass = ProductSale.class) ResultSet deleteByIdForCustomer(UUID id, String day, int customerId); + // delete by partition key and partial clustering key + @Delete(entityClass = ProductSale.class) + BoundStatement deleteByIdForCustomerStatement(UUID id, String day, int customerId); + // delete row (full primary key) @Delete(entityClass = ProductSale.class) ResultSet deleteByIdForCustomerAtTime(UUID id, String day, int customerId, UUID ts); + // delete row (full primary key) + @Delete(entityClass = ProductSale.class) + BoundStatement deleteByIdForCustomerAtTimeStatement( + UUID id, String day, int customerId, UUID ts); + @Delete(entityClass = ProductSale.class, customIfClause = "price = :expectedPrice") ResultSet deleteIfPriceMatches( UUID id, String day, int customerId, UUID ts, double expectedPrice); + @Delete(entityClass = ProductSale.class, customIfClause = "price = :expectedPrice") + BoundStatement deleteIfPriceMatchesStatement( + UUID id, String day, int customerId, UUID ts, double expectedPrice); + @Delete( entityClass = ProductSale.class, customWhereClause = @@ -349,6 +484,14 @@ ResultSet deleteIfPriceMatches( + ":endTs") ResultSet deleteInTimeRange(UUID id, String day, int customerId, UUID startTs, UUID endTs); + @Delete( + entityClass = ProductSale.class, + customWhereClause = + "id = :id and day = :day and customer_id = :customerId and ts >= :startTs and ts < " + + ":endTs") + BoundStatement deleteInTimeRangeStatement( + UUID id, String day, int customerId, UUID startTs, UUID endTs); + // transpose order of parameters so doesn't match primary key to ensure that works. @Delete( entityClass = ProductSale.class, @@ -357,6 +500,14 @@ ResultSet deleteIfPriceMatches( ResultSet deleteCustomWhereCustomIf( int customerId, UUID id, String day, UUID ts, double expectedPrice); + // transpose order of parameters so doesn't match primary key to ensure that works. + @Delete( + entityClass = ProductSale.class, + customWhereClause = "id = :id and day = :day and customer_id = :customerId and ts = :ts", + customIfClause = "price = :expectedPrice") + BoundStatement deleteCustomWhereCustomIfStatement( + int customerId, UUID id, String day, UUID ts, double expectedPrice); + @Delete(entityClass = ProductSale.class, ifExists = true) boolean deleteIfExists(UUID id, String day, int customerId, UUID ts); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteReactiveIT.java new file mode 100644 index 00000000000..2eb898021ba --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/DeleteReactiveIT.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Delete; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import io.reactivex.Flowable; +import java.util.UUID; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +// Do not run LWT tests in parallel because they may interfere. Tests operate on the same row. 
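+// Each @Delete method below returns a ReactiveResultSet; tests drain it with +// Flowable.fromPublisher(...): an unconditional delete completes empty, while a rejected +// conditional delete emits a single row whose wasApplied() is false.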
+public class DeleteReactiveIT extends InventoryITBase { + + private static CustomCcmRule ccmRule = configureCcm(CustomCcmRule.builder()).build(); + + private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + private static CustomCcmRule.Builder configureCcm(CustomCcmRule.Builder builder) { + if (!CcmBridge.isDistributionOf( + BackendType.DSE, (dist, cass) -> cass.nextStable().compareTo(Version.V4_0_0) >= 0)) { + builder.withCassandraConfiguration("enable_sasi_indexes", true); + } + return builder; + } + + private static DseProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = sessionRule.session(); + + for (String query : createStatements(ccmRule)) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + } + + DseInventoryMapper inventoryMapper = + new DeleteReactiveIT_DseInventoryMapperBuilder(session).build(); + dao = inventoryMapper.productDao(sessionRule.keyspace()); + } + + @Before + public void insertFixtures() { + Flowable.fromPublisher(dao.saveReactive(FLAMETHROWER)).blockingSubscribe(); + } + + @Test + public void should_delete_entity_reactive() { + UUID id = FLAMETHROWER.getId(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); + + ReactiveResultSet rs = dao.deleteEntityReactive(FLAMETHROWER); + ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); + + assertThat(row).isNull(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) + .isNull(); + } + + @Test + public void should_delete_by_id_reactive() { + UUID id = FLAMETHROWER.getId(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); + + ReactiveResultSet rs = dao.deleteByIdReactive(id); + ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); + + assertThat(row).isNull(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) + .isNull(); + + // Non-existing id should be silently ignored + rs = dao.deleteByIdReactive(id); + row = Flowable.fromPublisher(rs).singleElement().blockingGet(); + + assertThat(row).isNull(); + } + + @Test + public void should_delete_if_exists_reactive() { + UUID id = FLAMETHROWER.getId(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); + { + ReactiveResultSet rs = dao.deleteIfExistsReactive(FLAMETHROWER); + ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); + assertThat(row.wasApplied()).isTrue(); + assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isTrue(); + } + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) + .isNull(); + { + ReactiveResultSet rs = dao.deleteIfExistsReactive(FLAMETHROWER); + ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); + assertThat(row.wasApplied()).isFalse(); + assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isFalse(); + } + } + + @Test + public void should_delete_with_condition_reactive() { + UUID id = FLAMETHROWER.getId(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()).isNotNull(); + { + ReactiveResultSet rs = dao.deleteIfDescriptionMatchesReactive(id, "foo"); + ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); + assertThat(row.wasApplied()).isFalse(); + 
assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isFalse(); + assertThat(row.getString("description")).isEqualTo(FLAMETHROWER.getDescription()); + } + { + ReactiveResultSet rs = + dao.deleteIfDescriptionMatchesReactive(id, FLAMETHROWER.getDescription()); + ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); + assertThat(row.wasApplied()).isTrue(); + assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isTrue(); + } + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) + .isNull(); + } + + @Mapper + public interface DseInventoryMapper { + @DaoFactory + DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface DseProductDao { + + @Delete + ReactiveResultSet deleteEntityReactive(Product product); + + @Delete(entityClass = Product.class) + ReactiveResultSet deleteByIdReactive(UUID productId); + + @Delete(ifExists = true) + ReactiveResultSet deleteIfExistsReactive(Product product); + + @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") + ReactiveResultSet deleteIfDescriptionMatchesReactive( + UUID productId, String expectedDescription); + + @Select + MappedReactiveResultSet findByIdReactive(UUID productId); + + @Insert + ReactiveResultSet saveReactive(Product product); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java index 2905f0d694d..3e532e97c00 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/EntityPolymorphismIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,6 +47,7 @@ import com.datastax.oss.driver.api.mapper.annotations.Update; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; @@ -69,33 +72,39 @@ @Category(ParallelizableTests.class) @RunWith(DataProviderRunner.class) public class EntityPolymorphismIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - private static TestMapper mapper; + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static TestMapper mapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); - for (String query : - ImmutableList.of( - "CREATE TYPE point2d (\"X\" int, \"Y\" int)", - "CREATE TYPE point3d (\"X\" int, \"Y\" int, \"Z\" int)", - "CREATE TABLE circles (circle_id uuid PRIMARY KEY, center2d frozen<point2d>, radius " - + "double, tags set<text>)", - "CREATE TABLE rectangles (rect_id uuid PRIMARY KEY, bottom_left frozen<point2d>, top_right frozen<point2d>, tags set<text>)", - "CREATE TABLE squares (square_id uuid PRIMARY KEY, bottom_left frozen<point2d>, top_right frozen<point2d>, tags set<text>)", - "CREATE TABLE spheres (sphere_id uuid PRIMARY KEY, center3d frozen<point3d>, radius " - + "double, tags set<text>)", - "CREATE TABLE devices (device_id uuid PRIMARY KEY, name text)", - "CREATE TABLE tracked_devices (device_id uuid PRIMARY KEY, name text, location text)", - "CREATE TABLE simple_devices (id uuid PRIMARY KEY, in_use boolean)")) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } + CqlSession session = SESSION_RULE.session(); + SchemaChangeSynchronizer.withLock( + () -> { + for (String query : + ImmutableList.of( + "CREATE TYPE point2d (\"X\" int, \"Y\" int)", + "CREATE TYPE point3d (\"X\" int, \"Y\" int, \"Z\" int)", + "CREATE TABLE circles (circle_id uuid PRIMARY KEY, center2d frozen<point2d>, radius " + + "double, tags set<text>)", + "CREATE TABLE rectangles (rect_id uuid PRIMARY KEY, bottom_left frozen<point2d>, top_right frozen<point2d>, tags set<text>)", + "CREATE TABLE squares (square_id uuid PRIMARY KEY, bottom_left frozen<point2d>, top_right frozen<point2d>, tags set<text>)", + "CREATE TABLE spheres (sphere_id uuid PRIMARY KEY, center3d frozen<point3d>, radius " + + "double, tags set<text>)", + "CREATE TABLE devices (device_id uuid PRIMARY KEY, name text)", + "CREATE TABLE tracked_devices (device_id uuid PRIMARY KEY, name text, location text)", + "CREATE TABLE simple_devices (id uuid PRIMARY KEY, in_use boolean)")) { + session.execute( + SimpleStatement.builder(query) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + }); mapper = new EntityPolymorphismIT_TestMapperBuilder(session).build(); } @@ -247,8 +256,8 @@
public void should_set_and_get_entity_then_update_then_delete( Consumer updater, SimpleStatement insertStatement, SimpleStatement selectStatement) { - BaseDao dao = daoProvider.apply(sessionRule.keyspace()); - CqlSession session = sessionRule.session(); + BaseDao dao = daoProvider.apply(SESSION_RULE.keyspace()); + CqlSession session = SESSION_RULE.session(); PreparedStatement prepared = session.prepare(insertStatement); BoundStatementBuilder bs = prepared.boundStatementBuilder(); @@ -280,7 +289,7 @@ public void should_save_and_retrieve_circle() { // * annotations, but these are primarily used for // verifying inheritance behavior in Sphere. // * verifies writeTime is set. - CircleDao dao = mapper.circleDao(sessionRule.keyspace()); + CircleDao dao = mapper.circleDao(SESSION_RULE.keyspace()); long writeTime = System.currentTimeMillis() - 1000; Circle circle = new Circle(new Point2D(11, 22), 12.34); @@ -297,7 +306,7 @@ public void should_save_and_retrieve_rectangle() { // * CqlName("rect_id") on getId renames id property to rect_id // * annotations work, but these are primarily used for // verifying inheritance behavior in Square. - RectangleDao dao = mapper.rectangleDao(sessionRule.keyspace()); + RectangleDao dao = mapper.rectangleDao(SESSION_RULE.keyspace()); Rectangle rectangle = new Rectangle(new Point2D(20, 30), new Point2D(50, 60)); dao.save(rectangle); @@ -312,7 +321,7 @@ public void should_save_and_retrieve_square() { // * height remains transient even though we define field/getter/setter // * getBottomLeft() retains CqlName from parent. // * verifies writeTime is set. - SquareDao dao = mapper.squareDao(sessionRule.keyspace()); + SquareDao dao = mapper.squareDao(SESSION_RULE.keyspace()); long writeTime = System.currentTimeMillis() - 1000; Square square = new Square(new Point2D(20, 30), new Point2D(50, 60)); @@ -333,7 +342,7 @@ public void should_save_and_retrieve_sphere() { // * Override setRadius to return Sphere causes no issues. // * Interface method getVolume() is skipped because no field exists. // * WriteTime is inherited, so queried and set. - SphereDao dao = mapper.sphereDao(sessionRule.keyspace()); + SphereDao dao = mapper.sphereDao(SESSION_RULE.keyspace()); long writeTime = System.currentTimeMillis() - 1000; Sphere sphere = new Sphere(new Point3D(11, 22, 33), 34.56); @@ -349,7 +358,7 @@ public void should_save_and_retrieve_device() throws Exception { // verifies the hierarchy scanner behavior around Device: // * by virtue of Assert setting highestAncestor to Asset.class, location property from // LocatableItem should not be included - DeviceDao dao = mapper.deviceDao(sessionRule.keyspace(), CqlIdentifier.fromCql("devices")); + DeviceDao dao = mapper.deviceDao(SESSION_RULE.keyspace(), CqlIdentifier.fromCql("devices")); // save should be successful as location property omitted. Device device = new Device("my device", "New York"); @@ -377,7 +386,7 @@ public void should_save_and_retrieve_tracked_device() throws Exception { // include LocatableItem's location property, even though Asset defines // a strategy that excludes it. 
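// (the strategy declared nearest to the concrete entity class takes precedence, which is why // TrackedDevice's own scan strategy overrides the one inherited from Asset)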
TrackedDeviceDao dao = - mapper.trackedDeviceDao(sessionRule.keyspace(), CqlIdentifier.fromCql("tracked_devices")); + mapper.trackedDeviceDao(SESSION_RULE.keyspace(), CqlIdentifier.fromCql("tracked_devices")); TrackedDevice device = new TrackedDevice("my device", "New York"); dao.save(device); @@ -400,7 +409,7 @@ public void should_save_and_retrieve_simple_device() { // verifies the hierarchy scanner behavior around SimpleDevice: // * Since SimpleDevice defines a @HierarchyScanStrategy that prevents // scanning of ancestors, only its properties (id, inUse) should be included. - SimpleDeviceDao dao = mapper.simpleDeviceDao(sessionRule.keyspace()); + SimpleDeviceDao dao = mapper.simpleDeviceDao(SESSION_RULE.keyspace()); SimpleDevice device = new SimpleDevice(true); dao.save(device); @@ -444,11 +453,14 @@ public void setY(int y) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Point2D point2D = (Point2D) o; - return x == point2D.x && y == point2D.y; + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Point2D) { + Point2D that = (Point2D) other; + return this.x == that.x && this.y == that.y; + } else { + return false; + } } @Override @@ -478,12 +490,15 @@ public void setZ(int z) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - Point3D point3D = (Point3D) o; - return z == point3D.z; + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Point3D) { + Point3D that = (Point3D) other; + return super.equals(that) && this.z == that.z; + } else { + return false; + } } @Override @@ -535,11 +550,15 @@ public void setTags(Set<String> tags) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Shape shape = (Shape) o; - return Objects.equals(id, shape.id) && Objects.equals(tags, shape.tags); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Shape) { + Shape that = (Shape) other; + return Objects.equals(id, that.id) && Objects.equals(tags, that.tags); + } else { + return false; + } } @Override @@ -579,7 +598,7 @@ public UUID getId() { @Override public double getArea() { - return Math.PI * (Math.pow(getRadius(), 2)); + return Math.PI * Math.pow(getRadius(), 2); } public double getRadius() { @@ -610,12 +629,17 @@ public void setCenter(Point2D center) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - Circle circle = (Circle) o; - return Double.compare(circle.radius, radius) == 0 && center.equals(circle.center); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Circle) { + Circle that = (Circle) other; + return super.equals(that) + && Double.compare(that.radius, radius) == 0 + && center.equals(that.center); + } else { + return false; + } } @Override @@ -679,12 +703,17 @@ public double getArea() { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - Rectangle rectangle = (Rectangle) o; - return bottomLeft.equals(rectangle.bottomLeft) &&
topRight.equals(rectangle.topRight); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Rectangle) { + Rectangle that = (Rectangle) other; + return super.equals(that) + && bottomLeft.equals(that.bottomLeft) + && topRight.equals(that.topRight); + } else { + return false; + } } @Override @@ -697,8 +726,6 @@ public int hashCode() { @Entity static class Square extends Rectangle implements WriteTimeProvider { - private Point2D height; - @Computed("writetime(bottom_left)") private long writeTime; @@ -787,12 +814,15 @@ public double getVolume() { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - Sphere sphere = (Sphere) o; - return writeTime == sphere.writeTime; + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Sphere) { + Sphere that = (Sphere) other; + return super.equals(that) && writeTime == that.writeTime; + } else { + return false; + } } @Override @@ -820,11 +850,15 @@ public void setLocation(String location) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - LocatableItem that = (LocatableItem) o; - return Objects.equals(location, that.location); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof LocatableItem) { + LocatableItem that = (LocatableItem) other; + return Objects.equals(this.location, that.location); + } else { + return false; + } } @Override @@ -855,12 +889,15 @@ public void setName(String name) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - Asset asset = (Asset) o; - return Objects.equals(name, asset.name); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Asset) { + Asset that = (Asset) other; + return super.equals(that) && Objects.equals(this.name, that.name); + } else { + return false; + } } @Override @@ -894,12 +931,15 @@ public void setId(UUID id) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - Device device = (Device) o; - return id.equals(device.id); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof Device) { + Device that = (Device) other; + return super.equals(that) && this.id.equals(that.id); + } else { + return false; + } } @Override @@ -960,12 +1000,15 @@ public void setId(UUID id) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; - SimpleDevice that = (SimpleDevice) o; - return inUse == that.inUse && id.equals(that.id); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof SimpleDevice) { + SimpleDevice that = (SimpleDevice) other; + return super.equals(that) && this.inUse == that.inUse && this.id.equals(that.id); + } else { + return false; + } } @Override diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/FluentEntityIT.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/FluentEntityIT.java new file mode 100644 index 00000000000..fa93e4e768b --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/FluentEntityIT.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle.FLUENT; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.CqlName; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import java.util.Objects; +import java.util.UUID; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class FluentEntityIT extends InventoryITBase { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static FluentProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + + for (String query : createStatements(CCM_RULE)) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); + } + + InventoryMapper mapper = InventoryMapper.builder(session).build(); + 
dao = mapper.immutableProductDao(SESSION_RULE.keyspace()); + } + + @Test + public void should_insert_and_retrieve_immutable_entities() { + FluentProduct originalProduct = + new FluentProduct() + .id(UUID.randomUUID()) + .description("mock description") + .dimensions(new Dimensions(1, 2, 3)); + dao.save(originalProduct); + + FluentProduct retrievedProduct = dao.findById(originalProduct.id()); + assertThat(retrievedProduct).isEqualTo(originalProduct); + } + + @Entity + @CqlName("product") + @PropertyStrategy(getterStyle = FLUENT, setterStyle = SetterStyle.FLUENT) + public static class FluentProduct { + @PartitionKey private UUID id; + private String description; + private Dimensions dimensions; + + public UUID id() { + return id; + } + + public FluentProduct id(UUID id) { + this.id = id; + return this; + } + + public String description() { + return description; + } + + public FluentProduct description(String description) { + this.description = description; + return this; + } + + public Dimensions dimensions() { + return dimensions; + } + + public FluentProduct dimensions(Dimensions dimensions) { + this.dimensions = dimensions; + return this; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof FluentProduct) { + FluentProduct that = (FluentProduct) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.description, that.description) + && Objects.equals(this.dimensions, that.dimensions); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(id, description, dimensions); + } + } + + @Mapper + public interface InventoryMapper { + static MapperBuilder builder(CqlSession session) { + return new FluentEntityIT_InventoryMapperBuilder(session); + } + + @DaoFactory + FluentProductDao immutableProductDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface FluentProductDao { + @Select + FluentProduct findById(UUID productId); + + @Insert + void save(FluentProduct product); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java index a3ea7b53517..d3f3eec93ae 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GetEntityIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.mapper; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -25,6 +28,8 @@ import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.api.mapper.annotations.Dao; import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; @@ -38,6 +43,8 @@ import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; import com.datastax.oss.driver.shaded.guava.common.collect.Sets; +import java.util.UUID; +import java.util.stream.Stream; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -48,37 +55,53 @@ @Category(ParallelizableTests.class) public class GetEntityIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final UUID PRODUCT_2D_ID = UUID.randomUUID(); private static ProductDao dao; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); - for (String query : createStatements(ccm)) { + for (String query : createStatements(CCM_RULE)) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } + UserDefinedType dimensions2d = + session + .getKeyspace() + .flatMap(ks -> session.getMetadata().getKeyspace(ks)) + .flatMap(ks -> ks.getUserDefinedType("dimensions2d")) + .orElseThrow(AssertionError::new); + session.execute( + "INSERT INTO product2d (id, description, dimensions) VALUES (?, ?, ?)", + PRODUCT_2D_ID, + "2D product", + dimensions2d.newValue(12, 34)); + InventoryMapper inventoryMapper = new GetEntityIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); dao.save(FLAMETHROWER); dao.save(MP3_DOWNLOAD); } @Test - public void should_get_entity_from_row() { - CqlSession session = sessionRule.session(); + public void should_get_entity_from_complete_row() { + CqlSession session = SESSION_RULE.session(); ResultSet rs = session.execute( SimpleStatement.newInstance( - "SELECT * FROM product WHERE id = ?", FLAMETHROWER.getId())); + "SELECT id, description, dimensions, now() FROM 
product WHERE id = ?", + FLAMETHROWER.getId())); Row row = rs.one(); assertThat(row).isNotNull(); @@ -86,9 +109,87 @@ public void should_get_entity_from_row() { assertThat(product).isEqualTo(FLAMETHROWER); } + @Test + public void should_not_get_entity_from_partial_row_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT id, description, now() FROM product WHERE id = ?", FLAMETHROWER.getId())); + Row row = rs.one(); + assertThat(row).isNotNull(); + + Throwable error = catchThrowable(() -> dao.get(row)); + assertThat(error).hasMessage("dimensions is not a column in this row"); + } + + @Test + public void should_get_entity_from_partial_row_when_lenient() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT id, dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID)); + Row row = rs.one(); + assertThat(row).isNotNull(); + + Product product = dao.getLenient(row); + assertThat(product.getId()).isEqualTo(PRODUCT_2D_ID); + assertThat(product.getDescription()).isNull(); + assertThat(product.getDimensions()).isNotNull(); + assertThat(product.getDimensions().getWidth()).isEqualTo(12); + assertThat(product.getDimensions().getHeight()).isEqualTo(34); + assertThat(product.getDimensions().getLength()).isZero(); + } + + @Test + public void should_get_entity_from_complete_udt_value() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT dimensions FROM product WHERE id = ?", FLAMETHROWER.getId())); + Row row = rs.one(); + assertThat(row).isNotNull(); + + Dimensions dimensions = dao.get(row.getUdtValue(0)); + assertThat(dimensions).isEqualTo(FLAMETHROWER.getDimensions()); + } + + @Test + public void should_not_get_entity_from_partial_udt_value_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID)); + Row row = rs.one(); + assertThat(row).isNotNull(); + + Throwable error = catchThrowable(() -> dao.get(row.getUdtValue(0))); + assertThat(error).hasMessage("length is not a field in this UDT"); + } + + @Test + public void should_get_entity_from_partial_udt_value_when_lenient() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID)); + Row row = rs.one(); + assertThat(row).isNotNull(); + + Dimensions dimensions = dao.getLenient(row.getUdtValue(0)); + assertThat(dimensions).isNotNull(); + assertThat(dimensions.getWidth()).isEqualTo(12); + assertThat(dimensions.getHeight()).isEqualTo(34); + assertThat(dimensions.getLength()).isZero(); + } + @Test public void should_get_entity_from_first_row_of_result_set() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); ResultSet rs = session.execute("SELECT * FROM product"); Product product = dao.getOne(rs); @@ -98,7 +199,7 @@ public void should_get_entity_from_first_row_of_result_set() { @Test public void should_get_entity_from_first_row_of_async_result_set() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); AsyncResultSet rs = CompletableFutures.getUninterruptibly(session.executeAsync("SELECT * FROM product")); @@ -109,15 +210,23 @@ public void 
should_get_entity_from_first_row_of_async_result_set() { @Test public void should_get_iterable_from_result_set() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); ResultSet rs = session.execute("SELECT * FROM product"); PagingIterable<Product> products = dao.get(rs); assertThat(Sets.newHashSet(products)).containsOnly(FLAMETHROWER, MP3_DOWNLOAD); } + @Test + public void should_get_stream_from_result_set() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = session.execute("SELECT * FROM product"); + Stream<Product> products = dao.getAsStream(rs); + assertThat(products).containsOnly(FLAMETHROWER, MP3_DOWNLOAD); + } + @Test public void should_get_async_iterable_from_async_result_set() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); AsyncResultSet rs = CompletableFutures.getUninterruptibly(session.executeAsync("SELECT * FROM product")); MappedAsyncPagingIterable<Product> products = dao.get(rs); @@ -134,12 +243,25 @@ public interface InventoryMapper { @Dao @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) public interface ProductDao { + @GetEntity Product get(Row row); + @GetEntity(lenient = true) + Product getLenient(Row row); + + @GetEntity + Dimensions get(UdtValue row); + + @GetEntity(lenient = true) + Dimensions getLenient(UdtValue row); + @GetEntity PagingIterable<Product> get(ResultSet resultSet); + @GetEntity + Stream<Product> getAsStream(ResultSet resultSet); + @GetEntity MappedAsyncPagingIterable<Product> get(AsyncResultSet resultSet); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GuavaFutureProducerService.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GuavaFutureProducerService.java new file mode 100644 index 00000000000..759b01a4e20 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/GuavaFutureProducerService.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.mapper; + +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Futures; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.ListenableFuture; +import com.datastax.oss.driver.shaded.guava.common.util.concurrent.SettableFuture; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +public class GuavaFutureProducerService implements MapperResultProducerService { + + @Override + public Iterable<MapperResultProducer> getProducers() { + return ImmutableList.of( + // Note that order matters: both producers operate on ListenableFuture, + // the most specific must come first. + new VoidListenableFutureProducer(), new SingleEntityListenableFutureProducer()); + } + + public abstract static class ListenableFutureProducer implements MapperResultProducer { + + @Nullable + @Override + public ListenableFuture<Object> execute( + @NonNull Statement<?> statement, + @NonNull MapperContext context, + @Nullable EntityHelper<?> entityHelper) { + SettableFuture<Object> result = SettableFuture.create(); + context + .getSession() + .executeAsync(statement) + .whenComplete( + (resultSet, error) -> { + if (error != null) { + result.setException(error); + } else { + result.set(convert(resultSet, entityHelper)); + } + }); + return result; + } + + @Nullable + protected abstract Object convert( + @NonNull AsyncResultSet resultSet, @Nullable EntityHelper<?> entityHelper); + + @Nullable + @Override + public ListenableFuture<Object> wrapError(@NonNull Exception e) { + return Futures.immediateFailedFuture(e); + } + } + + public static class VoidListenableFutureProducer extends ListenableFutureProducer { + + private static final GenericType<ListenableFuture<Void>> PRODUCED_TYPE = + new GenericType<ListenableFuture<Void>>() {}; + + @Override + public boolean canProduce(@NonNull GenericType<?> resultType) { + return resultType.equals(PRODUCED_TYPE); + } + + @Nullable + @Override + protected Object convert( + @NonNull AsyncResultSet resultSet, @Nullable EntityHelper<?> entityHelper) { + // ignore results + return null; + } + } + + public static class SingleEntityListenableFutureProducer extends ListenableFutureProducer { + + @Override + public boolean canProduce(@NonNull GenericType<?> resultType) { + return resultType.getRawType().equals(ListenableFuture.class); + } + + @Nullable + @Override + protected Object convert( + @NonNull AsyncResultSet resultSet, @Nullable EntityHelper<?> entityHelper) { + assert entityHelper != null; + Row row = resultSet.one(); + return (row == null) ?
null : entityHelper.get(row, false); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ImmutableEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ImmutableEntityIT.java new file mode 100644 index 00000000000..bdfe92a23f9 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ImmutableEntityIT.java @@ -0,0 +1,309 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle.FLUENT; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.Computed; +import com.datastax.oss.driver.api.mapper.annotations.CqlName; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.GetEntity; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import java.util.Objects; +import java.util.UUID; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class ImmutableEntityIT extends InventoryITBase { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + 
public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static final UUID PRODUCT_2D_ID = UUID.randomUUID(); + + private static ImmutableProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + + SchemaChangeSynchronizer.withLock( + () -> { + for (String query : createStatements(CCM_RULE)) { + session.execute( + SimpleStatement.builder(query) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + }); + + UserDefinedType dimensions2d = + session + .getKeyspace() + .flatMap(ks -> session.getMetadata().getKeyspace(ks)) + .flatMap(ks -> ks.getUserDefinedType("dimensions2d")) + .orElseThrow(AssertionError::new); + session.execute( + "INSERT INTO product2d (id, description, dimensions) VALUES (?, ?, ?)", + PRODUCT_2D_ID, + "2D product", + dimensions2d.newValue(12, 34)); + + InventoryMapper mapper = InventoryMapper.builder(session).build(); + dao = mapper.immutableProductDao(SESSION_RULE.keyspace()); + } + + @Test + public void should_insert_and_retrieve_immutable_entities() { + ImmutableProduct originalProduct = + new ImmutableProduct( + UUID.randomUUID(), "mock description", new ImmutableDimensions(1, 2, 3), -1); + dao.save(originalProduct); + + ImmutableProduct retrievedProduct = dao.findById(originalProduct.id()); + assertThat(retrievedProduct).isEqualTo(originalProduct); + } + + @Test + public void should_map_immutable_entity_from_complete_row() { + ImmutableProduct originalProduct = + new ImmutableProduct( + UUID.randomUUID(), "mock description", new ImmutableDimensions(1, 2, 3), -1); + dao.save(originalProduct); + Row row = + SESSION_RULE + .session() + .execute( + "SELECT id, description, dimensions, writetime(description) AS writetime, now() " + + "FROM product WHERE id = ?", + originalProduct.id()) + .one(); + ImmutableProduct retrievedProduct = dao.mapStrict(row); + assertThat(retrievedProduct.id()).isEqualTo(originalProduct.id()); + assertThat(retrievedProduct.description()).isEqualTo(originalProduct.description()); + assertThat(retrievedProduct.dimensions()).isEqualTo(originalProduct.dimensions()); + assertThat(retrievedProduct.writetime()).isGreaterThan(0); + } + + @Test + public void should_map_immutable_entity_from_partial_row_when_lenient() { + Row row = + SESSION_RULE + .session() + .execute("SELECT id, dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID) + .one(); + ImmutableProduct retrievedProduct = dao.mapLenient(row); + assertThat(retrievedProduct.id()).isEqualTo(PRODUCT_2D_ID); + assertThat(retrievedProduct.dimensions()).isEqualTo(new ImmutableDimensions(0, 12, 34)); + assertThat(retrievedProduct.description()).isNull(); + assertThat(retrievedProduct.writetime()).isZero(); + } + + @Test + public void should_map_immutable_entity_from_complete_udt() { + ImmutableProduct originalProduct = + new ImmutableProduct( + UUID.randomUUID(), "mock description", new ImmutableDimensions(1, 2, 3), -1); + dao.save(originalProduct); + Row row = + SESSION_RULE + .session() + .execute("SELECT dimensions FROM product WHERE id = ?", originalProduct.id()) + .one(); + assertThat(row).isNotNull(); + ImmutableDimensions retrievedDimensions = dao.mapStrict(row.getUdtValue(0)); + assertThat(retrievedDimensions).isEqualTo(originalProduct.dimensions()); + } + + @Test + public void should_map_immutable_entity_from_partial_udt_when_lenient() { + Row row = + SESSION_RULE + .session() + .execute("SELECT dimensions FROM product2d WHERE id = ?", PRODUCT_2D_ID) + .one(); + 
assertThat(row).isNotNull(); + ImmutableDimensions retrievedDimensions = dao.mapLenient(row.getUdtValue(0)); + assertThat(retrievedDimensions).isEqualTo(new ImmutableDimensions(0, 12, 34)); + } + + @Entity + @CqlName("product") + @PropertyStrategy(getterStyle = FLUENT, mutable = false) + public static class ImmutableProduct { + @PartitionKey private final UUID id; + private final String description; + private final ImmutableDimensions dimensions; + + @Computed("writetime(description)") + private final long writetime; + + public ImmutableProduct( + UUID id, String description, ImmutableDimensions dimensions, long writetime) { + this.id = id; + this.description = description; + this.dimensions = dimensions; + this.writetime = writetime; + } + + public UUID id() { + return id; + } + + public String description() { + return description; + } + + public ImmutableDimensions dimensions() { + return dimensions; + } + + public long writetime() { + return writetime; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof ImmutableProduct) { + ImmutableProduct that = (ImmutableProduct) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.description, that.description) + && Objects.equals(this.dimensions, that.dimensions); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(id, description, dimensions); + } + } + + @Entity + @PropertyStrategy(mutable = false) + public static class ImmutableDimensions { + + private final int length; + private final int width; + private final int height; + + public ImmutableDimensions(int length, int width, int height) { + this.length = length; + this.width = width; + this.height = height; + } + + public int getLength() { + return length; + } + + public int getWidth() { + return width; + } + + public int getHeight() { + return height; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof ImmutableDimensions) { + ImmutableDimensions that = (ImmutableDimensions) other; + return this.length == that.length && this.width == that.width && this.height == that.height; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(length, width, height); + } + + @Override + public String toString() { + return "Dimensions{length=" + length + ", width=" + width + ", height=" + height + '}'; + } + } + + @Mapper + public interface InventoryMapper { + static MapperBuilder builder(CqlSession session) { + return new ImmutableEntityIT_InventoryMapperBuilder(session); + } + + @DaoFactory + ImmutableProductDao immutableProductDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface ImmutableProductDao { + @Select + ImmutableProduct findById(UUID productId); + + @Insert + void save(ImmutableProduct product); + + @GetEntity + ImmutableProduct mapStrict(Row row); + + @GetEntity(lenient = true) + ImmutableProduct mapLenient(Row row); + + @GetEntity + ImmutableDimensions mapStrict(UdtValue udt); + + @GetEntity(lenient = true) + ImmutableDimensions mapLenient(UdtValue udt); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementIT.java new file mode 100644 index 00000000000..16b6668ea56 --- /dev/null +++ 
b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementIT.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Increment; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import java.util.Objects; +import java.util.UUID; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class IncrementIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static ProductRatingDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + + session.execute( + SimpleStatement.builder( + "CREATE TABLE product_rating(product_id uuid PRIMARY KEY, " + + "one_star counter, two_star counter, three_star counter, " + + "four_star counter, five_star counter)") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + + InventoryMapper inventoryMapper = new IncrementIT_InventoryMapperBuilder(session).build(); + dao = inventoryMapper.productRatingDao(SESSION_RULE.keyspace()); + } + + @Test + public void should_increment_counters() { + UUID productId1 = UUID.randomUUID(); + UUID productId2 = UUID.randomUUID(); + + dao.incrementFiveStar(productId1, 1); + dao.incrementFiveStar(productId1, 1); + dao.incrementFourStar(productId1, 1); + + dao.incrementTwoStar(productId2, 1); + 
dao.incrementThreeStar(productId2, 1); + dao.incrementOneStar(productId2, 1); + + ProductRating product1Totals = dao.get(productId1); + assertThat(product1Totals.getFiveStar()).isEqualTo(2); + assertThat(product1Totals.getFourStar()).isEqualTo(1); + assertThat(product1Totals.getThreeStar()).isEqualTo(0); + assertThat(product1Totals.getTwoStar()).isEqualTo(0); + assertThat(product1Totals.getOneStar()).isEqualTo(0); + + ProductRating product2Totals = dao.get(productId2); + assertThat(product2Totals.getFiveStar()).isEqualTo(0); + assertThat(product2Totals.getFourStar()).isEqualTo(0); + assertThat(product2Totals.getThreeStar()).isEqualTo(1); + assertThat(product2Totals.getTwoStar()).isEqualTo(1); + assertThat(product2Totals.getOneStar()).isEqualTo(1); + } + + @Mapper + public interface InventoryMapper { + @DaoFactory + ProductRatingDao productRatingDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface ProductRatingDao { + @Select + ProductRating get(UUID productId); + + @Increment(entityClass = ProductRating.class) + void incrementOneStar(UUID productId, long oneStar); + + @Increment(entityClass = ProductRating.class) + void incrementTwoStar(UUID productId, long twoStar); + + @Increment(entityClass = ProductRating.class) + void incrementThreeStar(UUID productId, long threeStar); + + @Increment(entityClass = ProductRating.class) + void incrementFourStar(UUID productId, long fourStar); + + @Increment(entityClass = ProductRating.class) + void incrementFiveStar(UUID productId, long fiveStar); + } + + @Entity + public static class ProductRating { + + @PartitionKey private UUID productId; + private long oneStar; + private long twoStar; + private long threeStar; + private long fourStar; + private long fiveStar; + + public ProductRating() {} + + public UUID getProductId() { + return productId; + } + + public void setProductId(UUID productId) { + this.productId = productId; + } + + public long getOneStar() { + return oneStar; + } + + public void setOneStar(long oneStar) { + this.oneStar = oneStar; + } + + public long getTwoStar() { + return twoStar; + } + + public void setTwoStar(long twoStar) { + this.twoStar = twoStar; + } + + public long getThreeStar() { + return threeStar; + } + + public void setThreeStar(long threeStar) { + this.threeStar = threeStar; + } + + public long getFourStar() { + return fourStar; + } + + public void setFourStar(long fourStar) { + this.fourStar = fourStar; + } + + public long getFiveStar() { + return fiveStar; + } + + public void setFiveStar(long fiveStar) { + this.fiveStar = fiveStar; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof ProductRating) { + ProductRating that = (ProductRating) other; + return Objects.equals(this.productId, that.productId) + && this.oneStar == that.oneStar + && this.twoStar == that.twoStar + && this.threeStar == that.threeStar + && this.fourStar == that.fourStar + && this.fiveStar == that.fiveStar; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(productId, oneStar, twoStar, threeStar, fourStar, fiveStar); + } + + @Override + public String toString() { + return String.format( + "ProductRating(id=%s, 1*=%d, 2*=%d, 3*=%d, 4*=%d, 5*=%d)", + productId, oneStar, twoStar, threeStar, fourStar, fiveStar); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementWithNullsIT.java 
b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementWithNullsIT.java new file mode 100644 index 00000000000..9020a80afed --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/IncrementWithNullsIT.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Increment; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.mapper.IncrementIT.ProductRating; +import java.util.UUID; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +@BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "2.2") +public class IncrementWithNullsIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static ProductRatingDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + + session.execute( + SimpleStatement.builder( + "CREATE TABLE product_rating(product_id uuid PRIMARY KEY, " + + "one_star counter, two_star counter, three_star counter, " + + "four_star counter, five_star counter)") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + + InventoryMapper inventoryMapper = + new IncrementWithNullsIT_InventoryMapperBuilder(session).build(); + dao = inventoryMapper.productRatingDao(SESSION_RULE.keyspace()); + } + + 
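The test below exercises `@Increment` with boxed `Long` parameters under `NullSavingStrategy.DO_NOT_SET`: arguments passed as `null` are left unset instead of being bound, so only the corresponding counters change. A hedged sketch of the hand-written equivalent for one counter (the UPDATE text is an assumption, not the generated query):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import java.util.UUID;

public class IncrementSketch {
  static void incrementFiveStarOnly(CqlSession session, UUID productId) {
    PreparedStatement ps =
        session.prepare(
            "UPDATE product_rating SET five_star = five_star + :five_star "
                + "WHERE product_id = :product_id");
    // Bind only the counter that changes; unbound variables are sent as
    // protocol "unset" values (supported since Cassandra 2.2 / protocol v4,
    // hence the minimum-version requirement on this test class).
    BoundStatement bs =
        ps.boundStatementBuilder()
            .setUuid("product_id", productId)
            .setLong("five_star", 1L)
            .build();
    session.execute(bs);
  }
}
```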
@Test + public void should_increment_counters() { + UUID productId1 = UUID.randomUUID(); + UUID productId2 = UUID.randomUUID(); + + dao.increment(productId1, null, null, null, null, 1L); + dao.increment(productId1, null, null, null, null, 1L); + dao.increment(productId1, null, null, null, 1L, null); + + dao.increment(productId2, null, 1L, null, null, null); + dao.increment(productId2, null, null, 1L, null, null); + dao.increment(productId2, 1L, null, null, null, null); + + ProductRating product1Totals = dao.get(productId1); + assertThat(product1Totals.getFiveStar()).isEqualTo(2); + assertThat(product1Totals.getFourStar()).isEqualTo(1); + assertThat(product1Totals.getThreeStar()).isEqualTo(0); + assertThat(product1Totals.getTwoStar()).isEqualTo(0); + assertThat(product1Totals.getOneStar()).isEqualTo(0); + + ProductRating product2Totals = dao.get(productId2); + assertThat(product2Totals.getFiveStar()).isEqualTo(0); + assertThat(product2Totals.getFourStar()).isEqualTo(0); + assertThat(product2Totals.getThreeStar()).isEqualTo(1); + assertThat(product2Totals.getTwoStar()).isEqualTo(1); + assertThat(product2Totals.getOneStar()).isEqualTo(1); + } + + @Mapper + public interface InventoryMapper { + @DaoFactory + ProductRatingDao productRatingDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.DO_NOT_SET) + public interface ProductRatingDao { + @Select + ProductRating get(UUID productId); + + @Increment(entityClass = ProductRating.class) + void increment( + UUID productId, Long oneStar, Long twoStar, Long threeStar, Long fourStar, Long fiveStar); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java index c0f9129fc3b..5c7530bf69e 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; @@ -50,34 +53,34 @@ @Category(ParallelizableTests.class) public class InsertIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; - private static InventoryMapper inventoryMapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); - for (String query : createStatements(ccm)) { + for (String query : createStatements(CCM_RULE)) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } - inventoryMapper = new InsertIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); + InventoryMapper inventoryMapper = new InsertIT_InventoryMapperBuilder(session).build(); + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); } @Before public void clearProductData() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); } @@ -98,6 +101,15 @@ public void should_insert_entity_returning_result_set() { assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); } + @Test + public void should_return_bound_statement_to_execute() { + assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); + BoundStatement bs = dao.saveReturningBoundStatement(FLAMETHROWER); + ResultSet rs = SESSION_RULE.session().execute(bs); + assertThat(rs.getAvailableWithoutFetching()).isZero(); + assertThat(dao.findById(FLAMETHROWER.getId())).isEqualTo(FLAMETHROWER); + } + @Test public void should_insert_entity_asynchronously() { assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); @@ -123,7 +135,7 @@ public void should_insert_entity_with_bound_timestamp() { long timestamp = 1234; dao.saveWithBoundTimestamp(FLAMETHROWER, timestamp); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -141,7 +153,7 @@ public void should_insert_entity_with_literal_timestamp() { dao.saveWithLiteralTimestamp(FLAMETHROWER); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session 
.execute( @@ -160,7 +172,7 @@ public void should_insert_entity_with_bound_ttl() { int insertedTtl = 86400; dao.saveWithBoundTtl(FLAMETHROWER, insertedTtl); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -177,7 +189,7 @@ public void should_insert_entity_with_literal_ttl() { dao.saveWithLiteralTtl(FLAMETHROWER); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -195,7 +207,7 @@ public void should_insert_entity_with_bound_timestamp_asynchronously() { long timestamp = 1234; CompletableFutures.getUninterruptibly(dao.saveAsyncWithBoundTimestamp(FLAMETHROWER, timestamp)); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -289,6 +301,9 @@ public interface ProductDao { @Insert ResultSet saveReturningResultSet(Product product); + @Insert + BoundStatement saveReturningBoundStatement(Product product); + @Insert(timestamp = ":timestamp") void saveWithBoundTimestamp(Product product, long timestamp); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertReactiveIT.java new file mode 100644 index 00000000000..e9b9879fcb8 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InsertReactiveIT.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
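The `saveReturningBoundStatement` method added to `InsertIT` above shows that an `@Insert` method can return the `BoundStatement` instead of executing it, deferring execution to the caller. A brief usage sketch (the execution profile name is a hypothetical placeholder):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.BoundStatement;

public class DeferredInsertSketch {
  // Product and ProductDao as defined in the mapper tests above.
  static void saveDeferred(CqlSession session, InsertIT.ProductDao dao, Product product) {
    BoundStatement bs =
        dao.saveReturningBoundStatement(product)
            .setExecutionProfileName("slow") // hypothetical profile name
            .setTracing(true);
    session.execute(bs); // nothing is written until this point
  }
}
```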
+ */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import io.reactivex.Flowable; +import java.util.UUID; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class InsertReactiveIT extends InventoryITBase { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + private static DseProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = sessionRule.session(); + + for (String query : createStatements(ccmRule)) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + } + + DseInventoryMapper dseInventoryMapper = + new InsertReactiveIT_DseInventoryMapperBuilder(session).build(); + dao = dseInventoryMapper.productDao(sessionRule.keyspace()); + } + + @Before + public void clearProductData() { + CqlSession session = sessionRule.session(); + session.execute( + SimpleStatement.builder("TRUNCATE product") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + + @Test + public void should_insert_entity_returning_reactive_result_set() { + assertThat( + Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())) + .singleElement() + .blockingGet()) + .isNull(); + assertThat(Flowable.fromPublisher(dao.saveReactive(FLAMETHROWER)).singleElement().blockingGet()) + .isNull(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) + .isEqualTo(FLAMETHROWER); + } + + @Test + public void should_insert_entity_if_not_exists_reactive() { + UUID id = FLAMETHROWER.getId(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).singleElement().blockingGet()) + .isNull(); + { + ReactiveResultSet rs = dao.saveIfNotExistsReactive(FLAMETHROWER); + ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); + assertThat(row.wasApplied()).isTrue(); + assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isTrue(); + } + 
assertThat(Flowable.fromPublisher(dao.findByIdReactive(id)).blockingSingle()) + .isNotNull() + .isEqualTo(FLAMETHROWER); + { + ReactiveResultSet rs = dao.saveIfNotExistsReactive(FLAMETHROWER); + ReactiveRow row = Flowable.fromPublisher(rs).singleElement().blockingGet(); + assertThat(row.wasApplied()).isFalse(); + assertThat(Flowable.fromPublisher(rs.wasApplied()).blockingSingle()).isFalse(); + } + } + + @Mapper + public interface DseInventoryMapper { + + @DaoFactory + DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface DseProductDao { + + @Insert + ReactiveResultSet saveReactive(Product product); + + @Insert(ifNotExists = true) + ReactiveResultSet saveIfNotExistsReactive(Product product); + + @Select + MappedReactiveResultSet<Product> findByIdReactive(UUID productId); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java index 057e12af1fb..1bd899e4541 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/InventoryITBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +22,8 @@ import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; import com.datastax.oss.driver.api.mapper.annotations.Entity; import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; -import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.BaseCcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import java.util.List; import java.util.Objects; @@ -55,18 +58,24 @@ public abstract class InventoryITBase { protected static ProductSale MP3_DOWNLOAD_SALE_1 = new ProductSale(MP3_DOWNLOAD.getId(), DATE_3, 7, Uuids.startOf(915192000), 0.99, 12); - protected static List<String> createStatements(CcmRule ccmRule) { + protected static List<String> createStatements(BaseCcmRule ccmRule) { + return createStatements(ccmRule, false); + } + + protected static List<String> createStatements(BaseCcmRule ccmRule, boolean requiresSasiIndex) { ImmutableList.Builder<String> builder = ImmutableList.<String>builder() .add( "CREATE TYPE dimensions(length int, width int, height int)", "CREATE TABLE product(id uuid PRIMARY KEY, description text, dimensions frozen<dimensions>)", + "CREATE TYPE dimensions2d(width int, height int)", + "CREATE TABLE product2d(id uuid PRIMARY KEY, description text, dimensions frozen<dimensions2d>)", "CREATE TABLE product_without_id(id uuid, clustering int, description text, " + "PRIMARY KEY((id), clustering))", "CREATE TABLE product_sale(id uuid, day text, ts uuid, customer_id int, price " + "double, count int, PRIMARY KEY ((id, day), customer_id, ts))"); - if (supportsSASI(ccmRule)) { + if (requiresSasiIndex && supportsSASI(ccmRule) && !isSasiBroken(ccmRule)) { builder.add( "CREATE CUSTOM INDEX product_description ON product(description) " + "USING 'org.apache.cassandra.index.sasi.SASIIndex' " @@ -84,9 +93,17 @@ protected static List<String> createStatements(CcmRule ccmRule) { return builder.build(); } - private static final Version MINIMUM_SASI_VERSION = Version.parse("3.4.0"); + private static final Version MINIMUM_SASI_VERSION = + Objects.requireNonNull(Version.parse("3.4.0")); + private static final Version BROKEN_SASI_VERSION = Objects.requireNonNull(Version.parse("6.8.0")); + + protected static boolean isSasiBroken(BaseCcmRule ccmRule) { + // creating SASI indexes is broken in DSE 6.8.0 + return ccmRule.isDistributionOf( + BackendType.DSE, (dist, cass) -> dist.compareTo(BROKEN_SASI_VERSION) == 0); + } - protected static boolean supportsSASI(BaseCcmRule ccmRule) { + protected static boolean supportsSASI(BaseCcmRule ccmRule) { return ccmRule.getCassandraVersion().compareTo(MINIMUM_SASI_VERSION) >= 0; } @@ -130,17 +147,17 @@ public void setDimensions(Dimensions dimensions) { } @Override - public boolean equals(Object o) { - if (this == o) { + public boolean equals(Object other) { + if (other == this) { return true; - } - if (o == null || getClass() != o.getClass()) { + } else if (other instanceof Product) { + Product that = (Product) other; + return Objects.equals(id, that.id) + && Objects.equals(description, that.description) + && Objects.equals(dimensions, that.dimensions); + } else { return false; } - Product product = (Product) o; - return Objects.equals(id, product.id) - &&
Objects.equals(description, product.description) - && Objects.equals(dimensions, product.dimensions); } @Override @@ -181,11 +198,15 @@ public void setDescription(String description) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProductWithoutId that = (ProductWithoutId) o; - return Objects.equals(description, that.description); + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof ProductWithoutId) { + ProductWithoutId that = (ProductWithoutId) other; + return Objects.equals(description, that.description); + } else { + return false; + } } @Override @@ -239,15 +260,15 @@ public void setHeight(int height) { } @Override - public boolean equals(Object o) { - if (this == o) { + public boolean equals(Object other) { + if (this == other) { return true; - } - if (o == null || getClass() != o.getClass()) { + } else if (other instanceof Dimensions) { + Dimensions that = (Dimensions) other; + return this.length == that.length && this.width == that.width && this.height == that.height; + } else { return false; } - Dimensions that = (Dimensions) o; - return length == that.length && width == that.width && height == that.height; } @Override @@ -280,11 +301,15 @@ public void setId(UUID id) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - OnlyPK onlyPK = (OnlyPK) o; - return Objects.equals(id, onlyPK.id); + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof OnlyPK) { + OnlyPK that = (OnlyPK) other; + return Objects.equals(this.id, that.id); + } else { + return false; + } } @Override @@ -374,16 +399,20 @@ public void setCount(int count) { } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProductSale that = (ProductSale) o; - return Double.compare(that.price, price) == 0 - && count == that.count - && id.equals(that.id) - && day.equals(that.day) - && ts.equals(that.ts) - && customerId == that.customerId; + public boolean equals(Object other) { + if (this == other) { + return true; + } else if (other instanceof ProductSale) { + ProductSale that = (ProductSale) other; + return Double.compare(this.price, that.price) == 0 + && this.count == that.count + && this.id.equals(that.id) + && this.day.equals(that.day) + && this.ts.equals(that.ts) + && this.customerId == that.customerId; + } else { + return false; + } } @Override diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java index 095ea3113e1..974e4bad7c3 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NamingStrategyIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -61,17 +63,18 @@ @Category(ParallelizableTests.class) public class NamingStrategyIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static TestMapper mapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); for (String query : ImmutableList.of( @@ -80,7 +83,7 @@ public static void setup() { "CREATE TABLE test_NameConverterEntity(test_entityId int primary key)", "CREATE TABLE custom_entity(custom_id int primary key)")) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } mapper = new NamingStrategyIT_TestMapperBuilder(session).build(); @@ -88,7 +91,7 @@ public static void setup() { @Test public void should_map_entity_with_default_naming_strategy() { - DefaultStrategyEntityDao dao = mapper.defaultStrategyEntityDao(sessionRule.keyspace()); + DefaultStrategyEntityDao dao = mapper.defaultStrategyEntityDao(SESSION_RULE.keyspace()); DefaultStrategyEntity entity = new DefaultStrategyEntity(1); dao.save(entity); @@ -98,7 +101,7 @@ public void should_map_entity_with_default_naming_strategy() { @Test public void should_map_entity_with_non_default_convention() { - UpperSnakeCaseEntityDao dao = mapper.upperSnakeCaseEntityDao(sessionRule.keyspace()); + UpperSnakeCaseEntityDao dao = mapper.upperSnakeCaseEntityDao(SESSION_RULE.keyspace()); UpperSnakeCaseEntity entity = new UpperSnakeCaseEntity(1); dao.save(entity); @@ -108,7 +111,7 @@ public void should_map_entity_with_non_default_convention() { @Test public void should_map_entity_with_name_converter() { - NameConverterEntityDao dao = mapper.nameConverterEntityDao(sessionRule.keyspace()); + NameConverterEntityDao dao = mapper.nameConverterEntityDao(SESSION_RULE.keyspace()); NameConverterEntity entity = new NameConverterEntity(1); dao.save(entity); @@ -118,7 +121,7 @@ public void should_map_entity_with_name_converter() { @Test public void should_map_entity_with_custom_names() { - CustomNamesEntityDao dao = mapper.customNamesEntityDao(sessionRule.keyspace()); + CustomNamesEntityDao dao = mapper.customNamesEntityDao(SESSION_RULE.keyspace()); CustomNamesEntity entity = new CustomNamesEntity(1); dao.save(entity); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java index 06c17f4edc7..d61b6f6e628 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java +++ 
b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NestedUdtIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +18,17 @@ package com.datastax.oss.driver.mapper; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; -import com.datastax.oss.driver.api.core.data.GettableByName; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.api.mapper.annotations.Dao; import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; @@ -30,9 +38,12 @@ import com.datastax.oss.driver.api.mapper.annotations.Mapper; import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.annotations.SetEntity; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; @@ -43,6 +54,7 @@ import java.util.Objects; import java.util.Set; import java.util.UUID; +import org.assertj.core.util.Lists; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -53,30 +65,37 @@ /** Tests that entities with UDTs nested at various levels are properly mapped. 
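For orientation, the schema created in `setup()` below pairs each nested collection on the `Container` entity with a frozen CQL type; the correspondence is reconstructed from the entity and DDL in this file:

```java
// Reconstructed mapping between Container's properties and the CQL columns
// created in setup() below:
//   List<Type1>                         -> list frozen<list<type1>>
//   Map<String, List<Type1>>            -> map1 frozen<map<text, list<type1>>>
//   Map<Type1, Set<List<Type2>>>        -> map2 frozen<map<type1, set<list<type2>>>>
//   Map<Type1, Map<String, Set<Type2>>> -> map3 frozen<map<type1, map<text, set<type2>>>>
// The container_partial table mirrors these with type1_partial/type2_partial,
// which omit the s2/i2 fields to exercise lenient mapping.
```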
*/ @Category(ParallelizableTests.class) -@CassandraRequirement(min = "2.2", description = "support for unset values") +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.2", + description = "support for unset values") public class NestedUdtIT { - private static CcmRule ccm = CcmRule.getInstance(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final UUID CONTAINER_ID = UUID.randomUUID(); - private static UUID CONTAINER_ID = UUID.randomUUID(); private static final Container SAMPLE_CONTAINER = new Container( CONTAINER_ID, - ImmutableList.of(new Type1("a"), new Type1("b")), + ImmutableList.of(new Type1("a1", "a2"), new Type1("b1", "b2")), ImmutableMap.of( "cd", - ImmutableList.of(new Type1("c"), new Type1("d")), + ImmutableList.of(new Type1("c1", "c2"), new Type1("d1", "d2")), "ef", - ImmutableList.of(new Type1("e"), new Type1("f"))), + ImmutableList.of(new Type1("e1", "e2"), new Type1("f1", "f2"))), ImmutableMap.of( - new Type1("12"), - ImmutableSet.of(ImmutableList.of(new Type2(1)), ImmutableList.of(new Type2(2)))), + new Type1("12", "34"), + ImmutableSet.of( + ImmutableList.of(new Type2(1, 2)), ImmutableList.of(new Type2(3, 4)))), ImmutableMap.of( - new Type1("12"), ImmutableMap.of("12", ImmutableSet.of(new Type2(1), new Type2(2))))); + new Type1("12", "34"), + ImmutableMap.of("12", ImmutableSet.of(new Type2(1, 2), new Type2(3, 4))))); private static final Container SAMPLE_CONTAINER_NULL_LIST = new Container( @@ -84,53 +103,78 @@ public class NestedUdtIT { null, ImmutableMap.of( "cd", - ImmutableList.of(new Type1("c"), new Type1("d")), + ImmutableList.of(new Type1("c1", "c2"), new Type1("d1", "d2")), "ef", - ImmutableList.of(new Type1("e"), new Type1("f"))), + ImmutableList.of(new Type1("e1", "e2"), new Type1("f1", "f2"))), ImmutableMap.of( - new Type1("12"), - ImmutableSet.of(ImmutableList.of(new Type2(1)), ImmutableList.of(new Type2(2)))), + new Type1("12", "34"), + ImmutableSet.of( + ImmutableList.of(new Type2(1, 2)), ImmutableList.of(new Type2(3, 4)))), ImmutableMap.of( - new Type1("12"), ImmutableMap.of("12", ImmutableSet.of(new Type2(1), new Type2(2))))); + new Type1("12", "34"), + ImmutableMap.of("12", ImmutableSet.of(new Type2(1, 2), new Type2(3, 4))))); private static ContainerDao containerDao; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); - - for (String query : - ImmutableList.of( - "CREATE TYPE type1(s text)", - "CREATE TYPE type2(i int)", - "CREATE TABLE container(id uuid PRIMARY KEY, " - + "list frozen<list<type1>>, " - + "map1 frozen<map<text, list<type1>>>, " - + "map2 frozen<map<type1, set<list<type2>>>>," - + "map3 frozen<map<type1, map<text, set<type2>>>>" - + ")")) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } + CqlSession session = SESSION_RULE.session(); + + SchemaChangeSynchronizer.withLock( + () -> { + for (String query : + ImmutableList.of( + "CREATE TYPE type1(s1 text, s2 text)", + "CREATE TYPE type2(i1 int, i2 int)", + "CREATE TYPE type1_partial(s1 text)", + "CREATE TYPE type2_partial(i1 int)", + "CREATE TABLE container(id uuid PRIMARY KEY, " + + "list frozen<list<type1>>, " + + "map1 frozen<map<text, list<type1>>>, " + + "map2 frozen<map<type1, set<list<type2>>>>," + + "map3 frozen<map<type1, map<text, set<type2>>>>" + + ")", + "CREATE TABLE container_partial(id uuid PRIMARY KEY, " + + "list frozen<list<type1_partial>>, " + + "map1 frozen<map<text, list<type1_partial>>>, " + + "map2 frozen<map<type1_partial, set<list<type2_partial>>>>," + + "map3 frozen<map<type1_partial, map<text, set<type2_partial>>>>" + + ")")) { + session.execute( + SimpleStatement.builder(query) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + }); + + UserDefinedType type1Partial = + session + .getKeyspace() + .flatMap(ks -> session.getMetadata().getKeyspace(ks)) + .flatMap(ks -> ks.getUserDefinedType("type1_partial")) + .orElseThrow(AssertionError::new); + + session.execute( + SimpleStatement.newInstance( + "INSERT INTO container_partial (id, list) VALUES (?, ?)", + SAMPLE_CONTAINER.getId(), + Lists.newArrayList(type1Partial.newValue("a"), type1Partial.newValue("b")))); UdtsMapper udtsMapper = new NestedUdtIT_UdtsMapperBuilder(session).build(); - containerDao = udtsMapper.containerDao(sessionRule.keyspace()); + containerDao = udtsMapper.containerDao(SESSION_RULE.keyspace()); } @Before public void clearContainerData() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder("TRUNCATE container") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); } @Test public void should_insert_and_retrieve_entity_with_nested_udts() { - // Given - CqlSession session = sessionRule.session(); - // When containerDao.save(SAMPLE_CONTAINER); Container retrievedEntity = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); @@ -142,8 +186,6 @@ public void should_insert_and_retrieve_entity_with_nested_udts() { @Test public void should_insert_do_not_set_to_null_udts() { // Given - CqlSession session = sessionRule.session(); - containerDao.save(SAMPLE_CONTAINER); Container retrievedEntity = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); @@ -158,8 +200,6 @@ public void should_insert_do_not_set_to_null_udts() { @Test public void should_insert_set_to_null_udts() { // Given - CqlSession session = sessionRule.session(); - containerDao.save(SAMPLE_CONTAINER); Container retrievedEntity = containerDao.loadByPk(SAMPLE_CONTAINER.getId()); @@ -171,6 +211,71 @@ public void should_insert_set_to_null_udts() { assertThat(retrievedEntitySecond.list).isEmpty(); } + @Test + public void should_get_entity_from_complete_row() { + CqlSession session = SESSION_RULE.session(); + containerDao.save(SAMPLE_CONTAINER); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT * FROM container WHERE id = ?", SAMPLE_CONTAINER.getId())); + Row row = rs.one(); + assertThat(row).isNotNull(); + Container actual = containerDao.get(row); + assertThat(actual).isEqualTo(SAMPLE_CONTAINER); + } + + @Test + public void should_not_get_entity_from_partial_row_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + containerDao.save(SAMPLE_CONTAINER); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT id FROM container WHERE id = ?", SAMPLE_CONTAINER.getId())); + Row row = rs.one(); + assertThat(row).isNotNull(); + Throwable error = catchThrowable(() -> containerDao.get(row)); + assertThat(error).hasMessage("list is not a column in this row"); + } + + @Test + public void should_get_entity_from_partial_row_when_lenient() { + CqlSession session = SESSION_RULE.session(); + ResultSet rs = + session.execute( + SimpleStatement.newInstance( + "SELECT id, list FROM container_partial WHERE id = ?", SAMPLE_CONTAINER.getId())); + Row row = rs.one(); + assertThat(row).isNotNull(); + Container actual = containerDao.getLenient(row); +
assertThat(actual.getId()).isEqualTo(SAMPLE_CONTAINER.getId()); + assertThat(actual.getList()).containsExactly(new Type1("a", null), new Type1("b", null)); + assertThat(actual.getMap1()).isNull(); + assertThat(actual.getMap2()).isNull(); + assertThat(actual.getMap3()).isNull(); + } + + @Test + public void should_set_entity_on_partial_statement_builder_when_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = + session.prepare("INSERT INTO container_partial (id, list) VALUES (?, ?)"); + BoundStatementBuilder builder = ps.boundStatementBuilder(); + containerDao.setLenient(SAMPLE_CONTAINER, builder); + assertThat(builder.getUuid(0)).isEqualTo(SAMPLE_CONTAINER.getId()); + assertThat(builder.getList(1, UdtValue.class)).hasSize(2); + } + + @Test + public void should_not_set_entity_on_partial_statement_builder_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO container (id, list) VALUES (?, ?)"); + Throwable error = + catchThrowable(() -> containerDao.set(SAMPLE_CONTAINER, ps.boundStatementBuilder())); + assertThat(error).hasMessage("map1 is not a variable in this bound statement"); + } + @Mapper public interface UdtsMapper { @DaoFactory @@ -193,7 +298,16 @@ public interface ContainerDao { void saveSetToNull(Container container); @GetEntity - Container get(GettableByName source); + Container get(Row source); + + @GetEntity(lenient = true) + Container getLenient(Row source); + + @SetEntity + void set(Container container, BoundStatementBuilder target); + + @SetEntity(lenient = true) + void setLenient(Container container, BoundStatementBuilder target); } @Entity @@ -284,73 +398,93 @@ public int hashCode() { @Entity public static class Type1 { - private String s; + private String s1; + private String s2; public Type1() {} - public Type1(String s) { - this.s = s; + public Type1(String s1, String s2) { + this.s1 = s1; + this.s2 = s2; } - public String getS() { - return s; + public String getS1() { + return s1; } - public void setS(String s) { - this.s = s; + public void setS1(String s1) { + this.s1 = s1; + } + + public String getS2() { + return s2; + } + + public void setS2(String s2) { + this.s2 = s2; } @Override - public boolean equals(Object other) { - if (other == this) { + public boolean equals(Object o) { + if (this == o) { return true; - } else if (other instanceof Type1) { - Type1 that = (Type1) other; - return Objects.equals(this.s, that.s); - } else { + } + if (!(o instanceof Type1)) { return false; } + Type1 type1 = (Type1) o; + return Objects.equals(s1, type1.s1) && Objects.equals(s2, type1.s2); } @Override public int hashCode() { - return s == null ? 
0 : s.hashCode(); + return Objects.hash(s1, s2); } } @Entity public static class Type2 { - private int i; + private int i1; + private int i2; public Type2() {} - public Type2(int i) { - this.i = i; + public Type2(int i1, int i2) { + this.i1 = i1; + this.i2 = i2; } - public int getI() { - return i; + public int getI1() { + return i1; } - public void setI(int i) { - this.i = i; + public void setI1(int i1) { + this.i1 = i1; + } + + public int getI2() { + return i2; + } + + public void setI2(int i2) { + this.i2 = i2; } @Override - public boolean equals(Object other) { - if (other == this) { + public boolean equals(Object o) { + if (this == o) { return true; - } else if (other instanceof Type2) { - Type2 that = (Type2) other; - return this.i == that.i; - } else { + } + if (!(o instanceof Type2)) { return false; } + Type2 type2 = (Type2) o; + return i1 == type2.i1 && i2 == type2.i2; } @Override public int hashCode() { - return i; + return Objects.hash(i1, i2); } } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java index c3b1d5ddfbc..ea96c12e57b 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/NullSavingStrategyIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -36,9 +38,11 @@
 import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy;
 import com.datastax.oss.driver.api.testinfra.ccm.CcmRule;
 import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
 import com.datastax.oss.driver.categories.ParallelizableTests;
 import java.util.Objects;
 import java.util.UUID;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -49,35 +53,67 @@
 @Category(ParallelizableTests.class)
 public class NullSavingStrategyIT {

-  private static CcmRule ccm = CcmRule.getInstance();
-
-  private static SessionRule<CqlSession> sessionRule =
-      SessionRule.builder(ccm)
-          .withConfigLoader(
-              DriverConfigLoader.programmaticBuilder()
-                  .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3")
-                  .build())
-          .build();
+  private static final CcmRule CCM_RULE = CcmRule.getInstance();
+
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      SessionRule.builder(CCM_RULE).build();
+
+  // JAVA-3076: V3 protocol calls that could trigger Cassandra to issue client warnings appear to
+  // be inherently unstable when used at the same time as V4+ protocol clients (common, since this
+  // test is part of the parallelizable test suite).
+  //
+  // For this test we'll use the latest protocol version for the SessionRule set-up, which creates
+  // the keyspace and could potentially result in a warning about too many keyspaces, and then
+  // create a new client for the tests to use, which they access via the static InventoryMapper
+  // instance `mapper`.
+  //
+  // This additional client is created in the @BeforeClass method #setup() and guaranteed to be
+  // closed in the @AfterClass method #teardown().
+  //
+  // Note: the standard JUnit runner executes rules before class/test setup, so the order of
+  // execution is CcmRule#before > SessionRule#before > NullSavingStrategyIT#setup, meaning
+  // CCM_RULE/SESSION_RULE should be fully initialized by the time #setup() is invoked.
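The ordering guarantee relied on by the comment above is easy to verify in isolation. The following is a minimal, self-contained JUnit 4 sketch (the class and helper names are made up for illustration; it is not part of this patch) showing that a `@ClassRule` chain completes its `before()` callbacks, outermost rule first, before any `@BeforeClass` method runs:

```java
import static org.junit.Assert.assertEquals;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

public class RuleOrderingSketch {

  private static final List<String> EVENTS = new ArrayList<>();

  // A tiny rule that records when its before() callback fires.
  private static TestRule recording(String name) {
    return new ExternalResource() {
      @Override
      protected void before() {
        EVENTS.add(name);
      }
    };
  }

  // The outer rule initializes first (and tears down last), mirroring CCM_RULE around
  // SESSION_RULE in the test above.
  @ClassRule
  public static final TestRule CHAIN =
      RuleChain.outerRule(recording("outer")).around(recording("inner"));

  @BeforeClass
  public static void setup() {
    // Both rules have already completed their before() callbacks at this point.
    EVENTS.add("setup");
  }

  @Test
  public void should_run_class_rules_before_before_class() {
    assertEquals(Arrays.asList("outer", "inner", "setup"), EVENTS);
  }
}
```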
+ private static CqlSession v3Session; + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static InventoryMapper mapper; - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); - @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); - session.execute( - SimpleStatement.builder( - "CREATE TABLE product_simple(id uuid PRIMARY KEY, description text)") - .setExecutionProfile(sessionRule.slowProfile()) - .build()); - - mapper = new NullSavingStrategyIT_InventoryMapperBuilder(session).build(); + // setup table for use in tests, this can use the default session + SESSION_RULE + .session() + .execute( + SimpleStatement.builder( + "CREATE TABLE product_simple(id uuid PRIMARY KEY, description text)") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + + // Create V3 protocol session for use in tests, will be closed in #teardown() + v3Session = + SessionUtils.newSession( + CCM_RULE, + SESSION_RULE.keyspace(), + DriverConfigLoader.programmaticBuilder() + .withString(DefaultDriverOption.PROTOCOL_VERSION, "V3") + .build()); + + // Hand V3 session to InventoryMapper which the tests will use to perform db calls + mapper = new NullSavingStrategyIT_InventoryMapperBuilder(v3Session).build(); + } + + @AfterClass + public static void teardown() { + // Close V3 session (SESSION_RULE will be closed separately by @ClassRule handling) + if (v3Session != null) { + v3Session.close(); + } } @Test public void should_throw_when_try_to_construct_dao_with_DO_NOT_SET_strategy_for_V3_protocol() { - assertThatThrownBy(() -> mapper.productDao(sessionRule.keyspace())) + assertThatThrownBy(() -> mapper.productDao(SESSION_RULE.keyspace())) .isInstanceOf(MapperException.class) .hasMessage("You cannot use NullSavingStrategy.DO_NOT_SET for protocol version V3."); } @@ -85,7 +121,7 @@ public void should_throw_when_try_to_construct_dao_with_DO_NOT_SET_strategy_for_ @Test public void should_throw_when_try_to_construct_dao_with_DO_NOT_SET_implicit_strategy_for_V3_protocol() { - assertThatThrownBy(() -> mapper.productDaoImplicit(sessionRule.keyspace())) + assertThatThrownBy(() -> mapper.productDaoImplicit(SESSION_RULE.keyspace())) .isInstanceOf(MapperException.class) .hasMessage("You cannot use NullSavingStrategy.DO_NOT_SET for protocol version V3."); } @@ -93,27 +129,27 @@ public void should_throw_when_try_to_construct_dao_with_DO_NOT_SET_strategy_for_ @Test public void should_throw_when_try_to_construct_dao_with_DO_NOT_SET_strategy_set_globally_for_V3_protocol() { - assertThatThrownBy(() -> mapper.productDaoDefault(sessionRule.keyspace())) + assertThatThrownBy(() -> mapper.productDaoDefault(SESSION_RULE.keyspace())) .isInstanceOf(MapperException.class) .hasMessage("You cannot use NullSavingStrategy.DO_NOT_SET for protocol version V3."); } @Test public void should_do_not_throw_when_construct_dao_with_global_level_SET_TO_NULL() { - assertThatCode(() -> mapper.productDaoGlobalLevelSetToNull(sessionRule.keyspace())) + assertThatCode(() -> mapper.productDaoGlobalLevelSetToNull(SESSION_RULE.keyspace())) .doesNotThrowAnyException(); } @Test public void should_do_not_throw_when_construct_dao_with_parent_interface_SET_TO_NULL() { - assertThatCode(() -> mapper.productDaoSetToNullFromParentInterface(sessionRule.keyspace())) + assertThatCode(() -> mapper.productDaoSetToNullFromParentInterface(SESSION_RULE.keyspace())) .doesNotThrowAnyException(); } @Test public void 
should_do_not_throw_when_construct_dao_with_global_level_DO_NOT_SET_and_local_override_to_SET_TO_NULL() { - assertThatCode(() -> mapper.productDaoLocalOverride(sessionRule.keyspace())) + assertThatCode(() -> mapper.productDaoLocalOverride(SESSION_RULE.keyspace())) .doesNotThrowAnyException(); } @@ -229,12 +265,17 @@ public void setDescription(String description) { } @Override - public boolean equals(Object o) { - - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProductSimple that = (ProductSimple) o; - return Objects.equals(id, that.id) && Objects.equals(description, that.description); + public boolean equals(Object other) { + + if (this == other) { + return true; + } else if (other instanceof ProductSimple) { + ProductSimple that = (ProductSimple) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.description, that.description); + } else { + return false; + } } @Override diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java index 024cff2e172..d63e3834188 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/PrimitivesIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,34 +32,37 @@ import com.datastax.oss.driver.api.mapper.annotations.Select; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import java.util.Objects; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @Category(ParallelizableTests.class) +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "2.2", + description = "smallint is a reserved keyword in 2.1") public class PrimitivesIT { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @Rule public ExpectedException thrown = ExpectedException.none(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static TestMapper mapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder( "CREATE TABLE primitives_entity(" @@ -68,14 +73,14 @@ public static void setup() { + "long_col bigint, " + "float_col float," + "double_col double)") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); mapper = new PrimitivesIT_TestMapperBuilder(session).build(); } @Test public void should_not_include_computed_values_in_insert() { - PrimitivesDao primitivesDao = mapper.primitivesDao(sessionRule.keyspace()); + PrimitivesDao primitivesDao = mapper.primitivesDao(SESSION_RULE.keyspace()); PrimitivesEntity expected = new PrimitivesEntity(0, true, (byte) 2, (short) 3, 4L, 5.0f, 6.0d); primitivesDao.save(expected); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ProfileIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ProfileIT.java new file mode 100644 index 00000000000..3ed2a48cced --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/ProfileIT.java @@ -0,0 +1,410 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.query; +import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoProfile; +import com.datastax.oss.driver.api.mapper.annotations.Delete; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.Query; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.annotations.StatementAttributes; +import com.datastax.oss.driver.api.mapper.annotations.Update; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.api.testinfra.session.SessionUtils; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.protocol.internal.Message; +import com.datastax.oss.protocol.internal.request.Execute; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import com.datastax.oss.simulacron.common.cluster.QueryLog; +import com.datastax.oss.simulacron.common.stubbing.PrimeDsl; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.UnaryOperator; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class ProfileIT { + + private static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(1)); + + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE) + .withConfigLoader( + SessionUtils.configLoaderBuilder() + .startProfile("cl_one") + .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE") + .build()) + .build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); + + 
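For context on the profile machinery this test exercises: a named profile can be declared entirely programmatically, and a derived profile can be spun off the default profile at runtime without touching configuration files. The following standalone sketch (hypothetical `ProfileConfigSketch` class, not part of this patch; it assumes the driver's public config API, including `DriverConfigLoader#getInitialConfig()`) shows both:

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfig;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;

public class ProfileConfigSketch {

  public static void main(String[] args) {
    // Declare a named profile programmatically, equivalent to a "profiles.cl_one" block in
    // application.conf.
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .startProfile("cl_one")
            .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "ONE")
            .endProfile()
            .build();

    DriverConfig config = loader.getInitialConfig();

    // The named profile overrides REQUEST_CONSISTENCY; every other option falls back to the
    // defaults from reference.conf.
    DriverExecutionProfile clOne = config.getProfile("cl_one");
    System.out.println(clOne.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); // ONE

    // A derived profile overrides a single option on top of the default profile, which is how
    // clTwoProfile is built in setupClass() below.
    DriverExecutionProfile clTwo =
        config.getDefaultProfile().withString(DefaultDriverOption.REQUEST_CONSISTENCY, "TWO");
    System.out.println(clTwo.getString(DefaultDriverOption.REQUEST_CONSISTENCY)); // TWO
  }
}
```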
private static final Simple SAMPLE_ENTITY = new Simple(UUID.randomUUID(), "DATA"); + + private static DriverExecutionProfile clTwoProfile; + private MapperBuilder mapperBuilder; + + @BeforeClass + public static void setupClass() { + primeDeleteQuery(); + primeInsertQuery(); + primeSelectQuery(); + primeCountQuery(); + primeUpdateQuery(); + + // Deliberately based on the default profile, so that we can assert that a dynamically-set + // option is correctly taken into account + clTwoProfile = + SESSION_RULE + .session() + .getContext() + .getConfig() + .getDefaultProfile() + .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "TWO"); + } + + @Before + public void setup() { + SIMULACRON_RULE.cluster().clearLogs(); + mapperBuilder = SimpleMapper.builder(SESSION_RULE.session()); + } + + @Test + public void should_build_dao_with_profile_name() { + SimpleMapper mapper = mapperBuilder.build(); + SimpleDao dao = mapper.simpleDao("cl_one"); + assertClForAllQueries(dao, ConsistencyLevel.ONE); + } + + @Test + public void should_build_dao_with_profile() { + SimpleMapper mapper = mapperBuilder.build(); + SimpleDao dao = mapper.simpleDao(clTwoProfile); + assertClForAllQueries(dao, ConsistencyLevel.TWO); + } + + @Test + public void should_inherit_mapper_profile_name() { + SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfileName("cl_one").build(); + SimpleDao dao = mapper.simpleDao(); + assertClForAllQueries(dao, ConsistencyLevel.ONE); + } + + @Test + public void should_inherit_mapper_profile() { + SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfile(clTwoProfile).build(); + SimpleDao dao = mapper.simpleDao(); + assertClForAllQueries(dao, ConsistencyLevel.TWO); + } + + @Test + public void should_override_mapper_profile_name() { + SimpleMapper mapper = + mapperBuilder + .withDefaultExecutionProfileName("defaultProfile") // doesn't need to exist + .build(); + SimpleDao dao = mapper.simpleDao("cl_one"); + assertClForAllQueries(dao, ConsistencyLevel.ONE); + } + + @Test + public void should_override_mapper_profile() { + DriverExecutionProfile clThreeProfile = + SESSION_RULE + .session() + .getContext() + .getConfig() + .getDefaultProfile() + .withString(DefaultDriverOption.REQUEST_CONSISTENCY, "THREE"); + SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfile(clThreeProfile).build(); + SimpleDao dao = mapper.simpleDao(clTwoProfile); + assertClForAllQueries(dao, ConsistencyLevel.TWO); + } + + @Test + public void should_override_mapper_profile_name_with_a_profile() { + SimpleMapper mapper = + mapperBuilder + .withDefaultExecutionProfileName("defaultProfile") // doesn't need to exist + .build(); + SimpleDao dao = mapper.simpleDao(clTwoProfile); + assertClForAllQueries(dao, ConsistencyLevel.TWO); + } + + @Test + public void should_override_mapper_profile_with_a_name() { + SimpleMapper mapper = mapperBuilder.withDefaultExecutionProfile(clTwoProfile).build(); + SimpleDao dao = mapper.simpleDao("cl_one"); + assertClForAllQueries(dao, ConsistencyLevel.ONE); + } + + @Test + public void should_use_default_when_no_profile() { + SimpleMapper mapper = mapperBuilder.build(); + SimpleDao dao = mapper.simpleDao(); + // Default CL inherited from reference.conf + assertClForAllQueries(dao, ConsistencyLevel.LOCAL_ONE); + } + + private void assertClForAllQueries(SimpleDao dao, ConsistencyLevel expectedLevel) { + dao.save(SAMPLE_ENTITY); + assertServerSideCl(expectedLevel); + dao.delete(SAMPLE_ENTITY); + assertServerSideCl(expectedLevel); + dao.update(SAMPLE_ENTITY); + 
assertServerSideCl(expectedLevel);
+    dao.findByPk(SAMPLE_ENTITY.pk);
+    assertServerSideCl(expectedLevel);
+
+    // Special cases: a profile defined at the method level with statement attributes should
+    // override the dao-level profile.
+    dao.saveWithClOne(SAMPLE_ENTITY);
+    assertServerSideCl(ConsistencyLevel.ONE);
+    dao.saveWithCustomAttributes(SAMPLE_ENTITY, bs -> bs.setExecutionProfileName("cl_one"));
+    assertServerSideCl(ConsistencyLevel.ONE);
+  }
+
+  private void assertServerSideCl(ConsistencyLevel expectedCl) {
+    List<QueryLog> queryLogs = SIMULACRON_RULE.cluster().getLogs().getQueryLogs();
+    QueryLog lastLog = queryLogs.get(queryLogs.size() - 1);
+    Message message = lastLog.getFrame().message;
+    assertThat(message).isInstanceOf(Execute.class);
+    Execute queryExecute = (Execute) message;
+    assertThat(queryExecute.options.consistency).isEqualTo(expectedCl.getProtocolCode());
+  }
+
+  private static void primeInsertQuery() {
+    LinkedHashMap<String, Object> params =
+        new LinkedHashMap<>(
+            ImmutableMap.of("pk", SAMPLE_ENTITY.getPk(), "data", SAMPLE_ENTITY.getData()));
+    LinkedHashMap<String, String> paramTypes =
+        new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii"));
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(query(
+                    "INSERT INTO ks.simple (pk,data) VALUES (:pk,:data)",
+                    Lists.newArrayList(
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO),
+                    params,
+                    paramTypes))
+                .then(noRows()));
+  }
+
+  private static void primeDeleteQuery() {
+    LinkedHashMap<String, Object> params =
+        new LinkedHashMap<>(ImmutableMap.of("pk", SAMPLE_ENTITY.getPk()));
+    LinkedHashMap<String, String> paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid"));
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(query(
+                    "DELETE FROM ks.simple WHERE pk=:pk",
+                    Lists.newArrayList(
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO),
+                    params,
+                    paramTypes))
+                .then(noRows())
+                .delay(1, TimeUnit.MILLISECONDS));
+  }
+
+  private static void primeSelectQuery() {
+    LinkedHashMap<String, Object> params =
+        new LinkedHashMap<>(ImmutableMap.of("pk", SAMPLE_ENTITY.getPk()));
+    LinkedHashMap<String, String> paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid"));
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(query(
+                    "SELECT pk,data FROM ks.simple WHERE pk=:pk",
+                    Lists.newArrayList(
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO),
+                    params,
+                    paramTypes))
+                .then(noRows())
+                .delay(1, TimeUnit.MILLISECONDS));
+  }
+
+  private static void primeCountQuery() {
+    LinkedHashMap<String, Object> params =
+        new LinkedHashMap<>(ImmutableMap.of("pk", SAMPLE_ENTITY.getPk()));
+    LinkedHashMap<String, String> paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid"));
+    SIMULACRON_RULE
+        .cluster()
+        .prime(
+            when(query(
+                    "SELECT count(*) FROM ks.simple WHERE pk=:pk",
+                    Lists.newArrayList(
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.LOCAL_ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE,
+                        com.datastax.oss.simulacron.common.codec.ConsistencyLevel.TWO),
+                    params,
+                    paramTypes))
+                .then(PrimeDsl.rows().row("count", 1L).columnTypes("count", "bigint").build())
+                .delay(1, TimeUnit.MILLISECONDS));
+  }
+
+  private static void primeUpdateQuery() {
+    LinkedHashMap<String, Object>
params = + new LinkedHashMap<>( + ImmutableMap.of("pk", SAMPLE_ENTITY.getPk(), "data", SAMPLE_ENTITY.getData())); + LinkedHashMap paramTypes = + new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); + SIMULACRON_RULE + .cluster() + .prime( + when(query( + "UPDATE ks.simple SET data=:data WHERE pk=:pk", + Lists.newArrayList( + com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ONE, + com.datastax.oss.simulacron.common.codec.ConsistencyLevel.ANY), + params, + paramTypes)) + .then(noRows())); + } + + @Mapper + public interface SimpleMapper { + @DaoFactory + SimpleDao simpleDao(); + + @DaoFactory + SimpleDao simpleDao(@DaoProfile String executionProfile); + + @DaoFactory + SimpleDao simpleDao(@DaoProfile DriverExecutionProfile executionProfile); + + static MapperBuilder builder(CqlSession session) { + return new ProfileIT_SimpleMapperBuilder(session); + } + } + + @Dao + public interface SimpleDao { + @Insert + void save(Simple simple); + + @Delete + void delete(Simple simple); + + @Select + @SuppressWarnings("UnusedReturnValue") + Simple findByPk(UUID pk); + + @Query("SELECT count(*) FROM ks.simple WHERE pk=:pk") + long count(UUID pk); + + @Update + void update(Simple simple); + + @Insert + @StatementAttributes(executionProfileName = "cl_one") + void saveWithClOne(Simple simple); + + @Insert + void saveWithCustomAttributes(Simple simple, UnaryOperator attributes); + } + + @Entity(defaultKeyspace = "ks") + public static class Simple { + @PartitionKey private UUID pk; + private String data; + + public Simple() {} + + public Simple(UUID pk, String data) { + this.pk = pk; + this.data = data; + } + + public UUID getPk() { + return pk; + } + + public String getData() { + return data; + } + + public void setPk(UUID pk) { + + this.pk = pk; + } + + public void setData(String data) { + this.data = data; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ProfileIT.Simple)) { + return false; + } + ProfileIT.Simple simple = (ProfileIT.Simple) o; + return Objects.equals(pk, simple.pk) && Objects.equals(data, simple.data); + } + + @Override + public int hashCode() { + + return Objects.hash(pk, data); + } + + @Override + public String toString() { + return "Simple{" + "pk=" + pk + ", data='" + data + '\'' + '}'; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java index 1cdb28ddedf..9391c0363f8 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryKeyspaceAndTableIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.mapper; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -35,10 +38,8 @@ import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @@ -46,21 +47,21 @@ @Category(ParallelizableTests.class) public class QueryKeyspaceAndTableIT { - private static CcmRule ccm = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static final CqlIdentifier FOO_TABLE_ID = CqlIdentifier.fromCql("foo"); private static final CqlIdentifier OTHER_KEYSPACE = CqlIdentifier.fromCql(QueryKeyspaceAndTableIT.class.getSimpleName() + "_alt"); - @Rule public ExpectedException thrown = ExpectedException.none(); - private static TestMapper mapper; @BeforeClass public static void createSchema() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); for (String query : ImmutableList.of( @@ -70,7 +71,7 @@ public static void createSchema() { OTHER_KEYSPACE.asCql(false)), String.format("CREATE TABLE %s.foo(k int PRIMARY KEY)", OTHER_KEYSPACE.asCql(false)))) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } session.execute("INSERT INTO foo (k) VALUES (1)"); @@ -85,28 +86,31 @@ public static void createSchema() { @Test public void should_substitute_keyspaceId_and_tableId() { DaoWithKeyspaceAndTableId dao = - mapper.daoWithKeyspaceAndTableId(sessionRule.keyspace(), FOO_TABLE_ID); + mapper.daoWithKeyspaceAndTableId(SESSION_RULE.keyspace(), FOO_TABLE_ID); assertThat(dao.count()).isEqualTo(1); } @Test public void should_fail_to_substitute_keyspaceId_if_dao_has_no_keyspace() { - thrown.expect(MapperException.class); - thrown.expectMessage( - "Cannot substitute ${keyspaceId} in query " - + "'SELECT count(*) FROM ${keyspaceId}.${tableId}': " - + "the DAO wasn't built with a keyspace"); - mapper.daoWithKeyspaceAndTableId(null, FOO_TABLE_ID); + Throwable t = catchThrowable(() -> mapper.daoWithKeyspaceAndTableId(null, FOO_TABLE_ID)); + assertThat(t) + .isInstanceOf(MapperException.class) + .hasMessage( + "Cannot substitute ${keyspaceId} in query " + + "'SELECT count(*) FROM ${keyspaceId}.${tableId}': " + + "the DAO wasn't built with a keyspace"); } @Test public void should_fail_to_substitute_tableId_if_dao_has_no_table() { - thrown.expect(MapperException.class); - 
thrown.expectMessage( - "Cannot substitute ${tableId} in query " - + "'SELECT count(*) FROM ${keyspaceId}.${tableId}': " - + "the DAO wasn't built with a table"); - mapper.daoWithKeyspaceAndTableId(sessionRule.keyspace(), null); + Throwable t = + catchThrowable(() -> mapper.daoWithKeyspaceAndTableId(SESSION_RULE.keyspace(), null)); + assertThat(t) + .isInstanceOf(MapperException.class) + .hasMessage( + "Cannot substitute ${tableId} in query " + + "'SELECT count(*) FROM ${keyspaceId}.${tableId}': " + + "the DAO wasn't built with a table"); } @Test @@ -123,12 +127,14 @@ public void should_not_use_keyspace_in_qualifiedTableId_when_dao_has_no_keyspace @Test public void should_fail_to_substitute_qualifiedTableId_if_dao_has_no_table() { - thrown.expect(MapperException.class); - thrown.expectMessage( - "Cannot substitute ${qualifiedTableId} in query " - + "'SELECT count(*) FROM ${qualifiedTableId}': " - + "the DAO wasn't built with a table"); - mapper.daoWithQualifiedTableId(sessionRule.keyspace(), null); + Throwable t = + catchThrowable(() -> mapper.daoWithQualifiedTableId(SESSION_RULE.keyspace(), null)); + assertThat(t) + .isInstanceOf(MapperException.class) + .hasMessage( + "Cannot substitute ${qualifiedTableId} in query " + + "'SELECT count(*) FROM ${qualifiedTableId}': " + + "the DAO wasn't built with a table"); } @Dao diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java index 2b8d887927b..4bfda4fdfdb 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryProviderIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -43,6 +45,8 @@
 import com.datastax.oss.driver.categories.ParallelizableTests;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -53,9 +57,11 @@
 @Category(ParallelizableTests.class)
 public class QueryProviderIT {

-  private static CcmRule ccm = CcmRule.getInstance();
-  private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build();
-  @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule);
+  private static final CcmRule CCM_RULE = CcmRule.getInstance();
+  private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build();
+
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);

   // Dummy counter to exercise the "custom state" feature: it gets incremented each time the query
   // provider is called.
@@ -65,21 +71,21 @@ public class QueryProviderIT {

   @BeforeClass
   public static void setup() {
-    CqlSession session = sessionRule.session();
+    CqlSession session = SESSION_RULE.session();
     session.execute(
         SimpleStatement.builder(
                 "CREATE TABLE sensor_reading(id int, month int, day int, value double, "
                     + "PRIMARY KEY (id, month, day)) "
                     + "WITH CLUSTERING ORDER BY (month DESC, day DESC)")
-            .setExecutionProfile(sessionRule.slowProfile())
+            .setExecutionProfile(SESSION_RULE.slowProfile())
             .build());

     SensorMapper mapper =
         new QueryProviderIT_SensorMapperBuilder(session)
             .withCustomState("executionCount", executionCount)
             .build();
-    dao = mapper.sensorDao(sessionRule.keyspace());
+    dao = mapper.sensorDao(SESSION_RULE.keyspace());
   }

   @Test
@@ -113,9 +119,15 @@ public interface SensorMapper {
   @Dao
   @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL)
   public interface SensorDao {
+
     @QueryProvider(providerClass = FindSliceProvider.class, entityHelpers = SensorReading.class)
     PagingIterable<SensorReading> findSlice(int id, Integer month, Integer day);

+    @QueryProvider(
+        providerClass = FindSliceStreamProvider.class,
+        entityHelpers = SensorReading.class)
+    Stream<SensorReading> findSliceAsStream(int id, Integer month, Integer day);
+
     @Insert
     void save(SensorReading reading);
   }
@@ -158,7 +170,21 @@ public PagingIterable<SensorReading> findSlice(int id, Integer month, Integer da
           boundStatementBuilder = boundStatementBuilder.setInt("day", day);
         }
       }
-      return session.execute(boundStatementBuilder.build()).map(sensorReadingHelper::get);
+      return session
+          .execute(boundStatementBuilder.build())
+          .map(row -> sensorReadingHelper.get(row, false));
+    }
+  }
+
+  public static class FindSliceStreamProvider extends FindSliceProvider {
+
+    public FindSliceStreamProvider(
+        MapperContext context, EntityHelper<SensorReading> sensorReadingHelper) {
+      super(context, sensorReadingHelper);
+    }
+
+    public Stream<SensorReading> findSliceAsStream(int id, Integer month, Integer day) {
+      return StreamSupport.stream(findSlice(id, month, day).spliterator(), false);
     }
   }
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReactiveIT.java
new file mode 100644
index 00000000000..d04ab5150ec
--- /dev/null
+++
b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReactiveIT.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.Query; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import io.reactivex.Flowable; +import java.util.List; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class QueryReactiveIT { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + private static DseTestDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = sessionRule.session(); + + session.execute( + SimpleStatement.builder( + "CREATE TABLE test_entity(id int, rank int, value int, PRIMARY KEY(id, rank))") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + + TestMapper testMapper = new QueryReactiveIT_TestMapperBuilder(session).build(); + dao = testMapper.productDao(sessionRule.keyspace()); + } + + @Before + public void insertData() { + for (int i = 0; i < 10; i++) { + dao.insert(new TestEntity(1, i, i)); + } + } + + @Test + public void should_query_reactive() { + ReactiveResultSet rs = 
dao.findByIdReactive(1); + assertThat(Flowable.fromPublisher(rs).count().blockingGet()).isEqualTo(10); + } + + @Test + public void should_query_reactive_mapped() { + MappedReactiveResultSet rs = dao.findByIdReactiveMapped(1); + List results = Flowable.fromPublisher(rs).toList().blockingGet(); + assertThat(results).hasSize(10); + assertThat(results).extracting("rank").containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + } + + @Mapper + public interface TestMapper { + + @DaoFactory + DseTestDao productDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface DseTestDao { + + @Insert + void insert(TestEntity entity); + + @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id") + MappedReactiveResultSet findByIdReactiveMapped(int id); + + @Query("SELECT * FROM ${keyspaceId}.test_entity WHERE id = :id") + ReactiveResultSet findByIdReactive(int id); + } + + @Entity + public static class TestEntity { + @PartitionKey private int id; + + @ClusteringColumn private int rank; + + private Integer value; + + public TestEntity() {} + + public TestEntity(int id, int rank, Integer value) { + this.id = id; + this.rank = rank; + this.value = value; + } + + public int getId() { + return id; + } + + public void setId(int id) { + this.id = id; + } + + public int getRank() { + return rank; + } + + public void setRank(int rank) { + this.rank = rank; + } + + public Integer getValue() { + return value; + } + + public void setValue(Integer value) { + this.value = value; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java index 0b156611347..c6e90912206 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/QueryReturnTypesIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.mapper; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -46,13 +49,13 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.rules.ExpectedException; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @@ -60,26 +63,26 @@ @Category(ParallelizableTests.class) public class QueryReturnTypesIT { - private static CcmRule ccm = CcmRule.getInstance(); - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @Rule public ExpectedException thrown = ExpectedException.none(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static TestDao dao; @BeforeClass public static void createSchema() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder( "CREATE TABLE test_entity(id int, rank int, value int, PRIMARY KEY(id, rank))") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); TestMapper mapper = new QueryReturnTypesIT_TestMapperBuilder(session).build(); - dao = mapper.dao(sessionRule.keyspace(), CqlIdentifier.fromCql("test_entity")); + dao = mapper.dao(SESSION_RULE.keyspace(), CqlIdentifier.fromCql("test_entity")); } @Before @@ -120,11 +123,12 @@ public void should_execute_count_query_and_map_to_long() { @Test public void should_fail_to_map_to_long_if_query_returns_other_type() { - thrown.expect(MapperException.class); - thrown.expectMessage( - "Expected the query to return a column with CQL type BIGINT in first position " - + "(return type long is intended for COUNT queries)"); - dao.wrongCount(); + Throwable t = catchThrowable(() -> dao.wrongCount()); + assertThat(t) + .isInstanceOf(MapperException.class) + .hasMessage( + "Expected the query to return a column with CQL type BIGINT in first position " + + "(return type long is intended for COUNT queries)"); } @Test @@ -224,6 +228,12 @@ public void should_execute_query_and_map_to_iterable() { assertThat(iterable.all()).hasSize(10); } + @Test + public void should_execute_query_and_map_to_stream() { + Stream stream = dao.findByIdAsStream(1); + assertThat(stream).hasSize(10); + } + @Test public void should_execute_async_query_and_map_to_iterable() { MappedAsyncPagingIterable iterable = @@ -232,6 +242,13 @@ public void should_execute_async_query_and_map_to_iterable() { assertThat(iterable.hasMorePages()).isFalse(); } + @Test + public 
void should_execute_query_and_map_to_stream_async()
+      throws ExecutionException, InterruptedException {
+    CompletableFuture<Stream<TestEntity>> stream = dao.findByIdAsStreamAsync(1);
+    assertThat(stream.get()).hasSize(10);
+  }
+
   @Dao
   @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL)
   public interface TestDao {
@@ -288,8 +305,14 @@ public interface TestDao {
     @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id")
     PagingIterable<TestEntity> findById(int id);

+    @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id")
+    Stream<TestEntity> findByIdAsStream(int id);
+
     @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id")
     CompletableFuture<MappedAsyncPagingIterable<TestEntity>> findByIdAsync(int id);
+
+    @Query("SELECT * FROM ${qualifiedTableId} WHERE id = :id")
+    CompletableFuture<Stream<TestEntity>> findByIdAsStreamAsync(int id);
   }

   @Entity
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SchemaValidationIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SchemaValidationIT.java
new file mode 100644
index 00000000000..5bf6fc2d27a
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SchemaValidationIT.java
@@ -0,0 +1,1256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.oss.driver.mapper; + +import static com.datastax.oss.driver.api.mapper.annotations.SchemaHint.TargetElement; +import static com.datastax.oss.driver.internal.core.util.LoggerTest.setupTestLogger; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; + +import ch.qos.logback.classic.Level; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; +import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.annotations.Update; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.internal.core.util.LoggerTest; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.4", + description = "Creates a SASI index") +public class SchemaValidationIT extends InventoryITBase { + + private static CcmRule ccm = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + + private static InventoryMapper mapper; + private static InventoryMapper mapperDisabledValidation; + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + + @BeforeClass + public static void setup() { + CqlSession session = sessionRule.session(); + List statements = + Arrays.asList( + "CREATE TABLE product_simple(id uuid PRIMARY KEY, description text, unmapped text)", + "CREATE TABLE product_simple_missing_p_k(id uuid PRIMARY KEY, description text, unmapped text)", + "CREATE TABLE product_simple_missing_clustering_column(id uuid PRIMARY KEY, description text, unmapped text)", + "CREATE TABLE product_pk_and_clustering(id uuid, c_id uuid, PRIMARY KEY (id, c_id))", + "CREATE TABLE 
product_wrong_type(id uuid PRIMARY KEY, wrong_type_column text)", + "CREATE TYPE dimensions_with_incorrect_name(length int, width int, height int)", + "CREATE TYPE dimensions_with_wrong_type(length int, width int, height text)", + "CREATE TYPE dimensions_with_incorrect_name_schema_hint_udt(length int, width int, height int)", + "CREATE TYPE dimensions_with_incorrect_name_schema_hint_table(length int, width int, height int)", + "CREATE TABLE product_with_incorrect_udt(id uuid PRIMARY KEY, description text, dimensions dimensions_with_incorrect_name)", + "CREATE TABLE product_with_incorrect_udt_schema_hint_udt(id uuid PRIMARY KEY, description text, dimensions dimensions_with_incorrect_name_schema_hint_udt)", + "CREATE TABLE product_with_incorrect_udt_schema_hint_table(id uuid PRIMARY KEY, description text, dimensions dimensions_with_incorrect_name_schema_hint_table)", + "CREATE TABLE product_with_udt_wrong_type(id uuid PRIMARY KEY, description text, dimensions dimensions_with_wrong_type)"); + + for (String query : statements) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + } + for (String query : createStatements(ccm)) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + } + mapper = + new SchemaValidationIT_InventoryMapperBuilder(session) + .withSchemaValidationEnabled(true) + .build(); + mapperDisabledValidation = + new SchemaValidationIT_InventoryMapperBuilder(session) + .withSchemaValidationEnabled(false) + .build(); + } + + @Before + public void clearData() { + CqlSession session = sessionRule.session(); + session.execute( + SimpleStatement.builder("TRUNCATE product_simple") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product_with_incorrect_udt") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product_with_incorrect_udt_schema_hint_udt") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product_with_incorrect_udt_schema_hint_table") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product_wrong_type") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product_pk_and_clustering") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + session.execute( + SimpleStatement.builder("TRUNCATE product_with_udt_wrong_type") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + + @Test + public void should_throw_when_use_not_properly_mapped_entity() { + assertThatThrownBy(() -> mapper.productSimpleDao(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + String.format( + "The CQL ks.table: %s.product_simple has missing columns: [description_with_incorrect_name, some_other_not_mapped_field] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimple", + sessionRule.keyspace())); + } + + @Test + public void + should_throw_when_use_not_properly_mapped_entity_when_ks_is_passed_as_null_extracting_ks_from_session() { + assertThatThrownBy(() -> mapper.productSimpleDao(null)) + 
.isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + String.format( + "The CQL ks.table: %s.product_simple has missing columns: [description_with_incorrect_name, some_other_not_mapped_field] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimple", + sessionRule.keyspace())); + } + + @Test + public void should_log_warn_when_entity_has_no_corresponding_cql_table() { + LoggerTest.LoggerSetup logger = + setupTestLogger( + SchemaValidationIT_ProductCqlTableMissingHelper__MapperGenerated.class, Level.WARN); + try { + assertThatThrownBy(() -> mapper.productCqlTableMissingDao(sessionRule.keyspace())) + .isInstanceOf(InvalidQueryException.class); + + verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains( + String.format( + "There is no ks.table or UDT: %s.product_cql_table_missing for the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductCqlTableMissing, or metadata is out of date.", + sessionRule.keyspace())); + + } finally { + logger.close(); + } + } + + @Test + public void should_throw_general_driver_exception_when_schema_validation_check_is_disabled() { + assertThatThrownBy( + () -> mapperDisabledValidation.productDaoValidationDisabled(sessionRule.keyspace())) + .isInstanceOf(InvalidQueryException.class) + .hasMessageContaining("Undefined column name description_with_incorrect_name"); + } + + @Test + public void should_not_throw_on_table_with_properly_mapped_udt_field() { + assertThatCode(() -> mapper.productDao(sessionRule.keyspace())).doesNotThrowAnyException(); + } + + @Test + public void should_throw_when_use_not_properly_mapped_entity_with_udt() { + assertThatThrownBy(() -> mapper.productWithIncorrectUdtDao(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasStackTraceContaining( + String.format( + "The CQL ks.udt: %s.dimensions_with_incorrect_name has missing columns: [length_not_present] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithIncorrectName", + sessionRule.keyspace())); + } + + @Test + public void should_throw_when_use_not_properly_mapped_entity_with_udt_with_udt_schema_hint() { + assertThatThrownBy(() -> mapper.productWithIncorrectUdtSchemaHintUdt(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasStackTraceContaining( + String.format( + "The CQL ks.udt: %s.dimensions_with_incorrect_name_schema_hint_udt has missing columns: [length_not_present] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithIncorrectNameSchemaHintUdt", + sessionRule.keyspace())); + } + + @Test + public void + should_warn_about_missing_table_when_use_not_properly_mapped_entity_with_udt_with_table_schema_hint() { + LoggerTest.LoggerSetup logger = + setupTestLogger( + SchemaValidationIT_DimensionsWithIncorrectNameSchemaHintTableHelper__MapperGenerated + .class, + Level.WARN); + try { + // when + mapper.productWithIncorrectUdtSchemaHintTable(sessionRule.keyspace()); + + verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains( + String.format( + "There is no ks.table or UDT: 
%s.dimensions_with_incorrect_name_schema_hint_table for the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithIncorrectNameSchemaHintTable, or metadata is out of date.", + sessionRule.keyspace())); + } finally { + logger.close(); + } + } + + @Test + public void should_throw_when_table_is_missing_PKs() { + assertThatThrownBy(() -> mapper.productSimpleMissingPKDao(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + String.format( + "The CQL ks.table: %s.product_simple_missing_p_k has missing Primary Key columns: [id_not_present] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimpleMissingPK", + sessionRule.keyspace())); + } + + @Test + public void should_throw_when_table_is_missing_clustering_column() { + assertThatThrownBy(() -> mapper.productSimpleMissingClusteringColumn(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + String.format( + "The CQL ks.table: %s.product_simple_missing_clustering_column has missing Clustering columns: [not_existing_clustering_column] that are defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimpleMissingClusteringColumn", + sessionRule.keyspace())); + } + + @Test + public void should_throw_when_type_defined_in_table_does_not_match_type_from_entity() { + assertThatThrownBy(() -> mapper.productDaoWrongType(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + String.format( + "The CQL ks.table: %s.product_wrong_type defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductWrongType declares type mappings that are not supported by the codec registry:\n" + + "Field: wrong_type_column, Entity Type: java.lang.Integer, CQL type: TEXT", + sessionRule.keyspace())); + } + + @Test + public void should_throw_when_type_defined_in_udt_does_not_match_type_from_entity() { + assertThatThrownBy(() -> mapper.productWithUdtWrongTypeDao(sessionRule.keyspace())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining( + String.format( + "The CQL ks.udt: %s.dimensions_with_wrong_type defined in the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.DimensionsWithWrongType declares type mappings that are not supported by the codec registry:\n" + + "Field: height, Entity Type: java.lang.Integer, CQL type: TEXT", + sessionRule.keyspace())); + } + + @Test + public void should_not_throw_when_have_correct_pk_and_clustering() { + assertThatCode(() -> mapper.productPkAndClusteringDao(sessionRule.keyspace())) + .doesNotThrowAnyException(); + } + + @Test + public void should_log_warning_when_passing_not_existing_keyspace() { + LoggerTest.LoggerSetup logger = + setupTestLogger(SchemaValidationIT_ProductSimpleHelper__MapperGenerated.class, Level.WARN); + try { + // when + assertThatThrownBy( + () -> mapper.productSimpleDao(CqlIdentifier.fromCql("not_existing_keyspace"))) + .isInstanceOf(InvalidQueryException.class) + .hasMessageContaining("not_existing_keyspace does not exist"); + + // then + verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains( + "Unable to validate table: product_simple for the entity class: com.datastax.oss.driver.mapper.SchemaValidationIT.ProductSimple because the 
session metadata has no information about the keyspace: not_existing_keyspace."); + } finally { + logger.close(); + } + } + + @Test + public void should_not_warn_or_throw_when_target_element_is_NONE() { + LoggerTest.LoggerSetup logger = + setupTestLogger( + SchemaValidationIT_DoesNotExistNoValidationHelper__MapperGenerated.class, Level.WARN); + + // when + mapper.noValidationDao(sessionRule.keyspace()); + + // then + // no exceptions, no logs + verify(logger.appender, never()).doAppend(any()); + } + + @Mapper + public interface InventoryMapper { + @DaoFactory + ProductSimpleDao productSimpleDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductSimpleCqlTableMissingDao productCqlTableMissingDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductSimpleDaoValidationDisabledDao productDaoValidationDisabled( + @DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductWithIncorrectUdtDao productWithIncorrectUdtDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductWithIncorrectUdtSchemaHintUdtDao productWithIncorrectUdtSchemaHintUdt( + @DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductWithIncorrectUdtSchemaHintTableDao productWithIncorrectUdtSchemaHintTable( + @DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductWithUdtWrongTypeDao productWithUdtWrongTypeDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductSimpleMissingPKDao productSimpleMissingPKDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductSimpleMissingClusteringColumnDao productSimpleMissingClusteringColumn( + @DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductDaoWrongTypeDao productDaoWrongType(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + ProductPkAndClusteringDao productPkAndClusteringDao(@DaoKeyspace CqlIdentifier keyspace); + + @DaoFactory + NoValidationDao noValidationDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + public interface ProductWithIncorrectUdtDao { + + @Update(customWhereClause = "id = :id") + void updateWhereId(ProductWithIncorrectUdt product, UUID id); + } + + @Dao + public interface ProductWithIncorrectUdtSchemaHintUdtDao { + + @Update(customWhereClause = "id = :id") + void updateWhereId(ProductWithIncorrectUdtSchemaHintUdt product, UUID id); + } + + @Dao + public interface ProductWithIncorrectUdtSchemaHintTableDao { + + @Update(customWhereClause = "id = :id") + void updateWhereId(ProductWithIncorrectUdtSchemaHintTable product, UUID id); + } + + @Dao + public interface ProductWithUdtWrongTypeDao { + + @Update(customWhereClause = "id = :id") + void updateWhereId(ProductWithUdtWrongType product, UUID id); + } + + @Dao + public interface ProductDao { + + @Update(customWhereClause = "id = :id") + void updateWhereId(Product product, UUID id); + } + + @Dao + public interface ProductSimpleDao { + + @Select + ProductSimple findById(UUID productId); + } + + @Dao + public interface ProductSimpleDaoValidationDisabledDao { + + @Select + ProductSimple findById(UUID productId); + } + + @Dao + public interface ProductSimpleCqlTableMissingDao { + + @Select + ProductCqlTableMissing findById(UUID productId); + } + + @Dao + public interface ProductSimpleMissingPKDao { + @Select + ProductSimpleMissingPK findById(UUID productId); + } + + @Dao + public interface ProductSimpleMissingClusteringColumnDao { + @Select + ProductSimpleMissingClusteringColumn findById(UUID productId); + } + + @Dao + public interface ProductDaoWrongTypeDao { + + 
@Select + ProductWrongType findById(UUID productId); + } + + @Dao + public interface ProductPkAndClusteringDao { + + @Select + ProductPkAndClustering findById(UUID productId); + } + + @Dao + public interface NoValidationDao { + // Not a real query, we just need to reference the entities + @QueryProvider( + providerClass = DummyProvider.class, + entityHelpers = {DoesNotExistNoValidation.class, ProductCqlTableMissingNoValidation.class}) + void doNothing(); + } + + @SuppressWarnings("unused") + static class DummyProvider { + DummyProvider( + MapperContext context, + EntityHelper<DoesNotExistNoValidation> helper1, + EntityHelper<ProductCqlTableMissingNoValidation> helper2) {} + + void doNothing() {} + } + + @Entity + public static class ProductCqlTableMissing { + @PartitionKey private UUID id; + + public ProductCqlTableMissing() {} + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + } + + @Entity + public static class ProductSimpleMissingPK { + @PartitionKey private UUID idNotPresent; + + public ProductSimpleMissingPK() {} + + public UUID getIdNotPresent() { + return idNotPresent; + } + + public void setIdNotPresent(UUID idNotPresent) { + this.idNotPresent = idNotPresent; + } + } + + @Entity + public static class ProductWrongType { + @PartitionKey private UUID id; + private Integer wrongTypeColumn; + + public ProductWrongType() {} + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public Integer getWrongTypeColumn() { + return wrongTypeColumn; + } + + public void setWrongTypeColumn(Integer wrongTypeColumn) { + this.wrongTypeColumn = wrongTypeColumn; + } + } + + @Entity + public static class ProductSimpleMissingClusteringColumn { + @PartitionKey private UUID id; + @ClusteringColumn private Integer notExistingClusteringColumn; + + public ProductSimpleMissingClusteringColumn() {} + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public Integer getNotExistingClusteringColumn() { + return notExistingClusteringColumn; + } + + public void setNotExistingClusteringColumn(Integer notExistingClusteringColumn) { + this.notExistingClusteringColumn = notExistingClusteringColumn; + } + } + + @Entity + public static class ProductPkAndClustering { + @PartitionKey private UUID id; + @ClusteringColumn private UUID cId; + + public ProductPkAndClustering() {} + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public UUID getcId() { + return cId; + } + + public void setcId(UUID cId) { + this.cId = cId; + } + } + + @Entity + public static class ProductSimple { + @PartitionKey private UUID id; + private String descriptionWithIncorrectName; + private Integer someOtherNotMappedField; + + public ProductSimple() {} + + public ProductSimple( + UUID id, String descriptionWithIncorrectName, Integer someOtherNotMappedField) { + this.id = id; + this.descriptionWithIncorrectName = descriptionWithIncorrectName; + this.someOtherNotMappedField = someOtherNotMappedField; + } + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public String getDescriptionWithIncorrectName() { + return descriptionWithIncorrectName; + } + + public void setDescriptionWithIncorrectName(String descriptionWithIncorrectName) { + this.descriptionWithIncorrectName = descriptionWithIncorrectName; + } + + public Integer getSomeOtherNotMappedField() { + return someOtherNotMappedField; + } + + public void setSomeOtherNotMappedField(Integer someOtherNotMappedField) { + 
this.someOtherNotMappedField = someOtherNotMappedField; + } + + @Override + public boolean equals(Object o) { + + if (this == o) { + return true; + } + if (!(o instanceof ProductSimple)) { + return false; + } + ProductSimple that = (ProductSimple) o; + return this.id.equals(that.id) + && this.someOtherNotMappedField.equals(that.someOtherNotMappedField) + && this.descriptionWithIncorrectName.equals(that.descriptionWithIncorrectName); + } + + @Override + public int hashCode() { + return Objects.hash(id, descriptionWithIncorrectName, someOtherNotMappedField); + } + + @Override + public String toString() { + return "ProductSimple{" + + "id=" + + id + + ", descriptionWithIncorrectName='" + + descriptionWithIncorrectName + + '\'' + + ", someOtherNotMappedField=" + + someOtherNotMappedField + + '}'; + } + } + + @Entity + public static class ProductWithIncorrectUdt { + + @PartitionKey private UUID id; + private String description; + private DimensionsWithIncorrectName dimensions; + + public ProductWithIncorrectUdt() {} + + public ProductWithIncorrectUdt( + UUID id, String description, DimensionsWithIncorrectName dimensions) { + this.id = id; + this.description = description; + this.dimensions = dimensions; + } + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public DimensionsWithIncorrectName getDimensions() { + return dimensions; + } + + public void setDimensions(DimensionsWithIncorrectName dimensions) { + this.dimensions = dimensions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ProductWithIncorrectUdt)) { + return false; + } + ProductWithIncorrectUdt that = (ProductWithIncorrectUdt) o; + return this.id.equals(that.id) + && this.description.equals(that.description) + && this.dimensions.equals(that.dimensions); + } + + @Override + public int hashCode() { + return Objects.hash(id, description, dimensions); + } + + @Override + public String toString() { + return "ProductWithIncorrectUdt{" + + "id=" + + id + + ", description='" + + description + + '\'' + + ", dimensions=" + + dimensions + + '}'; + } + } + + @Entity + public static class ProductWithUdtWrongType { + + @PartitionKey private UUID id; + private String description; + private DimensionsWithWrongType dimensions; + + public ProductWithUdtWrongType() {} + + public ProductWithUdtWrongType( + UUID id, String description, DimensionsWithWrongType dimensions) { + this.id = id; + this.description = description; + this.dimensions = dimensions; + } + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public DimensionsWithWrongType getDimensions() { + return dimensions; + } + + public void setDimensions(DimensionsWithWrongType dimensions) { + this.dimensions = dimensions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ProductWithUdtWrongType)) { + return false; + } + ProductWithUdtWrongType that = (ProductWithUdtWrongType) o; + return this.id.equals(that.id) + && this.description.equals(that.description) + && this.dimensions.equals(that.dimensions); + } + + @Override + public int hashCode() { + return Objects.hash(id, 
description, dimensions); + } + + @Override + public String toString() { + return "ProductWithUdtWrongType{" + + "id=" + + id + + ", description='" + + description + + '\'' + + ", dimensions=" + + dimensions + + '}'; + } + } + + @Entity + public static class ProductWithIncorrectUdtSchemaHintUdt { + + @PartitionKey private UUID id; + private String description; + private DimensionsWithIncorrectNameSchemaHintUdt dimensions; + + public ProductWithIncorrectUdtSchemaHintUdt() {} + + public ProductWithIncorrectUdtSchemaHintUdt( + UUID id, String description, DimensionsWithIncorrectNameSchemaHintUdt dimensions) { + this.id = id; + this.description = description; + this.dimensions = dimensions; + } + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public DimensionsWithIncorrectNameSchemaHintUdt getDimensions() { + return dimensions; + } + + public void setDimensions(DimensionsWithIncorrectNameSchemaHintUdt dimensions) { + this.dimensions = dimensions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ProductWithIncorrectUdtSchemaHintUdt)) { + return false; + } + ProductWithIncorrectUdtSchemaHintUdt that = (ProductWithIncorrectUdtSchemaHintUdt) o; + return this.id.equals(that.id) + && this.description.equals(that.description) + && this.dimensions.equals(that.dimensions); + } + + @Override + public int hashCode() { + return Objects.hash(id, description, dimensions); + } + + @Override + public String toString() { + return "ProductWithIncorrectUdtSchemaHint{" + + "id=" + + id + + ", description='" + + description + + '\'' + + ", dimensions=" + + dimensions + + '}'; + } + } + + @Entity + public static class ProductWithIncorrectUdtSchemaHintTable { + + @PartitionKey private UUID id; + private String description; + private DimensionsWithIncorrectNameSchemaHintTable dimensions; + + public ProductWithIncorrectUdtSchemaHintTable() {} + + public ProductWithIncorrectUdtSchemaHintTable( + UUID id, String description, DimensionsWithIncorrectNameSchemaHintTable dimensions) { + this.id = id; + this.description = description; + this.dimensions = dimensions; + } + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public DimensionsWithIncorrectNameSchemaHintTable getDimensions() { + return dimensions; + } + + public void setDimensions(DimensionsWithIncorrectNameSchemaHintTable dimensions) { + this.dimensions = dimensions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ProductWithIncorrectUdtSchemaHintTable)) { + return false; + } + ProductWithIncorrectUdtSchemaHintTable that = (ProductWithIncorrectUdtSchemaHintTable) o; + return this.id.equals(that.id) + && this.description.equals(that.description) + && this.dimensions.equals(that.dimensions); + } + + @Override + public int hashCode() { + return Objects.hash(id, description, dimensions); + } + + @Override + public String toString() { + return "ProductWithIncorrectUdtSchemaHintTable{" + + "id=" + + id + + ", description='" + + description + + '\'' + + ", dimensions=" + + dimensions + + '}'; + } + } + + @Entity + public static class 
DimensionsWithIncorrectName { + + private int lengthNotPresent; + private int width; + private int height; + + public DimensionsWithIncorrectName() {} + + public DimensionsWithIncorrectName(int lengthNotPresent, int width, int height) { + this.lengthNotPresent = lengthNotPresent; + this.width = width; + this.height = height; + } + + public int getLengthNotPresent() { + return lengthNotPresent; + } + + public void setLengthNotPresent(int lengthNotPresent) { + this.lengthNotPresent = lengthNotPresent; + } + + public int getWidth() { + return width; + } + + public void setWidth(int width) { + this.width = width; + } + + public int getHeight() { + return height; + } + + public void setHeight(int height) { + this.height = height; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DimensionsWithIncorrectName)) { + return false; + } + DimensionsWithIncorrectName that = (DimensionsWithIncorrectName) o; + return this.lengthNotPresent == that.lengthNotPresent + && this.height == that.height + && this.width == that.width; + } + + @Override + public int hashCode() { + return Objects.hash(lengthNotPresent, width, height); + } + + @Override + public String toString() { + return "DimensionsWithIncorrectName{" + + "lengthNotPresent=" + + lengthNotPresent + + ", width=" + + width + + ", height=" + + height + + '}'; + } + } + + @Entity + public static class DimensionsWithWrongType { + + private int length; + private int width; + private int height; + + public DimensionsWithWrongType() {} + + public DimensionsWithWrongType(int length, int width, int height) { + this.length = length; + this.width = width; + this.height = height; + } + + public int getLength() { + return length; + } + + public void setLength(int length) { + this.length = length; + } + + public int getWidth() { + return width; + } + + public void setWidth(int width) { + this.width = width; + } + + public int getHeight() { + return height; + } + + public void setHeight(int height) { + this.height = height; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DimensionsWithWrongType)) { + return false; + } + DimensionsWithWrongType that = (DimensionsWithWrongType) o; + return this.length == that.length && this.height == that.height && this.width == that.width; + } + + @Override + public int hashCode() { + return Objects.hash(length, width, height); + } + + @Override + public String toString() { + return "DimensionsWithWrongType{" + + "length=" + + length + + ", width=" + + width + + ", height=" + + height + + '}'; + } + } + + @Entity + @SchemaHint(targetElement = TargetElement.UDT) + public static class DimensionsWithIncorrectNameSchemaHintUdt { + + private int lengthNotPresent; + private int width; + private int height; + + public DimensionsWithIncorrectNameSchemaHintUdt() {} + + public DimensionsWithIncorrectNameSchemaHintUdt(int lengthNotPresent, int width, int height) { + this.lengthNotPresent = lengthNotPresent; + this.width = width; + this.height = height; + } + + public int getLengthNotPresent() { + return lengthNotPresent; + } + + public void setLengthNotPresent(int lengthNotPresent) { + this.lengthNotPresent = lengthNotPresent; + } + + public int getWidth() { + return width; + } + + public void setWidth(int width) { + this.width = width; + } + + public int getHeight() { + return height; + } + + public void setHeight(int height) { + this.height = height; + } + + @Override + public boolean equals(Object o) { + if (this 
== o) { + return true; + } + if (!(o instanceof DimensionsWithIncorrectNameSchemaHintUdt)) { + return false; + } + DimensionsWithIncorrectNameSchemaHintUdt that = (DimensionsWithIncorrectNameSchemaHintUdt) o; + return this.lengthNotPresent == that.lengthNotPresent + && this.height == that.height + && this.width == that.width; + } + + @Override + public int hashCode() { + return Objects.hash(lengthNotPresent, width, height); + } + + @Override + public String toString() { + return "DimensionsWithIncorrectNameSchemaHintUdt{" + + "lengthNotPresent=" + + lengthNotPresent + + ", width=" + + width + + ", height=" + + height + + '}'; + } + } + + @Entity + @SchemaHint(targetElement = TargetElement.TABLE) + public static class DimensionsWithIncorrectNameSchemaHintTable { + + private int lengthNotPresent; + private int width; + private int height; + + public DimensionsWithIncorrectNameSchemaHintTable() {} + + public DimensionsWithIncorrectNameSchemaHintTable(int lengthNotPresent, int width, int height) { + this.lengthNotPresent = lengthNotPresent; + this.width = width; + this.height = height; + } + + public int getLengthNotPresent() { + return lengthNotPresent; + } + + public void setLengthNotPresent(int lengthNotPresent) { + this.lengthNotPresent = lengthNotPresent; + } + + public int getWidth() { + return width; + } + + public void setWidth(int width) { + this.width = width; + } + + public int getHeight() { + return height; + } + + public void setHeight(int height) { + this.height = height; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof DimensionsWithIncorrectNameSchemaHintTable)) { + return false; + } + DimensionsWithIncorrectNameSchemaHintTable that = + (DimensionsWithIncorrectNameSchemaHintTable) o; + return this.lengthNotPresent == that.lengthNotPresent + && this.height == that.height + && this.width == that.width; + } + + @Override + public int hashCode() { + return Objects.hash(lengthNotPresent, width, height); + } + + @Override + public String toString() { + return "DimensionsWithIncorrectNameSchemaHintTable{" + + "lengthNotPresent=" + + lengthNotPresent + + ", width=" + + width + + ", height=" + + height + + '}'; + } + } + + @Entity + @SchemaHint(targetElement = TargetElement.NONE) + public static class DoesNotExistNoValidation { + private int k; + + public int getK() { + return k; + } + + public void setK(int k) { + this.k = k; + } + } + + @Entity + @SchemaHint(targetElement = TargetElement.NONE) + public static class ProductCqlTableMissingNoValidation extends ProductCqlTableMissing {} +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java index 1d8336053cb..1f1b92b8623 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectCustomWhereClauseIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,8 @@ package com.datastax.oss.driver.mapper; import static com.datastax.oss.driver.assertions.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.junit.Assume.assumeFalse; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -30,13 +34,15 @@ import com.datastax.oss.driver.api.mapper.annotations.Insert; import com.datastax.oss.driver.api.mapper.annotations.Mapper; import com.datastax.oss.driver.api.mapper.annotations.Select; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; +import java.time.Duration; import java.util.concurrent.CompletionStage; -import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -45,52 +51,70 @@ import org.junit.rules.TestRule; @Category(ParallelizableTests.class) -@CassandraRequirement(min = "3.4", description = "Creates a SASI index") +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.4", + description = "Creates a SASI index") public class SelectCustomWhereClauseIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); - - for (String query : createStatements(ccm)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } + // SASI index creation is broken in DSE 6.8.0 + // All tests in this class require SASI, so ensure it's working + assumeFalse(InventoryITBase.isSasiBroken(CCM_RULE)); + + CqlSession session = SESSION_RULE.session(); + + SchemaChangeSynchronizer.withLock( + () -> { + for (String query : createStatements(CCM_RULE, true)) { + session.execute( + SimpleStatement.builder(query) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + }); InventoryMapper inventoryMapper = new SelectCustomWhereClauseIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); - } - - @Before - public void insertData() { + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); dao.save(FLAMETHROWER); dao.save(MP3_DOWNLOAD); }
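+  // SASI indexes apply writes asynchronously, so a freshly saved row may not be visible to
+  // index queries right away; the tests below poll with Awaitility until the data appears.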
@Test public void should_select_with_custom_clause() { - PagingIterable<Product> products = dao.findByDescription("%mp3%"); - assertThat(products.one()).isEqualTo(MP3_DOWNLOAD); - assertThat(products.iterator()).isExhausted(); + await() + .atMost(Duration.ofMinutes(1)) + .untilAsserted( + () -> { + PagingIterable<Product> products = dao.findByDescription("%mp3%"); + assertThat(products.one()).isEqualTo(MP3_DOWNLOAD); + assertThat(products.iterator()).isExhausted(); + }); } @Test public void should_select_with_custom_clause_asynchronously() { - MappedAsyncPagingIterable<Product> iterable = - CompletableFutures.getUninterruptibly( - dao.findByDescriptionAsync("%mp3%").toCompletableFuture()); - assertThat(iterable.one()).isEqualTo(MP3_DOWNLOAD); - assertThat(iterable.currentPage().iterator()).isExhausted(); - assertThat(iterable.hasMorePages()).isFalse(); + await() + .atMost(Duration.ofMinutes(1)) + .untilAsserted( + () -> { + MappedAsyncPagingIterable<Product> iterable = + CompletableFutures.getUninterruptibly( + dao.findByDescriptionAsync("%mp3%").toCompletableFuture()); + assertThat(iterable.one()).isEqualTo(MP3_DOWNLOAD); + assertThat(iterable.currentPage().iterator()).isExhausted(); + assertThat(iterable.hasMorePages()).isFalse(); + }); } @Mapper diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java index 3fd946d0a36..fcb78c3075d 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +19,12 @@ import static org.assertj.core.api.Assertions.assertThat; -import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; import com.datastax.oss.driver.api.core.PagingIterable; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.mapper.annotations.Dao; import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; -import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; import com.datastax.oss.driver.api.mapper.annotations.Delete; import com.datastax.oss.driver.api.mapper.annotations.Insert; @@ -37,6 +38,7 @@ import java.util.Optional; import java.util.UUID; import java.util.concurrent.CompletionStage; +import java.util.stream.Stream; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -48,11 +50,11 @@ @Category(ParallelizableTests.class) public class SelectIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; @@ -60,16 +62,19 @@ public class SelectIT extends InventoryITBase { @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); - for (String query : createStatements(ccm)) { + for (String query : createStatements(CCM_RULE)) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } - InventoryMapper inventoryMapper = new SelectIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); - saleDao = inventoryMapper.productSaleDao(sessionRule.keyspace()); + InventoryMapper inventoryMapper = + new SelectIT_InventoryMapperBuilder(session) + .withDefaultKeyspace(SESSION_RULE.keyspace()) + .build(); + dao = inventoryMapper.productDao(); + saleDao = inventoryMapper.productSaleDao(); } @Before @@ -98,6 +103,21 @@ public void should_select_all() { assertThat(dao.all().all()).hasSize(2); } + @Test + public void should_select_all_async() { + assertThat(CompletableFutures.getUninterruptibly(dao.allAsync()).currentPage()).hasSize(2); + } + + @Test + public void should_select_all_stream() { + assertThat(dao.stream()).hasSize(2); + } + + @Test + public void should_select_all_stream_async() { + assertThat(CompletableFutures.getUninterruptibly(dao.streamAsync())).hasSize(2); + } + @Test public void should_select_by_primary_key_asynchronously() { assertThat(CompletableFutures.getUninterruptibly(dao.findByIdAsync(FLAMETHROWER.getId()))) @@ -140,6 +160,18 @@ public void should_select_all_sales() { MP3_DOWNLOAD_SALE_1); } + @Test + public void should_select_all_sales_stream() { + assertThat(saleDao.stream()) + .containsOnly( + FLAMETHROWER_SALE_1, + FLAMETHROWER_SALE_3, + FLAMETHROWER_SALE_4, + FLAMETHROWER_SALE_2, + FLAMETHROWER_SALE_5, + MP3_DOWNLOAD_SALE_1); + } + @Test public void should_select_by_partition_key() { assertThat(saleDao.salesByIdForDay(FLAMETHROWER.getId(), DATE_1).all()) @@ -147,12 +179,25 @@ public void should_select_by_partition_key() { FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4); } + @Test + public void should_select_by_partition_key_stream() { + assertThat(saleDao.salesByIdForDayStream(FLAMETHROWER.getId(), DATE_1)) + .containsOnly( + FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_2, FLAMETHROWER_SALE_4); + } + @Test public void should_select_by_partition_key_and_partial_clustering() { assertThat(saleDao.salesByIdForCustomer(FLAMETHROWER.getId(), DATE_1, 1).all()) .containsOnly(FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_4); } + @Test + public void should_select_by_partition_key_and_partial_clustering_stream() { + assertThat(saleDao.salesByIdForCustomerStream(FLAMETHROWER.getId(), DATE_1, 1)) + .containsOnly(FLAMETHROWER_SALE_1, FLAMETHROWER_SALE_3, FLAMETHROWER_SALE_4); + } + @Test public void should_select_by_primary_key_sales() { assertThat( @@ -164,10 +209,10 @@ public void should_select_by_primary_key_sales() { @Mapper public interface InventoryMapper { @DaoFactory - ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + ProductDao productDao(); @DaoFactory - ProductSaleDao productSaleDao(@DaoKeyspace CqlIdentifier keyspace); + ProductSaleDao productSaleDao(); } @Dao @@ -179,6 +224,15 @@ public interface ProductDao { @Select PagingIterable<Product> all();
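+  // The generated DAO adapts the same SELECT to whichever return type a method declares:
+  // a synchronous PagingIterable, a java.util.stream.Stream, or their async CompletionStage variants.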
+ @Select + CompletionStage<MappedAsyncPagingIterable<Product>> allAsync(); + + @Select + Stream<Product> stream(); + + @Select + CompletionStage<Stream<Product>> streamAsync(); + @Select Optional<Product> findOptionalById(UUID productId); @@ -202,14 +256,23 @@ public interface ProductSaleDao { @Select PagingIterable<ProductSale> all(); + @Select + Stream<ProductSale> stream(); + // partition key provided @Select PagingIterable<ProductSale> salesByIdForDay(UUID id, String day); + @Select + Stream<ProductSale> salesByIdForDayStream(UUID id, String day); + // partition key and partial clustering key @Select PagingIterable<ProductSale> salesByIdForCustomer(UUID id, String day, int customerId); + @Select + Stream<ProductSale> salesByIdForCustomerStream(UUID id, String day, int customerId); + // full primary key @Select ProductSale salesByIdForCustomerAtTime(UUID id, String day, int customerId, UUID ts); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectOtherClausesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectOtherClausesIT.java new file mode 100644 index 00000000000..3eb40fd8520 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectOtherClausesIT.java @@ -0,0 +1,295 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.PagingIterable; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; +import com.datastax.oss.driver.api.mapper.annotations.Computed; +import com.datastax.oss.driver.api.mapper.annotations.CqlName; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.6", + description = "Uses PER PARTITION LIMIT") +public class SelectOtherClausesIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static SimpleDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + + for (String query : + ImmutableList.of("CREATE TABLE simple (k int, cc int, v int, PRIMARY KEY (k, cc))")) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); + } + + TestMapper mapper = TestMapper.builder(session).build(); + dao = mapper.simpleDao(SESSION_RULE.keyspace()); + + for (int k = 0; k < 2; k++) { + for (int cc = 0; cc < 10; cc++) { + dao.insert(new Simple(k, cc, 1)); + } + } + } + + @Test + public void should_select_with_limit() { + PagingIterable<Simple> elements = dao.selectWithLimit(10); + assertThat(elements.isFullyFetched()).isTrue(); + assertThat(elements.getAvailableWithoutFetching()).isEqualTo(10); + + elements = dao.selectWithLimit(0, 5); + assertThat(elements.isFullyFetched()).isTrue(); + assertThat(elements.getAvailableWithoutFetching()).isEqualTo(5); + + elements = dao.selectWithLimit(0, 0, 1); + assertThat(elements.isFullyFetched()).isTrue(); + assertThat(elements.getAvailableWithoutFetching()).isEqualTo(1); + } + + @Test + public void should_select_with_per_partition_limit() { + PagingIterable<Simple> elements = dao.selectWithPerPartitionLimit(5); + assertThat(elements.isFullyFetched()).isTrue(); + assertThat(elements.getAvailableWithoutFetching()).isEqualTo(10); + + Map<Integer, Integer> elementCountPerPartition = new HashMap<>(); + for (Simple element : elements) { + elementCountPerPartition.compute(element.getK(), (k, v) -> (v == null) ? 1 : v + 1); + } + assertThat(elementCountPerPartition).hasSize(2).containsEntry(0, 5).containsEntry(1, 5); + } + + @Test + public void should_select_with_order_by() { + PagingIterable<Simple> elements = dao.selectByCcDesc(0); + int previousCc = Integer.MAX_VALUE; + for (Simple element : elements) { + assertThat(element.getCc()).isLessThan(previousCc); + previousCc = element.getCc(); + } + } + + @Test + public void should_select_with_group_by() { + PagingIterable<Sum> sums = dao.selectSumByK(); + assertThat(sums.all()).hasSize(2).containsOnly(new Sum(0, 10), new Sum(1, 10)); + } + + @Test + public void should_select_with_allow_filtering() { + PagingIterable<Simple> elements = dao.selectByCc(1); + assertThat(elements.all()).hasSize(2).containsOnly(new Simple(0, 1, 1), new Simple(1, 1, 1)); + } + + @Mapper + public interface TestMapper { + @DaoFactory + SimpleDao simpleDao(@DaoKeyspace CqlIdentifier keyspace); + + static MapperBuilder<TestMapper> builder(CqlSession session) { + return new SelectOtherClausesIT_TestMapperBuilder(session); + } + } + + @Dao + public interface SimpleDao { + @Insert + void insert(Simple simple); + + @Select(limit = ":l") + PagingIterable<Simple> selectWithLimit(@CqlName("l") int l); + + @Select(limit = ":l") + PagingIterable<Simple> selectWithLimit(int k, @CqlName("l") int l); + + /** + * Contrived since the query will return at most a single row, but this is just to check that + * {@code l} doesn't need an explicit name when the full primary key is provided. + */ + @Select(limit = ":l") + PagingIterable<Simple> selectWithLimit(int k, int cc, int l); + + @Select(perPartitionLimit = ":perPartitionLimit") + PagingIterable<Simple> selectWithPerPartitionLimit( + @CqlName("perPartitionLimit") int perPartitionLimit); + + @Select(orderBy = "cc DESC") + PagingIterable<Simple> selectByCcDesc(int k); + + @Select(groupBy = "k") + PagingIterable<Sum> selectSumByK(); + + @Select(customWhereClause = "cc = :cc", allowFiltering = true) + PagingIterable<Simple> selectByCc(int cc); + } + + @Entity + public static class Simple { + @PartitionKey private int k; + @ClusteringColumn private int cc; + private int v; + + public Simple() {} + + public Simple(int k, int cc, int v) { + this.k = k; + this.cc = cc; + this.v = v; + } + + public int getK() { + return k; + } + + public void setK(int k) { + this.k = k; + } + + public int getCc() { + return cc; + } + + public void setCc(int cc) { + this.cc = cc; + } + + public int getV() { + return v; + } + + public void setV(int v) { + this.v = v; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof Simple) { + Simple that = (Simple) other; + return this.k == that.k && this.cc == that.cc && this.v == that.v; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(k, cc, v); + } + + @Override + public String toString() { + return String.format("Simple(%d, %d, %d)", k, cc, v); + } + }
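+  // Backs the GROUP BY query above: "value" is not a real column, it is computed
+  // server-side by the aggregate sum(v).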
+ + @Entity + @CqlName("simple") + public static class Sum { + private int k; + + @Computed("sum(v)") + private int value; + + public Sum() {} + + public Sum(int k, int value) { + this.k = k; + this.value = value; + } + + public int getK() { + return k; + } + + public void setK(int k) { + this.k = k; + } + + public int getValue() { + return value; + } + + public void setValue(int value) { + this.value = value; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other instanceof Sum) { + Sum that = (Sum) other; + return this.k == that.k && this.value == that.value; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(k, value); + } + + @Override + public String toString() { + return String.format("Sum(%d, %d)", k, value); + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectReactiveIT.java new file mode 100644 index 00000000000..79e4d2b33ea --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SelectReactiveIT.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Delete; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import io.reactivex.Flowable; +import java.util.UUID; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class SelectReactiveIT extends InventoryITBase { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + private static DseProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = sessionRule.session(); + + SchemaChangeSynchronizer.withLock( + () -> { + for (String query : createStatements(ccmRule)) { + session.execute( + SimpleStatement.builder(query) + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + }); + + DseInventoryMapper inventoryMapper = + new SelectReactiveIT_DseInventoryMapperBuilder(session).build(); + dao = inventoryMapper.productDao(sessionRule.keyspace()); + } + + @Before + public void insertData() { + Flowable.fromPublisher(dao.saveReactive(FLAMETHROWER)).blockingSubscribe(); + Flowable.fromPublisher(dao.saveReactive(MP3_DOWNLOAD)).blockingSubscribe(); + } + + @Test + public void should_select_by_primary_key_reactive() { + assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) + .isEqualTo(FLAMETHROWER); + Flowable.fromPublisher(dao.deleteReactive(FLAMETHROWER)).blockingSubscribe(); + assertThat( + Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())) + .singleElement() + .blockingGet()) + .isNull(); + } + + @Mapper + public interface DseInventoryMapper { + + @DaoFactory + DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface DseProductDao { + + @Select + MappedReactiveResultSet<Product> findByIdReactive(UUID productId); + + @Delete + ReactiveResultSet deleteReactive(Product product); + + @Insert + ReactiveResultSet saveReactive(Product product); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java index b382dc56349..3bf6557347a 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/SetEntityIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.mapper; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; @@ -45,32 +48,37 @@ @Category(ParallelizableTests.class) public class SetEntityIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule<CqlSession> sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule<CqlSession> SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; - - private static InventoryMapper inventoryMapper; + private static UserDefinedType dimensions2d; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); - for (String query : createStatements(ccm)) { + for (String query : createStatements(CCM_RULE)) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } - inventoryMapper = new SetEntityIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); + InventoryMapper inventoryMapper = new SetEntityIT_InventoryMapperBuilder(session).build(); + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); + dimensions2d = + session + .getKeyspace() + .flatMap(ks -> session.getMetadata().getKeyspace(ks)) + .flatMap(ks -> ks.getUserDefinedType("dimensions2d")) + .orElseThrow(AssertionError::new); } @Test public void should_set_entity_on_bound_statement() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); PreparedStatement preparedStatement = session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); BoundStatement boundStatement = preparedStatement.bind(); @@ -82,7 +90,7 @@ 
public void should_set_entity_on_bound_statement() { @Test public void should_set_entity_on_bound_statement_builder() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); PreparedStatement preparedStatement = session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); @@ -95,7 +103,7 @@ public void should_set_entity_on_bound_statement_builder() { @Test public void should_set_entity_on_bound_statement_setting_null() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); PreparedStatement preparedStatement = session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); @@ -110,7 +118,7 @@ public void should_set_entity_on_bound_statement_setting_null() { @Test public void should_set_entity_on_bound_statement_without_setting_null() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); PreparedStatement preparedStatement = session.prepare("INSERT INTO product (id, description, dimensions) VALUES (?, ?, ?)"); BoundStatementBuilder builder = preparedStatement.boundStatementBuilder(); @@ -126,11 +134,11 @@ public void should_set_entity_on_bound_statement_without_setting_null() { @Test public void should_set_entity_on_udt_value() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); UserDefinedType udtType = session .getMetadata() - .getKeyspace(sessionRule.keyspace()) + .getKeyspace(SESSION_RULE.keyspace()) .orElseThrow(AssertionError::new) .getUserDefinedType("dimensions") .orElseThrow(AssertionError::new); @@ -144,10 +152,74 @@ public void should_set_entity_on_udt_value() { assertThat(udtValue.getInt("height")).isEqualTo(dimensions.getHeight()); } + @Test + public void should_set_entity_on_partial_statement_when_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); + BoundStatement bound = dao.setLenient(FLAMETHROWER, ps.bind()); + assertThat(bound.getUuid(0)).isEqualTo(FLAMETHROWER.getId()); + assertThat(bound.getString(1)).isEqualTo(FLAMETHROWER.getDescription()); + } + + @Test + public void should_set_entity_on_partial_statement_builder_when_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); + BoundStatementBuilder builder = ps.boundStatementBuilder(); + dao.setLenient(FLAMETHROWER, builder); + assertThat(builder.getUuid(0)).isEqualTo(FLAMETHROWER.getId()); + assertThat(builder.getString(1)).isEqualTo(FLAMETHROWER.getDescription()); + } + + @Test + @SuppressWarnings("ResultOfMethodCallIgnored") + public void should_set_entity_on_partial_udt_when_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO product2d (id, dimensions) VALUES (?, ?)"); + BoundStatementBuilder builder = ps.boundStatementBuilder(); + builder.setUuid(0, FLAMETHROWER.getId()); + UdtValue dimensionsUdt = dimensions2d.newValue(); + Dimensions dimensions = new Dimensions(12, 34, 56); + dao.setLenient(dimensions, dimensionsUdt); + builder.setUdtValue(1, dimensionsUdt); + assertThat(dimensionsUdt.getInt("width")).isEqualTo(34); + assertThat(dimensionsUdt.getInt("height")).isEqualTo(56); + } + + @Test + public 
void should_not_set_entity_on_partial_statement_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); + Throwable error = catchThrowable(() -> dao.set(FLAMETHROWER, ps.bind())); + assertThat(error).hasMessage("dimensions is not a variable in this bound statement"); + } + + @Test + public void should_not_set_entity_on_partial_statement_builder_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)"); + Throwable error = catchThrowable(() -> dao.set(ps.boundStatementBuilder(), FLAMETHROWER)); + assertThat(error).hasMessage("dimensions is not a variable in this bound statement"); + } + + @Test + @SuppressWarnings("ResultOfMethodCallIgnored") + public void should_not_set_entity_on_partial_udt_when_not_lenient() { + CqlSession session = SESSION_RULE.session(); + PreparedStatement ps = session.prepare("INSERT INTO product2d (id, dimensions) VALUES (?, ?)"); + BoundStatementBuilder builder = ps.boundStatementBuilder(); + builder.setUuid(0, FLAMETHROWER.getId()); + UdtValue dimensionsUdt = dimensions2d.newValue(); + Dimensions dimensions = new Dimensions(12, 34, 56); + Throwable error = catchThrowable(() -> dao.set(dimensions, dimensionsUdt)); + assertThat(error).hasMessage("length is not a field in this UDT"); + } + private static void assertMatches(GettableByName data, Product entity) { assertThat(data.getUuid("id")).isEqualTo(entity.getId()); assertThat(data.getString("description")).isEqualTo(entity.getDescription()); UdtValue udtValue = data.getUdtValue("dimensions"); + assertThat(udtValue).isNotNull(); assertThat(udtValue.getType().getName().asInternal()).isEqualTo("dimensions"); assertThat(udtValue.getInt("length")).isEqualTo(entity.getDimensions().getLength()); assertThat(udtValue.getInt("width")).isEqualTo(entity.getDimensions().getWidth()); @@ -177,5 +249,14 @@ public interface ProductDao { @SetEntity void set(Dimensions dimensions, UdtValue udtValue); + + @SetEntity(lenient = true) + BoundStatement setLenient(Product product, BoundStatement boundStatement); + + @SetEntity(lenient = true) + void setLenient(Product product, BoundStatementBuilder builder); + + @SetEntity(lenient = true) + void setLenient(Dimensions dimensions, UdtValue udtValue); } } diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java index ed5990bf542..c5099baaf35 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/StatementAttributesIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
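For context, a minimal usage sketch of the new `lenient` mode exercised by the tests above (reusing the `session`, `dao` and `FLAMETHROWER` fixtures from SetEntityIT; not part of the patch): a lenient `@SetEntity` method silently skips entity properties that have no matching variable in the target statement, instead of failing like the strict variant.

```java
// Sketch only, assuming the ProductDao and fixtures from SetEntityIT above.
// The statement binds only a subset of Product's properties; "dimensions"
// has no matching variable, and lenient = true simply skips it.
PreparedStatement ps =
    session.prepare("INSERT INTO product (id, description) VALUES (?, ?)");
BoundStatement bound = dao.setLenient(FLAMETHROWER, ps.bind());
session.execute(bound);
```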
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import static com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when; import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; @@ -37,6 +40,7 @@ import com.datastax.oss.driver.api.mapper.annotations.Update; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.protocol.internal.Message; import com.datastax.oss.protocol.internal.request.Execute; import com.datastax.oss.simulacron.common.cluster.ClusterQueryLogReport; @@ -46,7 +50,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import java.nio.ByteBuffer; -import java.util.Map; +import java.util.LinkedHashMap; import java.util.Objects; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -54,26 +58,28 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.experimental.categories.Category; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +@Category(ParallelizableTests.class) public class StatementAttributesIT { - private static SimulacronRule simulacronRule = + private static final SimulacronRule SIMULACRON_RULE = new SimulacronRule(ClusterSpec.builder().withNodes(1)); - private static SessionRule sessionRule = SessionRule.builder(simulacronRule).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(simulacronRule).around(sessionRule); + private static final SessionRule SESSION_RULE = + SessionRule.builder(SIMULACRON_RULE).build(); - @Rule public ExpectedException thrown = ExpectedException.none(); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(SIMULACRON_RULE).around(SESSION_RULE); private static String PAGING_STATE = "paging_state"; private static int PAGE_SIZE = 13; private static final Simple simple = new Simple(UUID.randomUUID(), "DATA"); + @SuppressWarnings("UnnecessaryLambda") private static final Function statementFunction = builder -> builder @@ -82,6 +88,7 @@ public class StatementAttributesIT { .setSerialConsistencyLevel(DefaultConsistencyLevel.QUORUM) .setPagingState(ByteBuffer.wrap(PAGING_STATE.getBytes(UTF_8))); + @SuppressWarnings("UnnecessaryLambda") private static final Function badStatementFunction = builder -> { throw new IllegalStateException("mock error"); @@ -98,104 +105,105 @@ public static void setupClass() { primeUpdateQuery(); InventoryMapper inventoryMapper = - new StatementAttributesIT_InventoryMapperBuilder(sessionRule.session()).build(); + new StatementAttributesIT_InventoryMapperBuilder(SESSION_RULE.session()).build(); dao = inventoryMapper.simpleDao(); } @Before public void setup() { - simulacronRule.cluster().clearLogs(); + SIMULACRON_RULE.cluster().clearLogs(); } @Test public void should_honor_runtime_attributes_on_insert() { dao.save(simple, 
statementFunction); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), true); } @Test public void should_honor_annotation_attributes_on_insert() { dao.save2(simple); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), false); } @Test public void should_use_runtime_attributes_over_annotation_attributes() { dao.save3(simple, statementFunction); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), false); } @Test public void should_honor_runtime_attributes_on_delete() { dao.delete(simple, statementFunction); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), true); } @Test public void should_honor_annotation_attributes_on_delete() { dao.delete2(simple); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), false); } @Test public void should_honor_runtime_attributes_on_select() { dao.findByPk(simple.getPk(), statementFunction); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), true); } @Test public void should_honor_annotation_attributes_on_select() { dao.findByPk2(simple.getPk()); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), false); } @Test public void should_honor_runtime_attributes_on_query() { dao.count(simple.getPk(), statementFunction); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), true); } @Test public void should_honor_annotation_attributes_on_query() { dao.count2(simple.getPk()); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), false); } @Test public void should_honor_runtime_attributes_on_update() { dao.update(simple, statementFunction); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), true); } @Test public void should_honor_annotation_attributes_on_update() { dao.update2(simple); - ClusterQueryLogReport report = simulacronRule.cluster().getLogs(); + ClusterQueryLogReport report = SIMULACRON_RULE.cluster().getLogs(); validateQueryOptions(report.getQueryLogs().get(0), false); } @Test public void should_fail_runtime_attributes_bad() { - thrown.expect(IllegalStateException.class); - thrown.expectMessage("mock error"); - dao.save(simple, badStatementFunction); + Throwable t = catchThrowable(() -> dao.save(simple, badStatementFunction)); + assertThat(t).isInstanceOf(IllegalStateException.class).hasMessage("mock 
error"); } private static void primeInsertQuery() { - Map params = ImmutableMap.of("pk", simple.getPk(), "data", simple.getData()); - Map paramTypes = ImmutableMap.of("pk", "uuid", "data", "ascii"); - simulacronRule + LinkedHashMap params = + new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk(), "data", simple.getData())); + LinkedHashMap paramTypes = + new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); + SIMULACRON_RULE .cluster() .prime( when(query( @@ -209,9 +217,10 @@ private static void primeInsertQuery() { } private static void primeDeleteQuery() { - Map params = ImmutableMap.of("pk", simple.getPk()); - Map paramTypes = ImmutableMap.of("pk", "uuid"); - simulacronRule + LinkedHashMap params = + new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk())); + LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); + SIMULACRON_RULE .cluster() .prime( when(query( @@ -226,9 +235,10 @@ private static void primeDeleteQuery() { } private static void primeSelectQuery() { - Map params = ImmutableMap.of("pk", simple.getPk()); - Map paramTypes = ImmutableMap.of("pk", "uuid"); - simulacronRule + LinkedHashMap params = + new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk())); + LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); + SIMULACRON_RULE .cluster() .prime( when(query( @@ -243,9 +253,10 @@ private static void primeSelectQuery() { } private static void primeCountQuery() { - Map params = ImmutableMap.of("pk", simple.getPk()); - Map paramTypes = ImmutableMap.of("pk", "uuid"); - simulacronRule + LinkedHashMap params = + new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk())); + LinkedHashMap paramTypes = new LinkedHashMap<>(ImmutableMap.of("pk", "uuid")); + SIMULACRON_RULE .cluster() .prime( when(query( @@ -260,9 +271,11 @@ private static void primeCountQuery() { } private static void primeUpdateQuery() { - Map params = ImmutableMap.of("pk", simple.getPk(), "data", simple.getData()); - Map paramTypes = ImmutableMap.of("pk", "uuid", "data", "ascii"); - simulacronRule + LinkedHashMap params = + new LinkedHashMap<>(ImmutableMap.of("pk", simple.getPk(), "data", simple.getData())); + LinkedHashMap paramTypes = + new LinkedHashMap<>(ImmutableMap.of("pk", "uuid", "data", "ascii")); + SIMULACRON_RULE .cluster() .prime( when(query( @@ -318,10 +331,12 @@ public interface SimpleDao { void delete2(Simple simple); @Select + @SuppressWarnings("UnusedReturnValue") Simple findByPk(UUID pk, Function function); @Select @StatementAttributes(consistencyLevel = "ANY", serialConsistencyLevel = "QUORUM", pageSize = 13) + @SuppressWarnings("UnusedReturnValue") Simple findByPk2(UUID pk); @Query("SELECT count(*) FROM ks.simple WHERE pk=:pk") @@ -329,6 +344,7 @@ public interface SimpleDao { @Query("SELECT count(*) FROM ks.simple WHERE pk=:pk") @StatementAttributes(consistencyLevel = "ANY", serialConsistencyLevel = "QUORUM", pageSize = 13) + @SuppressWarnings("UnusedReturnValue") long count2(UUID pk); @Update diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java index 9dd2ff85d7b..0fab03569d1 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/TransientIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,11 +49,11 @@ @Category(ParallelizableTests.class) public class TransientIT { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static TestMapper mapper; @@ -59,11 +61,11 @@ public class TransientIT { @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder("CREATE TABLE entity(id int primary key, v int)") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); mapper = new TransientIT_TestMapperBuilder(session).build(); @@ -73,7 +75,7 @@ public static void setup() { public void should_ignore_field_with_transient_annotated_field() { EntityWithTransientAnnotatedFieldDao dao = mapper.entityWithTransientAnnotatedFieldDao( - sessionRule.keyspace(), CqlIdentifier.fromCql("entity")); + SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); int key = keyProvider.incrementAndGet(); EntityWithTransientAnnotatedField entity = new EntityWithTransientAnnotatedField(key, 1, 7); @@ -90,7 +92,7 @@ public void should_ignore_field_with_transient_annotated_field() { public void should_ignore_field_with_transient_annotated_getter() { EntityWithTransientAnnotatedGetterDao dao = mapper.entityWithTransientAnnotatedGetterDao( - sessionRule.keyspace(), CqlIdentifier.fromCql("entity")); + SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); int key = keyProvider.incrementAndGet(); EntityWithTransientAnnotatedGetter entity = new EntityWithTransientAnnotatedGetter(key, 1, 7); @@ -107,7 +109,7 @@ public void should_ignore_field_with_transient_annotated_getter() { public void should_ignore_field_with_transient_keyword() { EntityWithTransientKeywordDao dao = mapper.entityWithTransientKeywordDao( - sessionRule.keyspace(), CqlIdentifier.fromCql("entity")); + SESSION_RULE.keyspace(), CqlIdentifier.fromCql("entity")); int key = keyProvider.incrementAndGet(); EntityWithTransientKeyword entity = new EntityWithTransientKeyword(key, 1, 7); @@ -124,7 +126,7 @@ public void should_ignore_field_with_transient_keyword() { public void should_ignore_properties_included_in_transient_properties_keyword() { EntityWithTransientPropertiesAnnotationDao dao = mapper.entityWithTransientPropertiesAnnotation( - sessionRule.keyspace(), CqlIdentifier.fromCql("entity")); + SESSION_RULE.keyspace(), 
CqlIdentifier.fromCql("entity")); int key = keyProvider.incrementAndGet(); EntityWithTransientPropertiesAnnotation entity = diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UdtKeyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UdtKeyIT.java new file mode 100644 index 00000000000..c17cd290451 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UdtKeyIT.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import java.util.List; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +public class UdtKeyIT { + + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static RecordDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + for (String ddlQuery : + ImmutableList.of( + "CREATE TYPE key (value int)", + "CREATE TABLE record(key frozen PRIMARY KEY, value int)", + "CREATE TABLE multi_key_record(key frozen> PRIMARY KEY, value int)")) { + session.execute( + SimpleStatement.builder(ddlQuery) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + + TestMapper mapper = new 
UdtKeyIT_TestMapperBuilder(SESSION_RULE.session()).build(); + dao = mapper.recordDao(SESSION_RULE.keyspace()); + } + + @Test + public void should_save_and_retrieve_entity_with_udt_pk() { + // Given + Key key = new Key(1); + dao.save(new Record(key, 42)); + + // When + Record record = dao.findByKey(key); + + // Then + assertThat(record.getValue()).isEqualTo(42); + } + + @Test + public void should_save_and_retrieve_entity_with_udt_collection_pk() { + // Given + List<Key> key = ImmutableList.of(new Key(1), new Key(2)); + dao.saveMulti(new MultiKeyRecord(key, 42)); + + // When + MultiKeyRecord record = dao.findMultiByKey(key); + + // Then + assertThat(record.getValue()).isEqualTo(42); + } + + @Entity + public static class Key { + private int value; + + public Key() {} + + public Key(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + + public void setValue(int value) { + this.value = value; + } + } + + @Entity + public static class Record { + @PartitionKey private Key key; + private int value; + + public Record() {} + + public Record(Key key, int value) { + this.key = key; + this.value = value; + } + + public Key getKey() { + return key; + } + + public void setKey(Key key) { + this.key = key; + } + + public int getValue() { + return value; + } + + public void setValue(int value) { + this.value = value; + } + } + + @Entity + public static class MultiKeyRecord { + @PartitionKey private List<Key> key; + private int value; + + public MultiKeyRecord() {} + + public MultiKeyRecord(List<Key> key, int value) { + this.key = key; + this.value = value; + } + + public List<Key> getKey() { + return key; + } + + public void setKey(List<Key> key) { + this.key = key; + } + + public int getValue() { + return value; + } + + public void setValue(int value) { + this.value = value; + } + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + interface RecordDao { + @Select + Record findByKey(Key key); + + @Insert + void save(Record record); + + @Select + MultiKeyRecord findMultiByKey(List<Key> key); + + @Insert + void saveMulti(MultiKeyRecord record); + } + + @Mapper + interface TestMapper { + @DaoFactory + RecordDao recordDao(@DaoKeyspace CqlIdentifier keyspace); + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java index eb0fa32e3fa..ebdd2dfd40a 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateCustomIfClauseIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
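A short usage sketch of the UDT-keyed DAO introduced above (fixtures as in UdtKeyIT; not part of the patch): the frozen UDT value as a whole acts as the partition key, so a lookup compares all of its fields.

```java
// Sketch, using the Key/Record entities and RecordDao defined above.
Key key = new Key(1);
dao.save(new Record(key, 42));
Record row = dao.findByKey(key); // roughly: SELECT ... WHERE key = {value: 1}
```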
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.mapper.annotations.CqlName; @@ -29,8 +32,9 @@ import com.datastax.oss.driver.api.mapper.annotations.Mapper; import com.datastax.oss.driver.api.mapper.annotations.Select; import com.datastax.oss.driver.api.mapper.annotations.Update; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; @@ -45,37 +49,40 @@ import org.junit.rules.TestRule; @Category(ParallelizableTests.class) -@CassandraRequirement(min = "3.11.0", description = "UDT fields in IF clause") +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.11.0", + description = "UDT fields in IF clause") public class UpdateCustomIfClauseIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; - private static InventoryMapper inventoryMapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); - for (String query : createStatements(ccm)) { + for (String query : createStatements(CCM_RULE)) { session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + SimpleStatement.builder(query).setExecutionProfile(SESSION_RULE.slowProfile()).build()); } - inventoryMapper = new UpdateCustomIfClauseIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); + InventoryMapper inventoryMapper = + new UpdateCustomIfClauseIT_InventoryMapperBuilder(session).build(); + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); } @Before public void clearProductData() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); } @@ -90,6 +97,22 @@ public void should_update_entity_if_condition_is_met() { assertThat(dao.updateIfLength(otherProduct, 10).wasApplied()).isEqualTo(true); } + @Test + public void 
should_update_entity_if_condition_is_met_statement() { + dao.update( + new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); + assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); + + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + assertThat( + SESSION_RULE + .session() + .execute(dao.updateIfLengthStatement(otherProduct, 10)) + .wasApplied()) + .isEqualTo(true); + } + @Test public void should_not_update_entity_if_condition_is_not_met() { dao.update( @@ -101,6 +124,22 @@ public void should_not_update_entity_if_condition_is_not_met() { assertThat(dao.updateIfLength(otherProduct, 20).wasApplied()).isEqualTo(false); } + @Test + public void should_not_update_entity_if_condition_is_not_met_statement() { + dao.update( + new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); + assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); + + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + assertThat( + SESSION_RULE + .session() + .execute(dao.updateIfLengthStatement(otherProduct, 20)) + .wasApplied()) + .isEqualTo(false); + } + @Test public void should_async_update_entity_if_condition_is_met() { dao.update( @@ -129,6 +168,17 @@ public void should_not_async_update_entity_if_condition_is_not_met() { .isEqualTo(false); } + @Test + public void should_update_entity_if_condition_is_met_using_ttl() { + dao.update( + new Product(FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1))); + assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); + + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + assertThat(dao.updateIfLengthUsingTtl(otherProduct, 10).wasApplied()).isEqualTo(true); + } + @Mapper public interface InventoryMapper { @DaoFactory @@ -144,6 +194,12 @@ public interface ProductDao { @Update(customIfClause = "dimensions.length = :length") ResultSet updateIfLength(Product product, int length); + @Update(customIfClause = "dimensions.length = :length", ttl = "20") + ResultSet updateIfLengthUsingTtl(Product product, int length); + + @Update(customIfClause = "dimensions.length = :length") + BoundStatement updateIfLengthStatement(Product product, int length); + @Update(customIfClause = "dimensions.length = :\"Length\"") CompletableFuture updateIfLengthAsync( Product product, @CqlName("\"Length\"") int length); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java index d81627fda5a..3fac733c900 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
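A usage sketch of the conditional-update methods above (DAO and `FLAMETHROWER` fixture as in UpdateCustomIfClauseIT): a custom IF clause turns the update into a lightweight transaction, so callers must check `wasApplied()`; the `ttl = "20"` variant additionally appends `USING TTL 20` to the generated query.

```java
// Sketch, assuming the ProductDao and FLAMETHROWER fixture from above.
Product updated =
    new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1));
boolean applied = dao.updateIfLength(updated, 10).wasApplied();
dao.updateIfLengthUsingTtl(updated, 10); // same condition, with USING TTL 20
```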
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,6 +23,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; @@ -34,6 +37,7 @@ import com.datastax.oss.driver.api.mapper.annotations.Update; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; import com.datastax.oss.driver.api.testinfra.session.SessionRule; import com.datastax.oss.driver.categories.ParallelizableTests; import com.datastax.oss.driver.internal.core.util.concurrent.CompletableFutures; @@ -50,34 +54,41 @@ @Category(ParallelizableTests.class) public class UpdateIT extends InventoryITBase { - private static CcmRule ccm = CcmRule.getInstance(); - - private static SessionRule sessionRule = SessionRule.builder(ccm).build(); + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); - @ClassRule public static TestRule chain = RuleChain.outerRule(ccm).around(sessionRule); + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); private static ProductDao dao; private static InventoryMapper inventoryMapper; @BeforeClass public static void setup() { - CqlSession session = sessionRule.session(); - - for (String query : createStatements(ccm)) { - session.execute( - SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); - } + CqlSession session = SESSION_RULE.session(); + SchemaChangeSynchronizer.withLock( + () -> { + for (String query : createStatements(CCM_RULE)) { + session.execute( + SimpleStatement.builder(query) + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + } + session.execute( + SimpleStatement.newInstance("CREATE TABLE only_p_k(id uuid PRIMARY KEY)") + .setExecutionProfile(SESSION_RULE.slowProfile())); + }); inventoryMapper = new UpdateIT_InventoryMapperBuilder(session).build(); - dao = inventoryMapper.productDao(sessionRule.keyspace()); + dao = inventoryMapper.productDao(SESSION_RULE.keyspace()); } @Before public void clearProductData() { - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); session.execute( SimpleStatement.builder("TRUNCATE product") - .setExecutionProfile(sessionRule.slowProfile()) + .setExecutionProfile(SESSION_RULE.slowProfile()) .build()); } @@ -148,7 +159,7 @@ public void should_update_entity_with_timestamp() { long timestamp = 1234; dao.updateWithBoundTimestamp(FLAMETHROWER, timestamp); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -156,6 +167,7 @@ public void should_update_entity_with_timestamp() { "SELECT WRITETIME(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) .one(); + assertThat(row).isNotNull(); long writeTime = row.getLong(0); 
assertThat(writeTime).isEqualTo(timestamp); } @@ -166,7 +178,7 @@ public void should_update_entity_with_timestamp_literal() { dao.updateWithTimestampLiteral(FLAMETHROWER); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -174,6 +186,7 @@ public void should_update_entity_with_timestamp_literal() { "SELECT WRITETIME(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) .one(); + assertThat(row).isNotNull(); long writeTime = row.getLong(0); assertThat(writeTime).isEqualTo(1000L); } @@ -185,15 +198,16 @@ public void should_update_entity_with_ttl() { int ttl = 100_000; dao.updateWithBoundTtl(FLAMETHROWER, ttl); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( SimpleStatement.newInstance( "SELECT TTL(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) .one(); + assertThat(row).isNotNull(); int writeTime = row.getInt(0); - assertThat(writeTime).isEqualTo(ttl); + assertThat(writeTime).isBetween(ttl - 10, ttl); } @Test @@ -202,15 +216,16 @@ public void should_update_entity_with_ttl_literal() { dao.updateWithTtlLiteral(FLAMETHROWER); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( SimpleStatement.newInstance( "SELECT TTL(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) .one(); + assertThat(row).isNotNull(); int writeTime = row.getInt(0); - assertThat(writeTime).isEqualTo(1000); + assertThat(writeTime).isBetween(990, 1000); } @Test @@ -221,7 +236,7 @@ public void should_update_entity_with_timestamp_asynchronously() { CompletableFutures.getUninterruptibly( dao.updateAsyncWithBoundTimestamp(FLAMETHROWER, timestamp)); - CqlSession session = sessionRule.session(); + CqlSession session = SESSION_RULE.session(); Row row = session .execute( @@ -229,6 +244,7 @@ public void should_update_entity_with_timestamp_asynchronously() { "SELECT WRITETIME(description) FROM product WHERE id = ?", FLAMETHROWER.getId())) .one(); + assertThat(row).isNotNull(); long writeTime = row.getLong(0); assertThat(writeTime).isEqualTo(timestamp); } @@ -243,6 +259,18 @@ public void should_update_entity_if_exists() { assertThat(dao.updateIfExists(otherProduct).wasApplied()).isEqualTo(true); } + @Test + public void should_update_entity_if_exists_statement() { + dao.update(FLAMETHROWER); + assertThat(dao.findById(FLAMETHROWER.getId())).isNotNull(); + + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + assertThat( + SESSION_RULE.session().execute(dao.updateIfExistsStatement(otherProduct)).wasApplied()) + .isEqualTo(true); + } + @Test public void should_not_update_entity_if_not_exists() { assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); @@ -252,6 +280,17 @@ public void should_not_update_entity_if_not_exists() { assertThat(dao.updateIfExists(otherProduct).wasApplied()).isEqualTo(false); } + @Test + public void should_not_update_entity_if_not_exists_statement() { + assertThat(dao.findById(FLAMETHROWER.getId())).isNull(); + + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + assertThat( + SESSION_RULE.session().execute(dao.updateIfExistsStatement(otherProduct)).wasApplied()) + .isEqualTo(false); + } + @Test public void should_update_entity_if_exists_asynchronously() { dao.update(FLAMETHROWER); @@ -279,7 +318,7 @@ public void 
should_not_update_entity_if_not_exists_asynchronously() { @Test public void should_throw_when_try_to_use_dao_with_update_only_pk() { - assertThatThrownBy(() -> inventoryMapper.onlyPkDao(sessionRule.keyspace())) + assertThatThrownBy(() -> inventoryMapper.onlyPkDao(SESSION_RULE.keyspace())) .isInstanceOf(MapperException.class) .hasMessageContaining("Entity OnlyPK does not have any non PK columns."); } @@ -323,10 +362,10 @@ public void should_not_update_entity_and_return_was_not_applied_async() { @Test public void should_update_entity_without_pk_placeholders_matching_custom_where_in_clause() { // given - ProductWithoutIdDao dao = inventoryMapper.productWithoutIdDao(sessionRule.keyspace()); + ProductWithoutIdDao dao = inventoryMapper.productWithoutIdDao(SESSION_RULE.keyspace()); UUID idOne = UUID.randomUUID(); UUID idTwo = UUID.randomUUID(); - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.newInstance( @@ -334,7 +373,7 @@ public void should_update_entity_without_pk_placeholders_matching_custom_where_i idOne, 1, "a")); - sessionRule + SESSION_RULE .session() .execute( SimpleStatement.newInstance( @@ -426,6 +465,9 @@ public interface ProductDao { @Update(ifExists = true) ResultSet updateIfExists(Product product); + @Update(ifExists = true) + BoundStatement updateIfExistsStatement(Product product); + @Update CompletableFuture updateAsync(Product product); diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateNamingIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateNamingIT.java new file mode 100644 index 00000000000..c1b15b2cbca --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateNamingIT.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
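The new `updateIfExistsStatement` methods above illustrate a general pattern: declaring a mapper `@Update` method with a `BoundStatement` return type builds the statement without executing it, so the caller decides when to run it (for example, to add it to a batch). A sketch with the same fixtures:

```java
// Sketch: the DAO builds the statement; the caller executes it explicitly.
BoundStatement stmt = dao.updateIfExistsStatement(FLAMETHROWER);
boolean applied = SESSION_RULE.session().execute(stmt).wasApplied();
```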
+ */ +package com.datastax.oss.driver.mapper; + +import static com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy.SET_TO_NULL; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.annotations.Update; +import com.datastax.oss.driver.api.mapper.entity.naming.NamingConvention; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +/** + * For JAVA-2367: ensure that PK column names are properly handled in the WHERE clause of a + * generated UPDATE query. + */ +@Category(ParallelizableTests.class) +public class UpdateNamingIT { + private static final CcmRule CCM_RULE = CcmRule.getInstance(); + private static final SessionRule SESSION_RULE = SessionRule.builder(CCM_RULE).build(); + + @ClassRule + public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE); + + private static TestDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = SESSION_RULE.session(); + session.execute( + SimpleStatement.builder("CREATE TABLE foo(mykey int PRIMARY KEY, value int)") + .setExecutionProfile(SESSION_RULE.slowProfile()) + .build()); + + TestMapper mapper = + TestMapper.builder(session).withDefaultKeyspace(SESSION_RULE.keyspace()).build(); + dao = mapper.dao(); + } + + @Test + public void should_update_with_case_insensitive_pk_name() { + dao.update(new Foo(1, 1)); + Foo foo = dao.get(1); + assertThat(foo.getValue()).isEqualTo(1); + } + + @Mapper + public interface TestMapper { + + @DaoFactory + TestDao dao(); + + static MapperBuilder builder(CqlSession session) { + return new UpdateNamingIT_TestMapperBuilder(session); + } + } + + @Dao + @DefaultNullSavingStrategy(SET_TO_NULL) + public interface TestDao { + @Select + Foo get(int key); + + @Update + void update(Foo template); + } + + @Entity + @NamingStrategy(convention = NamingConvention.CASE_INSENSITIVE) + public static class Foo { + @PartitionKey private int myKey; + private int value; + + public Foo() {} + + public Foo(int myKey, int value) { + this.myKey = myKey; + this.value = value; + } + + public int getMyKey() { + return myKey; + } + + public void setMyKey(int myKey) { + this.myKey = myKey; + } + + public int getValue() { + return value; + } + + public void setValue(int value) { + this.value = value; + } + } +} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateReactiveIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateReactiveIT.java new file 
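To make the JAVA-2367 scenario above concrete: with `NamingConvention.CASE_INSENSITIVE`, the Java property `myKey` maps to the unquoted CQL column `mykey`, so the generated UPDATE must use that name in its WHERE clause. Roughly:

```java
// Sketch, using the Foo entity and TestDao defined above.
dao.update(new Foo(1, 1)); // roughly: UPDATE foo SET value=:value WHERE mykey=:mykey
Foo foo = dao.get(1);
```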
mode 100644 index 00000000000..fa171441b50 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/mapper/UpdateReactiveIT.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.mapper; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.annotations.Update; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.testinfra.ccm.CcmRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.api.testinfra.session.SessionRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import io.reactivex.Flowable; +import io.reactivex.Single; +import java.util.UUID; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@Category(ParallelizableTests.class) +@BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.6", + description = "Uses UDT fields in IF conditions (CASSANDRA-7423)") +public class UpdateReactiveIT extends InventoryITBase { + + private static CcmRule ccmRule = CcmRule.getInstance(); + + private static SessionRule sessionRule = SessionRule.builder(ccmRule).build(); + + @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule); + + private static DseProductDao dao; + + @BeforeClass + public static void setup() { + CqlSession session = sessionRule.session(); + + for (String query : createStatements(ccmRule)) { + session.execute( + SimpleStatement.builder(query).setExecutionProfile(sessionRule.slowProfile()).build()); + } + + DseInventoryMapper dseInventoryMapper = + new 
UpdateReactiveIT_DseInventoryMapperBuilder(session).build(); + dao = dseInventoryMapper.productDao(sessionRule.keyspace()); + } + + @Before + public void clearProductData() { + CqlSession session = sessionRule.session(); + session.execute( + SimpleStatement.builder("TRUNCATE product") + .setExecutionProfile(sessionRule.slowProfile()) + .build()); + } + + @Test + public void should_update_entity_if_exists_reactive() { + Flowable.fromPublisher(dao.updateReactive(FLAMETHROWER)).blockingSubscribe(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) + .isNotNull(); + + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + ReactiveResultSet rs = dao.updateIfExistsReactive(otherProduct); + assertThat(Flowable.fromPublisher(rs).count().blockingGet()).isOne(); + assertThat( + Single.fromPublisher(rs.getColumnDefinitions()).blockingGet().contains("description")) + .isFalse(); + assertThat(Single.fromPublisher(rs.wasApplied()).blockingGet()).isTrue(); + } + + @Test + public void should_update_entity_if_condition_is_met_reactive() { + Flowable.fromPublisher( + dao.updateReactive( + new Product( + FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1)))) + .blockingSubscribe(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) + .isNotNull(); + Product otherProduct = + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)); + ReactiveResultSet rs = dao.updateIfLengthReactive(otherProduct, 10); + ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); + assertThat(row.wasApplied()).isTrue(); + assertThat(row.getColumnDefinitions().contains("dimensions")).isFalse(); + assertThat(Single.fromPublisher(rs.getColumnDefinitions()).blockingGet().contains("dimensions")) + .isFalse(); + assertThat(Single.fromPublisher(rs.wasApplied()).blockingGet()).isTrue(); + } + + @Test + public void should_not_update_entity_if_condition_is_not_met_reactive() { + Flowable.fromPublisher( + dao.updateReactive( + new Product( + FLAMETHROWER.getId(), "Description for length 10", new Dimensions(10, 1, 1)))) + .blockingSubscribe(); + assertThat(Flowable.fromPublisher(dao.findByIdReactive(FLAMETHROWER.getId())).blockingSingle()) + .isNotNull() + .extracting("description") + .isEqualTo("Description for length 10"); + ReactiveResultSet rs = + dao.updateIfLengthReactive( + new Product(FLAMETHROWER.getId(), "Other description", new Dimensions(1, 1, 1)), 20); + ReactiveRow row = Flowable.fromPublisher(rs).blockingSingle(); + assertThat(row.wasApplied()).isFalse(); + assertThat(row.getColumnDefinitions().contains("dimensions")).isTrue(); + assertThat(Single.fromPublisher(rs.getColumnDefinitions()).blockingGet().contains("dimensions")) + .isTrue(); + assertThat(Single.fromPublisher(rs.wasApplied()).blockingGet()).isFalse(); + } + + @Mapper + public interface DseInventoryMapper { + + @DaoFactory + DseProductDao productDao(@DaoKeyspace CqlIdentifier keyspace); + } + + @Dao + @DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) + public interface DseProductDao { + + @Update + ReactiveResultSet updateReactive(Product product); + + @Update(ifExists = true) + ReactiveResultSet updateIfExistsReactive(Product product); + + @Update(customIfClause = "dimensions.length = :length") + ReactiveResultSet updateIfLengthReactive(Product product, int length); + + @Select + MappedReactiveResultSet findByIdReactive(UUID productId); + } +} diff --git 
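A sketch of how the reactive DAO above is consumed (fixtures as in UpdateReactiveIT): `ReactiveResultSet` exposes `wasApplied()` and the column definitions as publishers, drained here with RxJava exactly as the tests do.

```java
// Sketch, assuming the DseProductDao defined above and a Product instance.
ReactiveResultSet rs = dao.updateIfExistsReactive(product);
boolean applied = Single.fromPublisher(rs.wasApplied()).blockingGet();
```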
a/integration-tests/src/test/java/com/datastax/oss/driver/metrics/micrometer/MicrometerMetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/metrics/micrometer/MicrometerMetricsIT.java new file mode 100644 index 00000000000..c38df1e2026 --- /dev/null +++ b/integration-tests/src/test/java/com/datastax/oss/driver/metrics/micrometer/MicrometerMetricsIT.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.metrics.micrometer; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.core.metrics.MetricsITBase; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; +import com.datastax.oss.driver.internal.metrics.micrometer.MicrometerTags; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import io.micrometer.core.instrument.Counter; +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.Meter; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Tag; +import io.micrometer.core.instrument.Timer; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class MicrometerMetricsIT extends MetricsITBase { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); + + @Override + protected SimulacronRule simulacron() { + return SIMULACRON_RULE; + } + + @Override + protected MeterRegistry newMetricRegistry() { + return new SimpleMeterRegistry(); + } + + @Override + protected String getMetricsFactoryClass() { + return "MicrometerMetricsFactory"; + } + + @Override + protected void assertMetricsPresent(CqlSession session) { + + MeterRegistry registry = + (MeterRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); + assertThat(registry).isNotNull(); + + assertThat(registry.getMeters()) + .hasSize(ENABLED_SESSION_METRICS.size() + ENABLED_NODE_METRICS.size() * 3); + + MetricIdGenerator metricIdGenerator = + ((InternalDriverContext) 
session.getContext()).getMetricIdGenerator(); + + for (DefaultSessionMetric metric : ENABLED_SESSION_METRICS) { + MetricId id = metricIdGenerator.sessionMetricId(metric); + Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); + Meter m = registry.find(id.getName()).tags(tags).meter(); + assertThat(m).isNotNull(); + switch (metric) { + case CONNECTED_NODES: + assertThat(m).isInstanceOf(Gauge.class); + assertThat(((Gauge) m).value()).isEqualTo(3); + break; + case CQL_REQUESTS: + assertThat(m).isInstanceOf(Timer.class); + await().untilAsserted(() -> assertThat(((Timer) m).count()).isEqualTo(30)); + break; + case CQL_PREPARED_CACHE_SIZE: + assertThat(m).isInstanceOf(Gauge.class); + assertThat(((Gauge) m).value()).isOne(); + break; + case BYTES_SENT: + case BYTES_RECEIVED: + assertThat(m).isInstanceOf(Counter.class); + assertThat(((Counter) m).count()).isGreaterThan(0); + break; + case CQL_CLIENT_TIMEOUTS: + case THROTTLING_ERRORS: + assertThat(m).isInstanceOf(Counter.class); + assertThat(((Counter) m).count()).isZero(); + break; + case THROTTLING_DELAY: + assertThat(m).isInstanceOf(Timer.class); + assertThat(((Timer) m).count()).isZero(); + break; + case THROTTLING_QUEUE_SIZE: + assertThat(m).isInstanceOf(Gauge.class); + assertThat(((Gauge) m).value()).isZero(); + break; + } + } + + for (Node node : session.getMetadata().getNodes().values()) { + + for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { + MetricId id = metricIdGenerator.nodeMetricId(node, metric); + Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); + Meter m = registry.find(id.getName()).tags(tags).meter(); + assertThat(m).isNotNull(); + switch (metric) { + case OPEN_CONNECTIONS: + assertThat(m).isInstanceOf(Gauge.class); + // control node has 2 connections + assertThat(((Gauge) m).value()).isBetween(1.0, 2.0); + break; + case CQL_MESSAGES: + assertThat(m).isInstanceOf(Timer.class); + await().untilAsserted(() -> assertThat(((Timer) m).count()).isEqualTo(10)); + break; + case READ_TIMEOUTS: + case WRITE_TIMEOUTS: + case UNAVAILABLES: + case OTHER_ERRORS: + case ABORTED_REQUESTS: + case UNSENT_REQUESTS: + case RETRIES: + case IGNORES: + case RETRIES_ON_READ_TIMEOUT: + case RETRIES_ON_WRITE_TIMEOUT: + case RETRIES_ON_UNAVAILABLE: + case RETRIES_ON_OTHER_ERROR: + case RETRIES_ON_ABORTED: + case IGNORES_ON_READ_TIMEOUT: + case IGNORES_ON_WRITE_TIMEOUT: + case IGNORES_ON_UNAVAILABLE: + case IGNORES_ON_OTHER_ERROR: + case IGNORES_ON_ABORTED: + case SPECULATIVE_EXECUTIONS: + case CONNECTION_INIT_ERRORS: + case AUTHENTICATION_ERRORS: + assertThat(m).isInstanceOf(Counter.class); + assertThat(((Counter) m).count()).isZero(); + break; + case BYTES_SENT: + case BYTES_RECEIVED: + assertThat(m).isInstanceOf(Counter.class); + assertThat(((Counter) m).count()).isGreaterThan(0.0); + break; + case AVAILABLE_STREAMS: + case IN_FLIGHT: + case ORPHANED_STREAMS: + assertThat(m).isInstanceOf(Gauge.class); + break; + } + } + } + } + + @Override + protected void assertNodeMetricsNotEvicted(CqlSession session, Node node) { + InternalDriverContext context = (InternalDriverContext) session.getContext(); + MetricIdGenerator metricIdGenerator = context.getMetricIdGenerator(); + MeterRegistry registry = (MeterRegistry) context.getMetricRegistry(); + assertThat(registry).isNotNull(); + for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) { + MetricId id = metricIdGenerator.nodeMetricId(node, metric); + Iterable tags = MicrometerTags.toMicrometerTags(id.getTags()); + Meter m = registry.find(id.getName()).tags(tags).meter(); + 
assertThat(m).isNotNull();
+    }
+  }
+
+  @Override
+  protected void assertMetricsNotPresent(Object registry) {
+    MeterRegistry micrometerRegistry = (MeterRegistry) registry;
+    assertThat(micrometerRegistry.getMeters()).isEmpty();
+  }
+
+  @Override
+  protected void assertNodeMetricsEvicted(CqlSession session, Node node) {
+    InternalDriverContext context = (InternalDriverContext) session.getContext();
+    MetricIdGenerator metricIdGenerator = context.getMetricIdGenerator();
+    MeterRegistry registry = (MeterRegistry) context.getMetricRegistry();
+    assertThat(registry).isNotNull();
+    for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) {
+      MetricId id = metricIdGenerator.nodeMetricId(node, metric);
+      Iterable<Tag> tags = MicrometerTags.toMicrometerTags(id.getTags());
+      Meter m = registry.find(id.getName()).tags(tags).meter();
+      assertThat(m).isNull();
+    }
+  }
+}
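For context, a minimal sketch (not part of this patch) of how an application could hand a Micrometer registry to the driver, along the lines this test exercises. It assumes the `advanced.metrics.factory.class` option and the `withMetricRegistry(Object)` builder method that the test infrastructure relies on; the enabled metric name is illustrative:

```java
// Hedged sketch, assuming driver 4.x with the Micrometer metrics module on the classpath.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.Collections;

public class MicrometerWiringExample {
  public static void main(String[] args) {
    MeterRegistry registry = new SimpleMeterRegistry();
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            // select the Micrometer backend, as this test does via getMetricsFactoryClass()
            .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, "MicrometerMetricsFactory")
            .withStringList(
                DefaultDriverOption.METRICS_SESSION_ENABLED,
                Collections.singletonList("cql-requests"))
            .build();
    try (CqlSession session =
        CqlSession.builder()
            .withConfigLoader(loader)
            // hand our registry to the driver instead of letting it create its own
            .withMetricRegistry(registry)
            .build()) {
      session.execute("SELECT release_version FROM system.local");
      // the enabled session metrics are now visible as meters in `registry`
      registry.getMeters().forEach(m -> System.out.println(m.getId()));
    }
  }
}
```

diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/metrics/microprofile/MicroProfileMetricsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/metrics/microprofile/MicroProfileMetricsIT.java
new file mode 100644
index 00000000000..aa04c058a49
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/metrics/microprofile/MicroProfileMetricsIT.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.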
+ */ +package com.datastax.oss.driver.metrics.microprofile; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import com.datastax.oss.driver.categories.ParallelizableTests; +import com.datastax.oss.driver.core.metrics.MetricsITBase; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; +import com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileTags; +import com.datastax.oss.simulacron.common.cluster.ClusterSpec; +import io.smallrye.metrics.MetricsRegistryImpl; +import java.util.ArrayList; +import java.util.List; +import org.eclipse.microprofile.metrics.Counter; +import org.eclipse.microprofile.metrics.Gauge; +import org.eclipse.microprofile.metrics.Meter; +import org.eclipse.microprofile.metrics.Metric; +import org.eclipse.microprofile.metrics.MetricID; +import org.eclipse.microprofile.metrics.MetricRegistry; +import org.eclipse.microprofile.metrics.Tag; +import org.eclipse.microprofile.metrics.Timer; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category(ParallelizableTests.class) +public class MicroProfileMetricsIT extends MetricsITBase { + + @ClassRule + public static final SimulacronRule SIMULACRON_RULE = + new SimulacronRule(ClusterSpec.builder().withNodes(3)); + + @Override + protected SimulacronRule simulacron() { + return SIMULACRON_RULE; + } + + @Override + protected MetricRegistry newMetricRegistry() { + return new MetricsRegistryImpl(); + } + + @Override + protected String getMetricsFactoryClass() { + return "MicroProfileMetricsFactory"; + } + + @Override + protected void assertMetricsPresent(CqlSession session) { + + MetricRegistry registry = + (MetricRegistry) ((InternalDriverContext) session.getContext()).getMetricRegistry(); + assertThat(registry).isNotNull(); + + assertThat(registry.getMetrics()) + .hasSize(ENABLED_SESSION_METRICS.size() + ENABLED_NODE_METRICS.size() * 3); + + MetricIdGenerator metricIdGenerator = + ((InternalDriverContext) session.getContext()).getMetricIdGenerator(); + + for (DefaultSessionMetric metric : ENABLED_SESSION_METRICS) { + MetricId metricId = metricIdGenerator.sessionMetricId(metric); + Tag[] tags = MicroProfileTags.toMicroProfileTags(metricId.getTags()); + MetricID id = new MetricID(metricId.getName(), tags); + Metric m = registry.getMetrics().get(id); + assertThat(m).isNotNull(); + switch (metric) { + case CONNECTED_NODES: + assertThat(m).isInstanceOf(Gauge.class); + assertThat((Integer) ((Gauge) m).getValue()).isEqualTo(3); + break; + case CQL_REQUESTS: + assertThat(m).isInstanceOf(Timer.class); + await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(30)); + break; + case CQL_PREPARED_CACHE_SIZE: + assertThat(m).isInstanceOf(Gauge.class); + assertThat((Long) ((Gauge) m).getValue()).isOne(); + break; + case BYTES_SENT: + case BYTES_RECEIVED: + assertThat(m).isInstanceOf(Meter.class); + assertThat(((Meter) m).getCount()).isGreaterThan(0); + break; + case CQL_CLIENT_TIMEOUTS: + case THROTTLING_ERRORS: + 
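+          // nothing in this scenario triggers client timeouts or throttling,
+          // so both counters must stay at zero
+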
assertThat(m).isInstanceOf(Counter.class);
+          assertThat(((Counter) m).getCount()).isZero();
+          break;
+        case THROTTLING_DELAY:
+          assertThat(m).isInstanceOf(Timer.class);
+          assertThat(((Timer) m).getCount()).isZero();
+          break;
+        case THROTTLING_QUEUE_SIZE:
+          assertThat(m).isInstanceOf(Gauge.class);
+          assertThat((Integer) ((Gauge) m).getValue()).isZero();
+          break;
+      }
+    }
+
+    for (Node node : session.getMetadata().getNodes().values()) {
+
+      for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) {
+        MetricId description = metricIdGenerator.nodeMetricId(node, metric);
+        Tag[] tags = MicroProfileTags.toMicroProfileTags(description.getTags());
+        MetricID id = new MetricID(description.getName(), tags);
+        Metric m = registry.getMetrics().get(id);
+        assertThat(m).isNotNull();
+        switch (metric) {
+          case OPEN_CONNECTIONS:
+            assertThat(m).isInstanceOf(Gauge.class);
+            // control node has 2 connections
+            assertThat((Integer) ((Gauge) m).getValue()).isBetween(1, 2);
+            break;
+          case CQL_MESSAGES:
+            assertThat(m).isInstanceOf(Timer.class);
+            await().untilAsserted(() -> assertThat(((Timer) m).getCount()).isEqualTo(10));
+            break;
+          case READ_TIMEOUTS:
+          case WRITE_TIMEOUTS:
+          case UNAVAILABLES:
+          case OTHER_ERRORS:
+          case ABORTED_REQUESTS:
+          case UNSENT_REQUESTS:
+          case RETRIES:
+          case IGNORES:
+          case RETRIES_ON_READ_TIMEOUT:
+          case RETRIES_ON_WRITE_TIMEOUT:
+          case RETRIES_ON_UNAVAILABLE:
+          case RETRIES_ON_OTHER_ERROR:
+          case RETRIES_ON_ABORTED:
+          case IGNORES_ON_READ_TIMEOUT:
+          case IGNORES_ON_WRITE_TIMEOUT:
+          case IGNORES_ON_UNAVAILABLE:
+          case IGNORES_ON_OTHER_ERROR:
+          case IGNORES_ON_ABORTED:
+          case SPECULATIVE_EXECUTIONS:
+          case CONNECTION_INIT_ERRORS:
+          case AUTHENTICATION_ERRORS:
+            assertThat(m).isInstanceOf(Counter.class);
+            assertThat(((Counter) m).getCount()).isZero();
+            break;
+          case BYTES_SENT:
+          case BYTES_RECEIVED:
+            assertThat(m).isInstanceOf(Meter.class);
+            assertThat(((Meter) m).getCount()).isGreaterThan(0L);
+            break;
+          case AVAILABLE_STREAMS:
+          case IN_FLIGHT:
+          case ORPHANED_STREAMS:
+            assertThat(m).isInstanceOf(Gauge.class);
+            break;
+        }
+      }
+    }
+  }
+
+  @Override
+  protected void assertNodeMetricsNotEvicted(CqlSession session, Node node) {
+    InternalDriverContext context = (InternalDriverContext) session.getContext();
+    MetricRegistry registry = (MetricRegistry) context.getMetricRegistry();
+    assertThat(registry).isNotNull();
+    for (MetricID id : nodeMetricIds(context, node)) {
+      assertThat(registry.getMetrics()).containsKey(id);
+    }
+  }
+
+  @Override
+  protected void assertMetricsNotPresent(Object registry) {
+    MetricRegistry metricRegistry = (MetricRegistry) registry;
+    assertThat(metricRegistry.getMetrics()).isEmpty();
+  }
+
+  @Override
+  protected void assertNodeMetricsEvicted(CqlSession session, Node node) {
+    InternalDriverContext context = (InternalDriverContext) session.getContext();
+    MetricRegistry registry = (MetricRegistry) context.getMetricRegistry();
+    assertThat(registry).isNotNull();
+    for (MetricID id : nodeMetricIds(context, node)) {
+      assertThat(registry.getMetrics()).doesNotContainKey(id);
+    }
+  }
+
+  private List<MetricID> nodeMetricIds(InternalDriverContext context, Node node) {
+    List<MetricID> ids = new ArrayList<>();
+    for (DefaultNodeMetric metric : ENABLED_NODE_METRICS) {
+      MetricId id = context.getMetricIdGenerator().nodeMetricId(node, metric);
+      ids.add(new MetricID(id.getName(), MicroProfileTags.toMicroProfileTags(id.getTags())));
+    }
+    return ids;
+  }
+}
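As a follow-up, a minimal sketch of the lookup pattern the MicroProfile assertions use: a driver `MetricId` plus its tags identify exactly one `MetricID` in the registry. This fragment assumes a `metricIdGenerator` and `registry` obtained from the driver context as in the test above:

```java
// Hedged sketch mirroring MicroProfileMetricsIT's lookup; identifiers as in the test.
MetricId metricId = metricIdGenerator.sessionMetricId(DefaultSessionMetric.CQL_REQUESTS);
Tag[] tags = MicroProfileTags.toMicroProfileTags(metricId.getTags());
MetricID id = new MetricID(metricId.getName(), tags);
Metric metric = registry.getMetrics().get(id); // null if the metric is disabled or evicted
```

diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/BundleOptions.java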
b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/BundleOptions.java deleted file mode 100644 index 1a259d04466..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/BundleOptions.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.osgi; - -import static org.ops4j.pax.exam.CoreOptions.bundle; -import static org.ops4j.pax.exam.CoreOptions.junitBundles; -import static org.ops4j.pax.exam.CoreOptions.mavenBundle; -import static org.ops4j.pax.exam.CoreOptions.options; -import static org.ops4j.pax.exam.CoreOptions.systemProperty; - -import org.ops4j.pax.exam.options.CompositeOption; -import org.ops4j.pax.exam.options.MavenArtifactProvisionOption; -import org.ops4j.pax.exam.options.UrlProvisionOption; -import org.ops4j.pax.exam.util.PathUtils; - -public class BundleOptions { - - public static CompositeOption baseOptions() { - // Note: the bundles below include Netty; these bundles are not required by - // the shaded core driver bundle, but they need to be present in all cases because - // the test-infra bundle requires the (non-shaded) Netty bundle. - return () -> - options( - nettyBundles(), - mavenBundle( - "com.datastax.oss", "java-driver-shaded-guava", getVersion("guava.version")), - mavenBundle("io.dropwizard.metrics", "metrics-core", getVersion("metrics.version")), - mavenBundle("org.slf4j", "slf4j-api", getVersion("slf4j.version")), - mavenBundle("org.hdrhistogram", "HdrHistogram", getVersion("hdrhistogram.version")), - mavenBundle("com.typesafe", "config", getVersion("config.version")), - mavenBundle( - "com.datastax.oss", "native-protocol", getVersion("native-protocol.version")), - logbackBundles(), - systemProperty("logback.configurationFile") - .value("file:" + PathUtils.getBaseDir() + "/src/test/resources/logback-test.xml"), - testBundles()); - } - - public static UrlProvisionOption driverCoreBundle() { - return bundle( - "reference:file:" - + PathUtils.getBaseDir() - + "/../core/target/java-driver-core-" - + getVersion("project.version") - + ".jar"); - } - - public static UrlProvisionOption driverCoreShadedBundle() { - return bundle( - "reference:file:" - + PathUtils.getBaseDir() - + "/../core-shaded/target/java-driver-core-shaded-" - + getVersion("project.version") - + ".jar"); - } - - public static UrlProvisionOption driverQueryBuilderBundle() { - return bundle( - "reference:file:" - + PathUtils.getBaseDir() - + "/../query-builder/target/java-driver-query-builder-" - + getVersion("project.version") - + ".jar"); - } - - public static UrlProvisionOption driverTestInfraBundle() { - return bundle( - "reference:file:" - + PathUtils.getBaseDir() - + "/../test-infra/target/java-driver-test-infra-" - + getVersion("project.version") - + ".jar"); - } - - public static CompositeOption testBundles() { - return () -> - options( - driverTestInfraBundle(), - simulacronBundles(), - jacksonBundles(), - mavenBundle( - "org.apache.commons", 
"commons-exec", System.getProperty("commons-exec.version")), - mavenBundle("org.assertj", "assertj-core", System.getProperty("assertj.version")), - junitBundles()); - } - - public static CompositeOption nettyBundles() { - String nettyVersion = getVersion("netty.version"); - return () -> - options( - mavenBundle("io.netty", "netty-handler", nettyVersion), - mavenBundle("io.netty", "netty-buffer", nettyVersion), - mavenBundle("io.netty", "netty-codec", nettyVersion), - mavenBundle("io.netty", "netty-common", nettyVersion), - mavenBundle("io.netty", "netty-transport", nettyVersion), - mavenBundle("io.netty", "netty-resolver", nettyVersion)); - } - - public static CompositeOption logbackBundles() { - String logbackVersion = getVersion("logback.version"); - return () -> - options( - mavenBundle("ch.qos.logback", "logback-classic", logbackVersion), - mavenBundle("ch.qos.logback", "logback-core", logbackVersion)); - } - - public static CompositeOption jacksonBundles() { - String jacksonVersion = getVersion("jackson.version"); - return () -> - options( - mavenBundle("com.fasterxml.jackson.core", "jackson-databind", jacksonVersion), - mavenBundle("com.fasterxml.jackson.core", "jackson-core", jacksonVersion), - mavenBundle("com.fasterxml.jackson.core", "jackson-annotations", jacksonVersion)); - } - - public static CompositeOption simulacronBundles() { - String simulacronVersion = getVersion("simulacron.version"); - return () -> - options( - mavenBundle( - "com.datastax.oss.simulacron", "simulacron-native-server", simulacronVersion), - mavenBundle("com.datastax.oss.simulacron", "simulacron-common", simulacronVersion), - mavenBundle( - "com.datastax.oss.simulacron", - "simulacron-native-protocol-json", - simulacronVersion)); - } - - public static MavenArtifactProvisionOption lz4Bundle() { - return mavenBundle("org.lz4", "lz4-java", getVersion("lz4.version")); - } - - public static MavenArtifactProvisionOption snappyBundle() { - return mavenBundle("org.xerial.snappy", "snappy-java", getVersion("snappy.version")); - } - - public static String getVersion(String propertyName) { - String value = System.getProperty(propertyName); - if (value == null) { - throw new IllegalArgumentException(propertyName + " system property is not set"); - } - return value; - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiBaseIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiBaseIT.java deleted file mode 100644 index 934368d6168..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiBaseIT.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package com.datastax.oss.driver.osgi;
-
-import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;
-import static org.assertj.core.api.Assertions.assertThat;
-
-import com.datastax.oss.driver.api.core.CqlSession;
-import com.datastax.oss.driver.api.core.CqlSessionBuilder;
-import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
-import com.datastax.oss.driver.api.core.cql.ResultSet;
-import com.datastax.oss.driver.api.core.cql.Row;
-import com.datastax.oss.driver.api.core.session.SessionBuilder;
-import com.datastax.oss.driver.api.testinfra.ccm.CustomCcmRule;
-import com.datastax.oss.driver.categories.IsolatedTests;
-import java.util.List;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.ops4j.pax.exam.junit.PaxExam;
-import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
-import org.ops4j.pax.exam.spi.reactors.PerClass;
-
-/**
- * Tests the capability of using the driver in an OSGi environment. Note that this relies on
- * relative locations of jars in the target directory of their respective modules. It is therefore
- * required that you at least run {@code mvn package} before running these tests.
- */
-@RunWith(PaxExam.class)
-@ExamReactorStrategy(PerClass.class)
-@Category(IsolatedTests.class)
-public abstract class OsgiBaseIT {
-
-  @ClassRule public static CustomCcmRule ccmRule = CustomCcmRule.builder().withNodes(1).build();
-
-  /** @return config loader to be used to create session. */
-  protected abstract DriverConfigLoader configLoader();
-
-  /**
-   * A very simple test that ensures a session can be established and a query made when running in
-   * an OSGi container.
-   */
-  @Test
-  public void should_connect_and_query() {
-    SessionBuilder<CqlSessionBuilder, CqlSession> builder =
-        CqlSession.builder()
-            .addContactEndPoints(ccmRule.getContactPoints())
-            // use the driver's ClassLoader instead of the OSGI application thread's.
-            .withClassLoader(CqlSession.class.getClassLoader())
-            .withConfigLoader(configLoader());
-    try (CqlSession session = builder.build()) {
-      ResultSet result = session.execute(selectFrom("system", "local").all().build());
-
-      List<Row> rows = result.all();
-      assertThat(rows).hasSize(1);
-
-      Row row = rows.get(0);
-      assertThat(row.getString("key")).isEqualTo("local");
-    }
-  }
-}
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiCustomLoadBalancingPolicyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiCustomLoadBalancingPolicyIT.java
deleted file mode 100644
index 3bf3c948ab8..00000000000
--- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiCustomLoadBalancingPolicyIT.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright DataStax, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package com.datastax.oss.driver.osgi; - -import static com.datastax.oss.driver.osgi.BundleOptions.baseOptions; -import static com.datastax.oss.driver.osgi.BundleOptions.driverCoreBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.driverQueryBuilderBundle; -import static org.ops4j.pax.exam.CoreOptions.options; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.Option; - -/** - * Test that uses a policy from a separate bundle from the core driver to ensure that the driver is - * able to load that policy via Reflection. To support this, the driver uses - * DynamicImport-Package: *. - */ -public class OsgiCustomLoadBalancingPolicyIT extends OsgiBaseIT { - - @Configuration - public Option[] config() { - return options(driverCoreBundle(), driverQueryBuilderBundle(), baseOptions()); - } - - @Override - protected DriverConfigLoader configLoader() { - return SessionUtils.configLoaderBuilder() - .withClass( - DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, SortingLoadBalancingPolicy.class) - .build(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiIT.java deleted file mode 100644 index cfebc0d0a14..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiIT.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.osgi; - -import static com.datastax.oss.driver.osgi.BundleOptions.baseOptions; -import static com.datastax.oss.driver.osgi.BundleOptions.driverCoreBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.driverQueryBuilderBundle; -import static org.ops4j.pax.exam.CoreOptions.options; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.Option; - -public class OsgiIT extends OsgiBaseIT { - - @Configuration - public Option[] config() { - return options(driverCoreBundle(), driverQueryBuilderBundle(), baseOptions()); - } - - @Override - protected DriverConfigLoader configLoader() { - return SessionUtils.configLoaderBuilder().build(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiLz4IT.java b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiLz4IT.java deleted file mode 100644 index e42b86d86d0..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiLz4IT.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright DataStax, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.datastax.oss.driver.osgi; - -import static com.datastax.oss.driver.osgi.BundleOptions.baseOptions; -import static com.datastax.oss.driver.osgi.BundleOptions.driverCoreBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.driverQueryBuilderBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.lz4Bundle; -import static org.ops4j.pax.exam.CoreOptions.options; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.Option; - -public class OsgiLz4IT extends OsgiBaseIT { - - @Configuration - public Option[] config() { - return options(lz4Bundle(), driverCoreBundle(), driverQueryBuilderBundle(), baseOptions()); - } - - @Override - protected DriverConfigLoader configLoader() { - return SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, "lz4") - .build(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiShadedIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiShadedIT.java deleted file mode 100644 index 54131d05b7c..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiShadedIT.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.osgi; - -import static com.datastax.oss.driver.osgi.BundleOptions.baseOptions; -import static com.datastax.oss.driver.osgi.BundleOptions.driverCoreShadedBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.driverQueryBuilderBundle; -import static org.ops4j.pax.exam.CoreOptions.options; - -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.Option; - -public class OsgiShadedIT extends OsgiBaseIT { - - @Configuration - public Option[] config() { - return options(driverCoreShadedBundle(), driverQueryBuilderBundle(), baseOptions()); - } - - @Override - protected DriverConfigLoader configLoader() { - return SessionUtils.configLoaderBuilder().build(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiSnappyIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiSnappyIT.java deleted file mode 100644 index 9745e4df280..00000000000 --- a/integration-tests/src/test/java/com/datastax/oss/driver/osgi/OsgiSnappyIT.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright DataStax, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.datastax.oss.driver.osgi; - -import static com.datastax.oss.driver.osgi.BundleOptions.baseOptions; -import static com.datastax.oss.driver.osgi.BundleOptions.driverCoreBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.driverQueryBuilderBundle; -import static com.datastax.oss.driver.osgi.BundleOptions.snappyBundle; -import static org.ops4j.pax.exam.CoreOptions.options; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.testinfra.session.SessionUtils; -import org.ops4j.pax.exam.Configuration; -import org.ops4j.pax.exam.Option; - -public class OsgiSnappyIT extends OsgiBaseIT { - - @Configuration - public Option[] config() { - return options(snappyBundle(), driverCoreBundle(), driverQueryBuilderBundle(), baseOptions()); - } - - @Override - protected DriverConfigLoader configLoader() { - return SessionUtils.configLoaderBuilder() - .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, "snappy") - .build(); - } -} diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/api/querybuilder/JsonInsertIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/JsonInsertIT.java similarity index 79% rename from integration-tests/src/test/java/com/datastax/oss/driver/api/querybuilder/JsonInsertIT.java rename to integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/JsonInsertIT.java index e2140783549..4df5c7a62bd 100644 --- a/integration-tests/src/test/java/com/datastax/oss/driver/api/querybuilder/JsonInsertIT.java +++ b/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/JsonInsertIT.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */
-package com.datastax.oss.driver.api.querybuilder;
+package com.datastax.oss.driver.querybuilder;
 
 import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker;
 import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto;
@@ -28,8 +30,11 @@ import com.datastax.oss.driver.api.core.cql.SimpleStatement;
 import com.datastax.oss.driver.api.core.cql.Statement;
 import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException;
-import com.datastax.oss.driver.api.testinfra.CassandraRequirement;
+import com.datastax.oss.driver.api.core.type.codec.ExtraTypeCodecs;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
 import com.datastax.oss.driver.api.testinfra.ccm.CcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
 import com.datastax.oss.driver.api.testinfra.session.SessionRule;
 import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
 import com.datastax.oss.driver.categories.ParallelizableTests;
@@ -47,32 +52,36 @@ import org.junit.rules.TestRule;
 
 @Category(ParallelizableTests.class)
-@CassandraRequirement(min = "2.2", description = "JSON support in Cassandra was added in 2.2")
+@BackendRequirement(
+    type = BackendType.CASSANDRA,
+    minInclusive = "2.2",
+    description = "JSON support in Cassandra was added in 2.2")
 public class JsonInsertIT {
 
-  private static final CcmRule ccmRule = CcmRule.getInstance();
-  private static final JacksonJsonCodec<User> JACKSON_JSON_CODEC =
-      new JacksonJsonCodec<>(User.class);
+  private static final CcmRule CCM_RULE = CcmRule.getInstance();
 
-  private static SessionRule<CqlSession> sessionRule =
-      SessionRule.builder(ccmRule)
+  private static final SessionRule<CqlSession> SESSION_RULE =
+      SessionRule.builder(CCM_RULE)
           .withConfigLoader(
               SessionUtils.configLoaderBuilder()
                   .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
                   .build())
           .build();
 
-  @ClassRule public static TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+  @ClassRule
+  public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);
+
+  private static final TypeCodec<User> JACKSON_JSON_CODEC = ExtraTypeCodecs.json(User.class);
 
   @BeforeClass
   public static void setup() {
-    sessionRule
+    SESSION_RULE
         .session()
         .execute("CREATE TABLE json_jackson_row(id int PRIMARY KEY, name text, age int)");
   }
 
   @After
   public void clearTable() {
-    sessionRule.session().execute("TRUNCATE TABLE json_jackson_row");
+    SESSION_RULE.session().execute("TRUNCATE TABLE json_jackson_row");
   }
 
   @Test
@@ -195,8 +204,8 @@ public void should_insert_json_using_simple_statement_with_codec_registry() {
   private CqlSession sessionWithCustomCodec() {
     return (CqlSession)
         SessionUtils.baseBuilder()
-            .addContactEndPoints(ccmRule.getContactPoints())
-            .withKeyspace(sessionRule.keyspace())
+            .addContactEndPoints(CCM_RULE.getContactPoints())
+            .withKeyspace(SESSION_RULE.keyspace())
             .addTypeCodecs(JACKSON_JSON_CODEC)
             .build();
   }
@@ -205,8 +214,8 @@ private CqlSession sessionWithoutCustomCodec() {
     return (CqlSession)
         SessionUtils.baseBuilder()
-            .addContactEndPoints(ccmRule.getContactPoints())
-            .withKeyspace(sessionRule.keyspace())
+            .addContactEndPoints(CCM_RULE.getContactPoints())
+            .withKeyspace(SESSION_RULE.keyspace())
             .build();
   }
 
@@ -247,11 +256,15 @@ public String toString() {
   }
 
   @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass())
return false;
-    User user = (User) o;
-    return id == user.id && age == user.age && Objects.equals(name, user.name);
+  public boolean equals(Object other) {
+    if (this == other) {
+      return true;
+    } else if (other instanceof User) {
+      User that = (User) other;
+      return this.id == that.id && this.age == that.age && Objects.equals(this.name, that.name);
+    } else {
+      return false;
+    }
+  }
 
   @Override
diff --git a/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/RelationOptionsIT.java b/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/RelationOptionsIT.java
new file mode 100644
index 00000000000..fc571ccf44d
--- /dev/null
+++ b/integration-tests/src/test/java/com/datastax/oss/driver/querybuilder/RelationOptionsIT.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.querybuilder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
+import com.datastax.oss.driver.api.testinfra.ccm.CcmRule;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
+import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
+import com.datastax.oss.driver.api.testinfra.session.SessionRule;
+import com.datastax.oss.driver.categories.ParallelizableTests;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+
+@Category(ParallelizableTests.class)
+public class RelationOptionsIT {
+
+  private CcmRule ccmRule = CcmRule.getInstance();
+
+  private SessionRule<CqlSession> sessionRule = SessionRule.builder(ccmRule).build();
+
+  @Rule public TestRule chain = RuleChain.outerRule(ccmRule).around(sessionRule);
+
+  @Rule public TestName name = new TestName();
+
+  @Test
+  @BackendRequirement(
+      type = BackendType.CASSANDRA,
+      minInclusive = "3.0",
+      description = "CRC check chance was moved to top level table in Cassandra 3.0")
+  public void should_create_table_with_crc_check_chance() {
+    sessionRule
+        .session()
+        .execute(
+            SchemaBuilder.createTable(name.getMethodName())
+                .withPartitionKey("id", DataTypes.INT)
+                .withColumn("name", DataTypes.TEXT)
+                .withColumn("age", DataTypes.INT)
+                .withCRCCheckChance(0.8)
+                .build());
+    KeyspaceMetadata keyspaceMetadata =
+        sessionRule
+            .session()
+            .getMetadata()
+            .getKeyspace(sessionRule.keyspace())
+            .orElseThrow(AssertionError::new);
+    String describeOutput = keyspaceMetadata.describeWithChildren(true).trim();
+
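+    // describeWithChildren(true) emits the full CREATE statements, so table options
+    // can be verified as plain substrings of the describe output
+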
assertThat(describeOutput).contains("crc_check_chance = 0.8"); + } + + @Test + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "5.0", + description = "chunk_length_kb was renamed to chunk_length_in_kb in Cassandra 5.0") + public void should_create_table_with_chunk_length_in_kb() { + sessionRule + .session() + .execute( + SchemaBuilder.createTable(name.getMethodName()) + .withPartitionKey("id", DataTypes.INT) + .withColumn("name", DataTypes.TEXT) + .withColumn("age", DataTypes.INT) + .withLZ4Compression(4096) + .build()); + KeyspaceMetadata keyspaceMetadata = + sessionRule + .session() + .getMetadata() + .getKeyspace(sessionRule.keyspace()) + .orElseThrow(AssertionError::new); + String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); + + assertThat(describeOutput).contains("'class':'org.apache.cassandra.io.compress.LZ4Compressor'"); + assertThat(describeOutput).contains("'chunk_length_in_kb':'4096'"); + } + + @Test + @BackendRequirement( + type = BackendType.CASSANDRA, + minInclusive = "3.0", + maxExclusive = "5.0", + description = + "Deprecated compression options should still work with Cassandra >= 3.0 & < 5.0") + public void should_create_table_with_deprecated_options() { + sessionRule + .session() + .execute( + SchemaBuilder.createTable(name.getMethodName()) + .withPartitionKey("id", DataTypes.INT) + .withColumn("name", DataTypes.TEXT) + .withColumn("age", DataTypes.INT) + .withLZ4Compression(4096, 0.8) + .build()); + KeyspaceMetadata keyspaceMetadata = + sessionRule + .session() + .getMetadata() + .getKeyspace(sessionRule.keyspace()) + .orElseThrow(AssertionError::new); + String describeOutput = keyspaceMetadata.describeWithChildren(true).trim(); + + assertThat(describeOutput).contains("'class':'org.apache.cassandra.io.compress.LZ4Compressor'"); + assertThat(describeOutput).contains("'chunk_length_in_kb':'4096'"); + assertThat(describeOutput).contains("crc_check_chance = 0.8"); + } +} diff --git a/integration-tests/src/test/resources/describe_it_test_2.1.cql b/integration-tests/src/test/resources/DescribeIT/dse/4.8.cql similarity index 94% rename from integration-tests/src/test/resources/describe_it_test_2.1.cql rename to integration-tests/src/test/resources/DescribeIT/dse/4.8.cql index b05df71a503..ea6ca93bcbf 100644 --- a/integration-tests/src/test/resources/describe_it_test_2.1.cql +++ b/integration-tests/src/test/resources/DescribeIT/dse/4.8.cql @@ -1,3 +1,4 @@ + CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; CREATE TYPE ks_0.btype ( @@ -28,7 +29,8 @@ CREATE TABLE ks_0.rank_by_year_and_name ( rank int, cyclist_name text, PRIMARY KEY ((race_year, race_name), rank) -) WITH bloom_filter_fp_chance = 0.01 +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} @@ -62,4 +64,4 @@ CREATE TABLE ks_0.ztable ( AND memtable_flush_period_in_ms = 0 AND min_index_interval = 128 AND read_repair_chance = 0.0 - AND speculative_retry = '99.0PERCENTILE'; \ No newline at end of file + AND speculative_retry = '99.0PERCENTILE'; diff --git a/integration-tests/src/test/resources/describe_it_test_3.0.cql b/integration-tests/src/test/resources/DescribeIT/dse/5.0.cql similarity index 98% rename from integration-tests/src/test/resources/describe_it_test_3.0.cql rename to 
integration-tests/src/test/resources/DescribeIT/dse/5.0.cql index fe606992a44..2572df52e24 100644 --- a/integration-tests/src/test/resources/describe_it_test_3.0.cql +++ b/integration-tests/src/test/resources/DescribeIT/dse/5.0.cql @@ -1,3 +1,4 @@ + CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; CREATE TYPE ks_0.btype ( @@ -53,7 +54,8 @@ CREATE TABLE ks_0.rank_by_year_and_name ( rank int, cyclist_name text, PRIMARY KEY ((race_year, race_name), rank) -) WITH bloom_filter_fp_chance = 0.01 +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys':'ALL','rows_per_partition':'NONE'} AND comment = '' AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} @@ -184,4 +186,4 @@ CREATE AGGREGATE ks_0.mean(int) SFUNC avgstate STYPE tuple FINALFUNC avgfinal - INITCOND (0,0); \ No newline at end of file + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/describe_it_test_3.11.cql b/integration-tests/src/test/resources/DescribeIT/dse/5.1.cql similarity index 98% rename from integration-tests/src/test/resources/describe_it_test_3.11.cql rename to integration-tests/src/test/resources/DescribeIT/dse/5.1.cql index fe606992a44..2572df52e24 100644 --- a/integration-tests/src/test/resources/describe_it_test_3.11.cql +++ b/integration-tests/src/test/resources/DescribeIT/dse/5.1.cql @@ -1,3 +1,4 @@ + CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; CREATE TYPE ks_0.btype ( @@ -53,7 +54,8 @@ CREATE TABLE ks_0.rank_by_year_and_name ( rank int, cyclist_name text, PRIMARY KEY ((race_year, race_name), rank) -) WITH bloom_filter_fp_chance = 0.01 +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 AND caching = {'keys':'ALL','rows_per_partition':'NONE'} AND comment = '' AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} @@ -184,4 +186,4 @@ CREATE AGGREGATE ks_0.mean(int) SFUNC avgstate STYPE tuple FINALFUNC avgfinal - INITCOND (0,0); \ No newline at end of file + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/dse/6.8.cql b/integration-tests/src/test/resources/DescribeIT/dse/6.8.cql new file mode 100644 index 00000000000..bdeb4737748 --- /dev/null +++ b/integration-tests/src/test/resources/DescribeIT/dse/6.8.cql @@ -0,0 +1,201 @@ + +CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; + +CREATE TYPE ks_0.btype ( + a text +); + +CREATE TYPE ks_0.xtype ( + d text +); + +CREATE TYPE ks_0.ztype ( + c text, + a int +); + +CREATE TYPE ks_0.ctype ( + z frozen, + x frozen +); + +CREATE TYPE ks_0.atype ( + c frozen +); + +CREATE TABLE ks_0.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH additional_write_policy = '99PERCENTILE' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND 
crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); + +CREATE TABLE ks_0.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank DESC) + AND additional_write_policy = '99PERCENTILE' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); + +CREATE TABLE ks_0.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH additional_write_policy = '99PERCENTILE' + AND bloom_filter_fp_chance = 0.1 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','max_threshold':'32','min_threshold':'4','sstable_size_in_mb':'95'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS +SELECT * FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99PERCENTILE' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99PERCENTILE' + AND bloom_filter_fp_chance = 0.01 + AND 
caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = 'simple view' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99PERCENTILE' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE FUNCTION ks_0.avgfinal(state tuple) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION ks_0.avgstate(state tuple,val int) + CALLED ON NULL INPUT + RETURNS tuple + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE ks_0.average(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE ks_0.mean(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/hcd/1.0.cql b/integration-tests/src/test/resources/DescribeIT/hcd/1.0.cql new file mode 100644 index 00000000000..abc70728206 --- /dev/null +++ b/integration-tests/src/test/resources/DescribeIT/hcd/1.0.cql @@ -0,0 +1,186 @@ + +CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; + +CREATE TYPE ks_0.btype ( + a text +); + +CREATE TYPE ks_0.xtype ( + d text +); + +CREATE TYPE ks_0.ztype ( + c text, + a int +); + +CREATE TYPE ks_0.ctype ( + z frozen, + x frozen +); + +CREATE TYPE ks_0.atype ( + c frozen +); + +CREATE TABLE ks_0.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND 
default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); + +CREATE TABLE ks_0.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank DESC) + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); + +CREATE TABLE ks_0.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.1 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','max_threshold':'32','min_threshold':'4','sstable_size_in_mb':'95'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS +SELECT * FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = 'simple view' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND 
max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE FUNCTION ks_0.avgfinal(state tuple) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION ks_0.avgstate(state tuple,val int) + CALLED ON NULL INPUT + RETURNS tuple + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE ks_0.average(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE ks_0.mean(int) + SFUNC avgstate + STYPE tuple + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/2.1.cql b/integration-tests/src/test/resources/DescribeIT/oss/2.1.cql new file mode 100644 index 00000000000..ea6ca93bcbf --- /dev/null +++ b/integration-tests/src/test/resources/DescribeIT/oss/2.1.cql @@ -0,0 +1,67 @@ + +CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; + +CREATE TYPE ks_0.btype ( + a text +); + +CREATE TYPE ks_0.xtype ( + d text +); + +CREATE TYPE ks_0.ztype ( + c text, + a int +); + +CREATE TYPE ks_0.ctype ( + z frozen, + x frozen +); + +CREATE TYPE ks_0.atype ( + c frozen +); + +CREATE TABLE ks_0.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} + AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND dclocal_read_repair_chance = 0.1 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99.0PERCENTILE'; + +CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); + +CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); + +CREATE TABLE ks_0.ztable ( + zkey text, + a frozen, + PRIMARY KEY (zkey) +) WITH bloom_filter_fp_chance = 0.1 + AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' + AND comment = '' + AND compaction = 
{'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} + AND compression = {'sstable_compression':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND default_time_to_live = 0 + AND gc_grace_seconds = 864000 + AND dclocal_read_repair_chance = 0.1 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99.0PERCENTILE'; diff --git a/integration-tests/src/test/resources/describe_it_test_2.2.cql b/integration-tests/src/test/resources/DescribeIT/oss/2.2.cql similarity index 97% rename from integration-tests/src/test/resources/describe_it_test_2.2.cql rename to integration-tests/src/test/resources/DescribeIT/oss/2.2.cql index 5749778e71b..a4035ffa90e 100644 --- a/integration-tests/src/test/resources/describe_it_test_2.2.cql +++ b/integration-tests/src/test/resources/DescribeIT/oss/2.2.cql @@ -1,3 +1,4 @@ + CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; CREATE TYPE ks_0.btype ( @@ -51,7 +52,8 @@ CREATE TABLE ks_0.rank_by_year_and_name ( rank int, cyclist_name text, PRIMARY KEY ((race_year, race_name), rank) -) WITH bloom_filter_fp_chance = 0.01 +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}' AND comment = '' AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'} @@ -109,4 +111,4 @@ CREATE AGGREGATE ks_0.mean(int) SFUNC avgstate STYPE tuple<int,bigint> FINALFUNC avgfinal - INITCOND (0,0); \ No newline at end of file + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/3.0.cql b/integration-tests/src/test/resources/DescribeIT/oss/3.0.cql new file mode 100644 index 00000000000..2572df52e24 --- /dev/null +++ b/integration-tests/src/test/resources/DescribeIT/oss/3.0.cql @@ -0,0 +1,189 @@ + +CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; + +CREATE TYPE ks_0.btype ( + a text +); + +CREATE TYPE ks_0.xtype ( + d text +); + +CREATE TYPE ks_0.ztype ( + c text, + a int +); + +CREATE TYPE ks_0.ctype ( + z frozen<ztype>, + x frozen<xtype> +); + +CREATE TYPE ks_0.atype ( + c frozen<ctype> +); + +CREATE TABLE ks_0.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); + +CREATE TABLE ks_0.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 + AND caching =
{'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); + +CREATE TABLE ks_0.ztable ( + zkey text, + a frozen<atype>, + PRIMARY KEY (zkey) +) WITH bloom_filter_fp_chance = 0.1 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS +SELECT * FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = 'simple view' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction =
{'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE FUNCTION ks_0.avgfinal(state tuple<int,bigint>) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION ks_0.avgstate(state tuple<int,bigint>,val int) + CALLED ON NULL INPUT + RETURNS tuple<int,bigint> + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE ks_0.average(int) + SFUNC avgstate + STYPE tuple<int,bigint> + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE ks_0.mean(int) + SFUNC avgstate + STYPE tuple<int,bigint> + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/3.11.cql b/integration-tests/src/test/resources/DescribeIT/oss/3.11.cql new file mode 100644 index 00000000000..2572df52e24 --- /dev/null +++ b/integration-tests/src/test/resources/DescribeIT/oss/3.11.cql @@ -0,0 +1,189 @@ + +CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; + +CREATE TYPE ks_0.btype ( + a text +); + +CREATE TYPE ks_0.xtype ( + d text +); + +CREATE TYPE ks_0.ztype ( + c text, + a int +); + +CREATE TYPE ks_0.ctype ( + z frozen<ztype>, + x frozen<xtype> +); + +CREATE TYPE ks_0.atype ( + c frozen<ctype> +); + +CREATE TABLE ks_0.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); + +CREATE TABLE ks_0.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank DESC) + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms =
0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); + +CREATE TABLE ks_0.ztable ( + zkey text, + a frozen<atype>, + PRIMARY KEY (zkey) +) WITH bloom_filter_fp_chance = 0.1 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','sstable_size_in_mb':'95'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS +SELECT * FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = 'simple view' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry = '99PERCENTILE'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND dclocal_read_repair_chance = 0.1 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair_chance = 0.0 + AND speculative_retry =
'99PERCENTILE'; + +CREATE FUNCTION ks_0.avgfinal(state tuple<int,bigint>) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION ks_0.avgstate(state tuple<int,bigint>,val int) + CALLED ON NULL INPUT + RETURNS tuple<int,bigint> + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE ks_0.average(int) + SFUNC avgstate + STYPE tuple<int,bigint> + FINALFUNC avgfinal + INITCOND (0,0); + +CREATE AGGREGATE ks_0.mean(int) + SFUNC avgstate + STYPE tuple<int,bigint> + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/DescribeIT/oss/4.0.cql b/integration-tests/src/test/resources/DescribeIT/oss/4.0.cql new file mode 100644 index 00000000000..abc70728206 --- /dev/null +++ b/integration-tests/src/test/resources/DescribeIT/oss/4.0.cql @@ -0,0 +1,186 @@ + +CREATE KEYSPACE ks_0 WITH replication = { 'class' : 'org.apache.cassandra.locator.SimpleStrategy', 'replication_factor': '1' } AND durable_writes = true; + +CREATE TYPE ks_0.btype ( + a text +); + +CREATE TYPE ks_0.xtype ( + d text +); + +CREATE TYPE ks_0.ztype ( + c text, + a int +); + +CREATE TYPE ks_0.ctype ( + z frozen<ztype>, + x frozen<xtype> +); + +CREATE TYPE ks_0.atype ( + c frozen<ctype> +); + +CREATE TABLE ks_0.cyclist_mv ( + cid uuid, + age int, + birthday date, + country text, + name text, + PRIMARY KEY (cid) +) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE INDEX cyclist_by_country ON ks_0.cyclist_mv (country); + +CREATE TABLE ks_0.rank_by_year_and_name ( + race_year int, + race_name text, + rank int, + cyclist_name text, + PRIMARY KEY ((race_year, race_name), rank) +) WITH CLUSTERING ORDER BY (rank DESC) + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE INDEX rrank ON ks_0.rank_by_year_and_name (rank); + +CREATE INDEX ryear ON ks_0.rank_by_year_and_name (race_year); + +CREATE TABLE ks_0.ztable ( + zkey text, + a frozen<atype>, + PRIMARY KEY (zkey) +) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.1 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction =
{'class':'org.apache.cassandra.db.compaction.LeveledCompactionStrategy','max_threshold':'32','min_threshold':'4','sstable_size_in_mb':'95'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_a_age AS +SELECT * FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = 'simple view' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE MATERIALIZED VIEW ks_0.cyclist_by_r_age AS +SELECT + age, + cid, + birthday, + country, + name +FROM ks_0.cyclist_mv +WHERE age IS NOT NULL AND cid IS NOT NULL +PRIMARY KEY (age, cid) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys':'ALL','rows_per_partition':'NONE'} + AND comment = '' + AND compaction = {'class':'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy','max_threshold':'32','min_threshold':'4'} + AND compression = {'chunk_length_in_kb':'64','class':'org.apache.cassandra.io.compress.LZ4Compressor'} + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + +CREATE FUNCTION ks_0.avgfinal(state tuple<int,bigint>) + CALLED ON NULL INPUT + RETURNS double + LANGUAGE java + AS 'double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r /= state.getInt(0); return Double.valueOf(r);'; + +CREATE FUNCTION ks_0.avgstate(state tuple<int,bigint>,val int) + CALLED ON NULL INPUT + RETURNS tuple<int,bigint> + LANGUAGE java + AS 'if (val !=null) { state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue()); } return state;'; + +CREATE AGGREGATE ks_0.average(int) + SFUNC avgstate + STYPE tuple<int,bigint> + FINALFUNC avgfinal +
INITCOND (0,0); + +CREATE AGGREGATE ks_0.mean(int) + SFUNC avgstate + STYPE tuple<int,bigint> + FINALFUNC avgfinal + INITCOND (0,0); diff --git a/integration-tests/src/test/resources/META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService b/integration-tests/src/test/resources/META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService new file mode 100644 index 00000000000..8ad40a9d327 --- /dev/null +++ b/integration-tests/src/test/resources/META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService @@ -0,0 +1 @@ +com.datastax.oss.driver.mapper.GuavaFutureProducerService \ No newline at end of file diff --git a/integration-tests/src/test/resources/application.conf b/integration-tests/src/test/resources/application.conf index 921a93fa6dd..f3ab31bcb76 100644 --- a/integration-tests/src/test/resources/application.conf +++ b/integration-tests/src/test/resources/application.conf @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + # Configuration overrides for integration tests datastax-java-driver { basic { @@ -8,6 +25,9 @@ # (see CcmBridge). local-datacenter = dc1 } + config-reload-interval = 0 + request.timeout = 10 seconds + graph.timeout = 10 seconds } advanced { connection { @@ -20,6 +40,13 @@ trace.interval = 1 second warn-if-set-keyspace = false } + graph { + name = "demo" + } + continuous-paging.timeout { + first-page = 10 seconds + other-pages = 10 seconds + } metrics { // Raise histogram bounds because the tests execute DDL queries with a higher timeout session.cql_requests.highest_latency = 30 seconds diff --git a/integration-tests/src/test/resources/logback-test.xml b/integration-tests/src/test/resources/logback-test.xml index 7a4c0da88a1..a2179e4357b 100644 --- a/integration-tests/src/test/resources/logback-test.xml +++ b/integration-tests/src/test/resources/logback-test.xml @@ -1,12 +1,15 @@ + - - \ No newline at end of file + + diff --git a/licenses/HdrHistogram.txt b/licenses/HdrHistogram.txt new file mode 100644 index 00000000000..401ccfb0ec5 --- /dev/null +++ b/licenses/HdrHistogram.txt @@ -0,0 +1,41 @@ +The code in this repository code was Written by Gil Tene, Michael Barker, +and Matt Warren, and released to the public domain, as explained at +http://creativecommons.org/publicdomain/zero/1.0/ + +For users of this code who wish to consume it under the "BSD" license +rather than under the public domain or CC0 contribution text mentioned +above, the code found under this directory is *also* provided under the +following license (commonly referred to as the BSD 2-Clause License). This
This +license does not detract from the above stated release of the code into +the public domain, and simply represents an additional license granted by +the Author. + +----------------------------------------------------------------------------- +** Beginning of "BSD 2-Clause License" text. ** + + Copyright (c) 2012, 2013, 2014, 2015, 2016 Gil Tene + Copyright (c) 2014 Michael Barker + Copyright (c) 2014 Matt Warren + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/asm.txt b/licenses/asm.txt new file mode 100644 index 00000000000..c71bb7bac5d --- /dev/null +++ b/licenses/asm.txt @@ -0,0 +1,27 @@ +ASM: a very small and fast Java bytecode manipulation framework +Copyright (c) 2000-2011 INRIA, France Telecom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/jnr-posix.txt b/licenses/jnr-posix.txt new file mode 100644 index 00000000000..4dc4217a306 --- /dev/null +++ b/licenses/jnr-posix.txt @@ -0,0 +1,1076 @@ +jnr-posix is released under a tri EPL/GPL/LGPL license. You can use it, +redistribute it and/or modify it under the terms of the: + + Eclipse Public License version 2.0 + OR + GNU General Public License version 2 + OR + GNU Lesser General Public License version 2.1 + +The complete text of the Eclipse Public License is as follows: + + Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + + 1. DEFINITIONS + + "Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + + "Contributor" means any person or entity that Distributes the Program. + + "Licensed Patents" mean patent claims licensable by a Contributor which + are necessarily infringed by the use or sale of its Contribution alone + or when combined with the Program. + + "Program" means the Contributions Distributed in accordance with this + Agreement. + + "Recipient" means anyone who receives the Program under this Agreement + or any Secondary License (as applicable), including Contributors. + + "Derivative Works" shall mean any work, whether in Source Code or other + form, that is based on (or derived from) the Program and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. + + "Modified Works" shall mean any work in Source Code or other form that + results from an addition to, deletion from, or modification of the + contents of the Program, including, for purposes of clarity any new file + in Source Code form that contains any contents of the Program. Modified + Works shall not include works that contain only declarations, + interfaces, types, classes, structures, or files of the Program solely + in each case in order to link to, bind by name, or subclass the Program + or Modified Works thereof. + + "Distribute" means the acts of a) distributing or b) making available + in any manner that enables the transfer of a copy. + + "Source Code" means the form of a Program preferred for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + "Secondary License" means either the GNU General Public License, + Version 2.0, or any later versions of that license, including any + exceptions or additional permissions as identified by the initial + Contributor. + + 2. 
GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + + 3. 
REQUIREMENTS + + 3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + + 3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. + + 3.3 Contributors may not remove or alter any copyright, patent, + trademark, attribution notices, disclaimers of warranty, or limitations + of liability ("notices") contained within the Program from any copy of + the Program which they Distribute, provided that Contributors may add + their own appropriate notices. + + 4. COMMERCIAL DISTRIBUTION + + Commercial distributors of software may accept certain responsibilities + with respect to end users, business partners and the like. While this + license is intended to facilitate the commercial use of the Program, + the Contributor who includes the Program in a commercial product + offering should do so in a manner which does not create potential + liability for other Contributors. Therefore, if a Contributor includes + the Program in a commercial product offering, such Contributor + ("Commercial Contributor") hereby agrees to defend and indemnify every + other Contributor ("Indemnified Contributor") against any losses, + damages and costs (collectively "Losses") arising from claims, lawsuits + and other legal actions brought by a third party against the Indemnified + Contributor to the extent caused by the acts or omissions of such + Commercial Contributor in connection with its distribution of the Program + in a commercial product offering. The obligations in this section do not + apply to any claims or Losses relating to any actual or alleged + intellectual property infringement. 
In order to qualify, an Indemnified + Contributor must: a) promptly notify the Commercial Contributor in + writing of such claim, and b) allow the Commercial Contributor to control, + and cooperate with the Commercial Contributor in, the defense and any + related settlement negotiations. The Indemnified Contributor may + participate in any such claim at its own expense. + + For example, a Contributor might include the Program in a commercial + product offering, Product X. That Contributor is then a Commercial + Contributor. If that Commercial Contributor then makes performance + claims, or offers warranties related to Product X, those performance + claims and warranties are such Commercial Contributor's responsibility + alone. Under this section, the Commercial Contributor would have to + defend claims against the other Contributors related to those performance + claims and warranties, and if a court requires any other Contributor to + pay any damages as a result, the Commercial Contributor must pay + those damages. + + 5. NO WARRANTY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT + PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" + BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR + IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF + TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR + PURPOSE. Each Recipient is solely responsible for determining the + appropriateness of using and distributing the Program and assumes all + risks associated with its exercise of rights under this Agreement, + including but not limited to the risks and costs of program errors, + compliance with applicable laws, damage to or loss of data, programs + or equipment, and unavailability or interruption of operations. + + 6. DISCLAIMER OF LIABILITY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT + PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS + SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST + PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE + EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGES. + + 7. GENERAL + + If any provision of this Agreement is invalid or unenforceable under + applicable law, it shall not affect the validity or enforceability of + the remainder of the terms of this Agreement, and without further + action by the parties hereto, such provision shall be reformed to the + minimum extent necessary to make such provision valid and enforceable. + + If Recipient institutes patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that the + Program itself (excluding combinations of the Program with other software + or hardware) infringes such Recipient's patent(s), then such Recipient's + rights granted under Section 2(b) shall terminate as of the date such + litigation is filed. + + All Recipient's rights under this Agreement shall terminate if it + fails to comply with any of the material terms or conditions of this + Agreement and does not cure such failure in a reasonable period of + time after becoming aware of such noncompliance. 
If all Recipient's + rights under this Agreement terminate, Recipient agrees to cease use + and distribution of the Program as soon as reasonably practicable. + However, Recipient's obligations under this Agreement and any licenses + granted by Recipient relating to the Program shall continue and survive. + + Everyone is permitted to copy and distribute copies of this Agreement, + but in order to avoid inconsistency the Agreement is copyrighted and + may only be modified in the following manner. The Agreement Steward + reserves the right to publish new versions (including revisions) of + this Agreement from time to time. No one other than the Agreement + Steward has the right to modify this Agreement. The Eclipse Foundation + is the initial Agreement Steward. The Eclipse Foundation may assign the + responsibility to serve as the Agreement Steward to a suitable separate + entity. Each new version of the Agreement will be given a distinguishing + version number. The Program (including Contributions) may always be + Distributed subject to the version of the Agreement under which it was + received. In addition, after a new version of the Agreement is published, + Contributor may elect to Distribute the Program (including its + Contributions) under the new version. + + Except as expressly stated in Sections 2(a) and 2(b) above, Recipient + receives no rights or licenses to the intellectual property of any + Contributor under this Agreement, whether expressly, by implication, + estoppel or otherwise. All rights in the Program not expressly granted + under this Agreement are reserved. Nothing in this Agreement is intended + to be enforceable by any entity that is not a Contributor or Recipient. + No third-party beneficiary rights are created under this Agreement. + + Exhibit A - Form of Secondary Licenses Notice + + "This Source Code may also be made available under the following + Secondary Licenses when the conditions for such availability set forth + in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), + version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. + +The complete text of the GNU General Public License v2 is as follows: + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + License is intended to guarantee your freedom to share and change free + software--to make sure the software is free for all its users. This + General Public License applies to most of the Free Software + Foundation's software and to any other program whose authors commit to + using it. (Some other Free Software Foundation software is covered by + the GNU Library General Public License instead.) You can apply it to + your programs, too. 
+ + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + this service if you wish), that you receive source code or can get it + if you want it, that you can change the software or use pieces of it + in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid + anyone to deny you these rights or to ask you to surrender the rights. + These restrictions translate to certain responsibilities for you if you + distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must give the recipients all the rights that + you have. You must make sure that they, too, receive or can get the + source code. And you must show them these terms so they know their + rights. + + We protect your rights with two steps: (1) copyright the software, and + (2) offer you this license which gives you legal permission to copy, + distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain + that everyone understands that there is no warranty for this free + software. If the software is modified by someone else and passed on, we + want its recipients to know that what they have is not the original, so + that any problems introduced by others will not reflect on the original + authors' reputations. + + Finally, any free program is threatened constantly by software + patents. We wish to avoid the danger that redistributors of a free + program will individually obtain patent licenses, in effect making the + program proprietary. To prevent this, we have made it clear that any + patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and + modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains + a notice placed by the copyright holder saying it may be distributed + under the terms of this General Public License. The "Program", below, + refers to any such program or work, and a "work based on the Program" + means either the Program or any derivative work under copyright law: + that is to say, a work containing the Program or a portion of it, + either verbatim or with modifications and/or translated into another + language. (Hereinafter, translation is included without limitation in + the term "modification".) Each licensee is addressed as "you". + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running the Program is not restricted, and the output from the Program + is covered only if its contents constitute a work based on the + Program (independent of having been made by running the Program). + Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's + source code as you receive it, in any medium, provided that you + conspicuously and appropriately publish on each copy an appropriate + copyright notice and disclaimer of warranty; keep intact all the + notices that refer to this License and to the absence of any warranty; + and give any other recipients of the Program a copy of this License + along with the Program. + + You may charge a fee for the physical act of transferring a copy, and + you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion + of it, thus forming a work based on the Program, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Program, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Program, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Program. + + In addition, mere aggregation of another work not based on the Program + with the Program (or with a work based on the Program) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, + under Section 2) in object code or executable form under the terms of + Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + + The source code for a work means the preferred form of the work for + making modifications to it. For an executable work, complete source + code means all the source code for all modules it contains, plus any + associated interface definition files, plus the scripts used to + control compilation and installation of the executable. However, as a + special exception, the source code distributed need not include + anything that is normally distributed (in either source or binary + form) with the major components (compiler, kernel, and so on) of the + operating system on which the executable runs, unless that component + itself accompanies the executable. + + If distribution of executable or object code is made by offering + access to copy from a designated place, then offering equivalent + access to copy the source code from the same place counts as + distribution of the source code, even though third parties are not + compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense or distribute the Program is + void, and will automatically terminate your rights under this License. + However, parties who have received copies, or rights, from you under + this License will not have their licenses terminated so long as such + parties remain in full compliance. + + 5. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Program or its derivative works. These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Program (or any work based on the + Program), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the + Program), the recipient automatically receives a license from the + original licensor to copy, distribute or modify the Program subject to + these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties to + this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Program at all. For example, if a patent + license would not permit royalty-free redistribution of the Program by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Program. + + If any portion of this section is held invalid or unenforceable under + any particular circumstance, the balance of the section is intended to + apply and the section as a whole is intended to apply in other + circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system, which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Program under this License + may add an explicit geographical distribution limitation excluding + those countries, so that distribution is permitted only in or among + countries not thus excluded. In such case, this License incorporates + the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions + of the General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the Program + specifies a version number of this License which applies to it and "any + later version", you have the option of following the terms and conditions + either of that version or of any later version published by the Free + Software Foundation. If the Program does not specify a version number of + this License, you may choose any version ever published by the Free Software + Foundation. + + 10. If you wish to incorporate parts of the Program into other free + programs whose distribution conditions are different, write to the author + to ask for permission. For software which is copyrighted by the Free + Software Foundation, write to the Free Software Foundation; we sometimes + make exceptions for this. Our decision will be guided by the two goals + of preserving the free status of all derivatives of our free software and + of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY + FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN + OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES + PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED + OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS + TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE + PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, + REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR + REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, + INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING + OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED + TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY + YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER + PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + +The complete text of the GNU Lesser General Public License 2.1 is as follows: + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + [This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + Licenses are intended to guarantee your freedom to share and change + free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some + specially designated software packages--typically libraries--of the + Free Software Foundation and other authors who decide to use it. You + can use it too, but we suggest you first think carefully about whether + this license or the ordinary General Public License is the better + strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, + not price. Our General Public Licenses are designed to make sure that + you have the freedom to distribute copies of free software (and charge + for this service if you wish); that you receive source code or can get + it if you want it; that you can change the software and use pieces of + it in new free programs; and that you are informed that you can do + these things. + + To protect your rights, we need to make restrictions that forbid + distributors to deny you these rights or to ask you to surrender these + rights. These restrictions translate to certain responsibilities for + you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis + or for a fee, you must give the recipients all the rights that we gave + you. You must make sure that they, too, receive or can get the source + code. 
If you link other code with the library, you must provide + complete object files to the recipients, so that they can relink them + with the library after making changes to the library and recompiling + it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the + library, and (2) we offer you this license, which gives you legal + permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that + there is no warranty for the free library. Also, if the library is + modified by someone else and passed on, the recipients should know + that what they have is not the original version, so that the original + author's reputation will not be affected by problems that might be + introduced by others. + + Finally, software patents pose a constant threat to the existence of + any free program. We wish to make sure that a company cannot + effectively restrict the users of a free program by obtaining a + restrictive license from a patent holder. Therefore, we insist that + any patent license obtained for a version of the library must be + consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the + ordinary GNU General Public License. This license, the GNU Lesser + General Public License, applies to certain designated libraries, and + is quite different from the ordinary General Public License. We use + this license for certain libraries in order to permit linking those + libraries into non-free programs. + + When a program is linked with a library, whether statically or using + a shared library, the combination of the two is legally speaking a + combined work, a derivative of the original library. The ordinary + General Public License therefore permits such linking only if the + entire combination fits its criteria of freedom. The Lesser General + Public License permits more lax criteria for linking other code with + the library. + + We call this license the "Lesser" General Public License because it + does Less to protect the user's freedom than the ordinary General + Public License. It also provides other free software developers Less + of an advantage over competing non-free programs. These disadvantages + are the reason we use the ordinary General Public License for many + libraries. However, the Lesser license provides advantages in certain + special circumstances. + + For example, on rare occasions, there may be a special need to + encourage the widest possible use of a certain library, so that it becomes + a de-facto standard. To achieve this, non-free programs must be + allowed to use the library. A more frequent case is that a free + library does the same job as widely used non-free libraries. In this + case, there is little to gain by limiting the free library to free + software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free + programs enables a greater number of people to use a large body of + free software. For example, permission to use the GNU C Library in + non-free programs enables many more people to use the whole GNU + operating system, as well as its variant, the GNU/Linux operating + system. 
+ + Although the Lesser General Public License is Less protective of the + users' freedom, it does ensure that the user of a program that is + linked with the Library has the freedom and the wherewithal to run + that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and + modification follow. Pay close attention to the difference between a + "work based on the library" and a "work that uses the library". The + former contains code derived from the library, whereas the latter must + be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other + program which contains a notice placed by the copyright holder or + other authorized party saying it may be distributed under the terms of + this Lesser General Public License (also called "this License"). + Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data + prepared so as to be conveniently linked with application programs + (which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work + which has been distributed under these terms. A "work based on the + Library" means either the Library or any derivative work under + copyright law: that is to say, a work containing the Library or a + portion of it, either verbatim or with modifications and/or translated + straightforwardly into another language. (Hereinafter, translation is + included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for + making modifications to it. For a library, complete source code means + all the source code for all modules it contains, plus any associated + interface definition files, plus the scripts used to control compilation + and installation of the library. + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running a program using the Library is not restricted, and output from + such a program is covered only if its contents constitute a work based + on the Library (independent of the use of the Library in a tool for + writing it). Whether that is true depends on what the Library does + and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's + complete source code as you receive it, in any medium, provided that + you conspicuously and appropriately publish on each copy an + appropriate copyright notice and disclaimer of warranty; keep intact + all the notices that refer to this License and to the absence of any + warranty; and distribute a copy of this License along with the + Library. + + You may charge a fee for the physical act of transferring a copy, + and you may at your option offer warranty protection in exchange for a + fee. + + 2. You may modify your copy or copies of the Library or any portion + of it, thus forming a work based on the Library, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. 
+ + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Library, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Library, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote + it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Library. + + In addition, mere aggregation of another work not based on the Library + with the Library (or with a work based on the Library) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public + License instead of this License to a given copy of the Library. To do + this, you must alter all the notices that refer to this License, so + that they refer to the ordinary GNU General Public License, version 2, + instead of to this License. (If a newer version than version 2 of the + ordinary GNU General Public License has appeared, then you can specify + that version instead if you wish.) Do not make any other change in + these notices. + + Once this change is made in a given copy, it is irreversible for + that copy, so the ordinary GNU General Public License applies to all + subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of + the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or + derivative of it, under Section 2) in object code or executable form + under the terms of Sections 1 and 2 above provided that you accompany + it with the complete corresponding machine-readable source code, which + must be distributed under the terms of Sections 1 and 2 above on a + medium customarily used for software interchange. 
+ + If distribution of object code is made by offering access to copy + from a designated place, then offering equivalent access to copy the + source code from the same place satisfies the requirement to + distribute the source code, even though third parties are not + compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the + Library, but is designed to work with the Library by being compiled or + linked with it, is called a "work that uses the Library". Such a + work, in isolation, is not a derivative work of the Library, and + therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library + creates an executable that is a derivative of the Library (because it + contains portions of the Library), rather than a "work that uses the + library". The executable is therefore covered by this License. + Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file + that is part of the Library, the object code for the work may be a + derivative work of the Library even though the source code is not. + Whether this is true is especially significant if the work can be + linked without the Library, or if the work is itself a library. The + threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data + structure layouts and accessors, and small macros and small inline + functions (ten lines or less in length), then the use of the object + file is unrestricted, regardless of whether it is legally a derivative + work. (Executables containing this object code plus portions of the + Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may + distribute the object code for the work under the terms of Section 6. + Any executables containing that work also fall under Section 6, + whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or + link a "work that uses the Library" with the Library to produce a + work containing portions of the Library, and distribute that work + under terms of your choice, provided that the terms permit + modification of the work for the customer's own use and reverse + engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the + Library is used in it and that the Library and its use are covered by + this License. You must supply a copy of this License. If the work + during execution displays copyright notices, you must include the + copyright notice for the Library among them, as well as a reference + directing the user to the copy of this License. Also, you must do one + of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the + Library" must include any data and utility programs needed for + reproducing the executable from it. However, as a special exception, + the materials to be distributed need not include anything that is + normally distributed (in either source or binary form) with the major + components (compiler, kernel, and so on) of the operating system on + which the executable runs, unless that component itself accompanies + the executable. + + It may happen that this requirement contradicts the license + restrictions of other proprietary libraries that do not normally + accompany the operating system. Such a contradiction means you cannot + use both them and the Library together in an executable that you + distribute. + + 7. You may place library facilities that are a work based on the + Library side-by-side in a single library together with other library + facilities not covered by this License, and distribute such a combined + library, provided that the separate distribution of the work based on + the Library and of the other library facilities is otherwise + permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute + the Library except as expressly provided under this License. Any + attempt otherwise to copy, modify, sublicense, link with, or + distribute the Library is void, and will automatically terminate your + rights under this License. However, parties who have received copies, + or rights, from you under this License will not have their licenses + terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Library or its derivative works. These actions are + prohibited by law if you do not accept this License. 
Therefore, by + modifying or distributing the Library (or any work based on the + Library), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the + Library), the recipient automatically receives a license from the + original licensor to copy, distribute, link with or modify the Library + subject to these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties with + this License. + + 11. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Library at all. For example, if a patent + license would not permit royalty-free redistribution of the Library by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Library. + + If any portion of this section is held invalid or unenforceable under any + particular circumstance, the balance of the section is intended to apply, + and the section as a whole is intended to apply in other circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Library under this License may add + an explicit geographical distribution limitation excluding those countries, + so that distribution is permitted only in or among countries not thus + excluded. In such case, this License incorporates the limitation as if + written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new + versions of the Lesser General Public License from time to time. + Such new versions will be similar in spirit to the present version, + but may differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the Library + specifies a version number of this License which applies to it and + "any later version", you have the option of following the terms and + conditions either of that version or of any later version published by + the Free Software Foundation. If the Library does not specify a + license version number, you may choose any version ever published by + the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free + programs whose distribution conditions are incompatible with these, + write to the author to ask for permission. For software which is + copyrighted by the Free Software Foundation, write to the Free + Software Foundation; we sometimes make exceptions for this. Our + decision will be guided by the two goals of preserving the free status + of all derivatives of our free software and of promoting the sharing + and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO + WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. + EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR + OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY + KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE + LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME + THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN + WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY + AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU + FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR + CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE + LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING + RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A + FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF + SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest + possible use to the public, we recommend making it free software that + everyone can redistribute and change. You can do so by permitting + redistribution under these terms (or, alternatively, under the terms of the + ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is + safest to attach them to the start of each source file to most effectively + convey the exclusion of warranty; and each file should have at least the + "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Also add information on how to contact you by electronic and paper mail. + + You should also get your employer (if you work as a programmer) or your + school, if any, to sign a "copyright disclaimer" for the library, if + necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + + That's all there is to it! diff --git a/licenses/jnr-x86asm.txt b/licenses/jnr-x86asm.txt new file mode 100644 index 00000000000..c9583db05fd --- /dev/null +++ b/licenses/jnr-x86asm.txt @@ -0,0 +1,24 @@ + + Copyright (C) 2010 Wayne Meissner + Copyright (c) 2008-2009, Petr Kobalicek + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/reactive-streams.txt b/licenses/reactive-streams.txt new file mode 100644 index 00000000000..1e141c13ddb --- /dev/null +++ b/licenses/reactive-streams.txt @@ -0,0 +1,7 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/slf4j-api.txt b/licenses/slf4j-api.txt new file mode 100644 index 00000000000..bb09a9ad4ec --- /dev/null +++ b/licenses/slf4j-api.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2023 QOS.ch +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/manual/.nav b/manual/.nav index f99a21741c8..35e0225438b 100644 --- a/manual/.nav +++ b/manual/.nav @@ -3,4 +3,6 @@ query_builder mapper api_conventions case_sensitivity -osgi \ No newline at end of file +osgi +cloud +developer diff --git a/manual/README.md b/manual/README.md index d4ed76039ce..049ddc8c8e9 100644 --- a/manual/README.md +++ b/manual/README.md @@ -1,3 +1,22 @@ + + ## Manual Driver modules: @@ -6,9 +25,12 @@ Driver modules: * [Query builder](query_builder/): a fluent API to create CQL queries programmatically. * [Mapper](mapper/): generates the boilerplate to execute queries and convert the results into application-level objects. +* [Developer docs](developer/): explains the codebase and internal extension points for advanced + customization. Common topics: * [API conventions](api_conventions/) * [Case sensitivity](case_sensitivity/) -* [OSGi](osgi/) \ No newline at end of file +* [OSGi](osgi/) +* [Cloud](cloud/) diff --git a/manual/api_conventions/README.md b/manual/api_conventions/README.md index a76067ebef2..553392658dd 100644 --- a/manual/api_conventions/README.md +++ b/manual/api_conventions/README.md @@ -1,3 +1,22 @@ + + ## API conventions In previous versions, the driver relied solely on Java visibility rules: everything was either @@ -41,4 +60,4 @@ internalContext.getEventBus().fire(TopologyEvent.forceDown(address)); So the risk of unintentionally using the internal API is very low. To double-check, you can always grep `import com.datastax.oss.driver.internal` in your source files. -[semantic versioning]: http://semver.org/ \ No newline at end of file +[semantic versioning]: http://semver.org/ diff --git a/manual/case_sensitivity/README.md b/manual/case_sensitivity/README.md index 98fa681e718..e9dbf1bf9a8 100644 --- a/manual/case_sensitivity/README.md +++ b/manual/case_sensitivity/README.md @@ -1,3 +1,22 @@ + + ## Case sensitivity ### In Cassandra @@ -106,18 +125,18 @@ For "consuming" methods, string overloads are also provided for convenience, for * in other cases, the string is always assumed to be in CQL form, and converted on the fly with `CqlIdentifier.fromCql`. 
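+
+For example, to read a case-sensitive column through one of the string overloads, the string must be
+in CQL form, i.e. include the double quotes (a quick sketch; the column names are hypothetical):
+
+```java
+// Case-insensitive column: the bare name works.
+String id = row.getString("shopping_cart_id");
+// Case-sensitive column: pass the exact CQL form, quotes included...
+String owner = row.getString("\"shoppingCartOwner\"");
+// ...or build the identifier once and reuse it:
+CqlIdentifier ownerCol = CqlIdentifier.fromCql("\"shoppingCartOwner\"");
+String sameOwner = row.getString(ownerCol);
+```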
-[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlIdentifier.html -[Row]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/Row.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/UdtValue.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/AccessibleByName.html +[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html +[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html +[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/AccessibleByName.html ### Good practices As should be clear by now, case sensitivity introduces a lot of extra (and arguably unnecessary) complexity. -The Java driver team's recommendation is: +The Java Driver team's recommendation is: > **Always use case-insensitive identifiers in your data model.** @@ -130,4 +149,4 @@ If you worry about readability, use snake case (`shopping_cart`), or simply stic The only reason to use case sensitivity should be if you don't control the data model. In that case, either pass quoted strings to the driver, or use `CqlIdentifier` instances (stored as -constants to avoid creating them over and over). \ No newline at end of file +constants to avoid creating them over and over). diff --git a/manual/cloud/README.md b/manual/cloud/README.md new file mode 100644 index 00000000000..9116b03dac3 --- /dev/null +++ b/manual/cloud/README.md @@ -0,0 +1,150 @@ + + +## Connecting to Astra (Cloud) + +Using the Java Driver to connect to a DataStax Astra database is almost identical to using +the driver to connect to any normal Apache Cassandra® database. The only differences are in how the +driver is configured in an application and that you will need to obtain a *secure connect bundle*. + +### Prerequisites + +1. [Download][Download Maven] and [install][Install Maven] Maven. +2. Create an Astra database on [AWS/Azure/GCP][Create an Astra database - AWS/Azure/GCP]; + alternatively, have a team member provide access to their + Astra database (see instructions for [AWS/Azure/GCP][Access an Astra database - AWS/Azure/GCP]) to + obtain database connection details. +3. Download the secure connect bundle (see instructions for + [AWS/Azure/GCP][Download the secure connect bundle - AWS/Azure/GCP]) that contains connection + information such as contact points and certificates. + +### Procedure + +Create a minimal project structure as explained [here][minimal project structure]. 
Then modify +`Main.java` using one of the following approaches: + +#### Programmatic configuration + +You can pass the connection information directly to `CqlSession.builder()`: + +```java +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import java.nio.file.Paths; + +public class Main { + + public static void main(String[] args) { + try (CqlSession session = CqlSession.builder() + // make sure you change the path to the secure connect bundle below + .withCloudSecureConnectBundle(Paths.get("/path/to/secure-connect-database_name.zip")) + .withAuthCredentials("user_name","password") + .withKeyspace("keyspace_name") + .build()) { + + // For the sake of example, run a simple query and print the results + ResultSet rs = session.execute("select release_version from system.local"); + Row row = rs.one(); + if (row != null) { + System.out.println(row.getString("release_version")); + } else { + System.out.println("An error occurred."); + } + } + } + } +``` + +The path to the secure connect bundle for your Astra database is specified with +`withCloudSecureConnectBundle()`. The authentication credentials must be specified separately with +`withAuthCredentials()`, and match the username and password that were configured when creating the +Astra database. + +Note the following: + +* an SSL connection will be established automatically. Manual SSL configuration is not allowed, any + settings in the driver configuration (`advanced.ssl-engine-factory`) will be ignored; +* the secure connect bundle contains all of the necessary contact information. Specifying contact + points manually is not allowed, and will result in an error; +* if the driver configuration does not specify an explicit consistency level, it will default to + `LOCAL_QUORUM` (instead of `LOCAL_ONE` when connecting to a normal Cassandra database). + +#### File-based configuration + +Alternatively, the connection information can be specified in the driver's configuration file +(`application.conf`). Merge the following options with any content already present: + +```properties +datastax-java-driver { + basic { + # change this to match the target keyspace + session-keyspace = keyspace_name + cloud { + # change this to match bundle's location; can be either a path on the local filesystem + # or a valid URL, e.g. http://acme.com/path/to/secure-connect-database_name.zip + secure-connect-bundle = /path/to/secure-connect-database_name.zip + } + } + advanced { + auth-provider { + class = PlainTextAuthProvider + # change below to match the appropriate credentials + username = user_name + password = password + } + } +} +``` + +For more information about the driver configuration mechanism, refer to the [driver documentation]. + +With the above configuration, your main Java class can be simplified as shown below: + +```java +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; + +public class Main { + + public static void main(String[] args) { + // Create the CqlSession object; it will read the configuration file and pick the right + // values to connect to the Astra database. 
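+    // (with the driver's default configuration loader, application.conf is read from the
+    // classpath, e.g. src/main/resources in a Maven project)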
+ try (CqlSession session = CqlSession.builder().build()) { + + ResultSet rs = session.execute("select release_version from system.local"); + Row row = rs.one(); + if (row != null) { + System.out.println(row.getString("release_version")); + } else { + System.out.println("An error occurred."); + } + } + } +} +``` + +[Download Maven]: https://maven.apache.org/download.cgi +[Install Maven]: https://maven.apache.org/install.html +[Create an Astra database - AWS/Azure/GCP]: https://docs.datastax.com/en/astra/docs/creating-your-astra-database.html +[Access an Astra database - AWS/Azure/GCP]: https://docs.datastax.com/en/astra/docs/obtaining-database-credentials.html#_sharing_your_secure_connect_bundle +[Download the secure connect bundle - AWS/Azure/GCP]: https://docs.datastax.com/en/astra/docs/obtaining-database-credentials.html +[minimal project structure]: ../core/integration/#minimal-project-structure +[driver documentation]: ../core/configuration/ diff --git a/manual/core/README.md b/manual/core/README.md index 9f0bedaee08..5ca4cd7872f 100644 --- a/manual/core/README.md +++ b/manual/core/README.md @@ -1,3 +1,22 @@ + + ## Core driver The core module handles cluster connectivity and request execution. It is published under the @@ -5,9 +24,9 @@ following coordinates: ```xml - com.datastax.oss + org.apache.cassandra java-driver-core - 4.1.0 + ${driver.version} ``` @@ -56,6 +75,10 @@ We recommend that you take a look at the [reference configuration](configuration list of available options, and cross-reference with the sub-sections in this manual for more explanations. +By default, `CqlSession.builder().build()` fails immediately if the cluster is not available. If you +want to retry instead, you can set the [reconnect-on-init](reconnection/#at-init-time) option in the +configuration. + ##### Contact points If you don't specify any contact point, the driver defaults to `127.0.0.1:9042`: @@ -227,34 +250,35 @@ See [AccessibleByName] for an explanation of the conversion rules. 
 ##### CQL to Java type mapping
 
-| CQL3 data type | Getter name | Java type | See also |
-|---------------------|----------------|----------------------|-------------------------------------|
-| ascii | getString | java.lang.String | |
-| bigint | getLong | long | |
-| blob | getByteBuffer | java.nio.ByteBuffer | |
-| boolean | getBoolean | boolean | |
-| counter | getLong | long | |
-| date | getLocalDate | java.time.LocalDate | [Temporal types](temporal_types/) |
-| decimal | getBigDecimal | java.math.BigDecimal | |
-| double | getDouble | double | |
-| duration | getCqlDuration | [CqlDuration] | [Temporal types](temporal_types/) |
-| float | getFloat | float | |
-| inet | getInetAddress | java.net.InetAddress | |
-| int | getInt | int | |
-| list | getList | java.util.List | |
-| map | getMap | java.util.Map | |
-| set | getSet | java.util.Set | |
-| smallint | getShort | short | |
-| text | getString | java.lang.String | |
-| time | getLocalTime | java.time.LocalTime | [Temporal types](temporal_types/) |
-| timestamp | getInstant | java.time.Instant | [Temporal types](temporal_types/) |
-| timeuuid | getUuid | java.util.UUID | |
-| tinyint | getByte | byte | |
-| tuple | getTupleValue | [TupleValue] | [Tuples](tuples/) |
-| user-defined types | getUDTValue | [UDTValue] | [User-defined types](udts/) |
-| uuid | getUuid | java.util.UUID | |
-| varchar | getString | java.lang.String | |
-| varint | getVarint | java.math.BigInteger | |
+| CQL3 data type | Getter name | Java type | See also |
+|--------------------|----------------|----------------------|-----------------------------------|
+| ascii | getString | java.lang.String | |
+| bigint | getLong | long | |
+| blob | getByteBuffer | java.nio.ByteBuffer | |
+| boolean | getBoolean | boolean | |
+| counter | getLong | long | |
+| date | getLocalDate | java.time.LocalDate | [Temporal types](temporal_types/) |
+| decimal | getBigDecimal | java.math.BigDecimal | |
+| double | getDouble | double | |
+| duration | getCqlDuration | [CqlDuration] | [Temporal types](temporal_types/) |
+| float | getFloat | float | |
+| inet | getInetAddress | java.net.InetAddress | |
+| int | getInt | int | |
+| list | getList | java.util.List | |
+| map | getMap | java.util.Map | |
+| set | getSet | java.util.Set | |
+| smallint | getShort | short | |
+| text | getString | java.lang.String | |
+| time | getLocalTime | java.time.LocalTime | [Temporal types](temporal_types/) |
+| timestamp | getInstant | java.time.Instant | [Temporal types](temporal_types/) |
+| timeuuid | getUuid | java.util.UUID | |
+| tinyint | getByte | byte | |
+| tuple | getTupleValue | [TupleValue] | [Tuples](tuples/) |
+| user-defined types | getUdtValue | [UdtValue] | [User-defined types](udts/) |
+| uuid | getUuid | java.util.UUID | |
+| varchar | getString | java.lang.String | |
+| varint | getBigInteger | java.math.BigInteger | |
+| vector | getVector | [CqlVector] | [Custom Codecs](custom_codecs/) |
 
 Sometimes the driver has to infer a CQL type from a Java type (for example when handling the values
 of [simple statements](statements/simple/)); for those that have multiple CQL equivalents, it makes
@@ -310,18 +334,19 @@ for (ColumnDefinitions.Definition definition : row.getColumnDefinitions()) {
 }
 ```
 
-[CqlSession]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlSession.html
-[CqlSession#builder()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlSession.html#builder--
-[ResultSet]: 
https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/ResultSet.html -[Row]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/Row.html -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlIdentifier.html -[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/AccessibleByName.html -[GenericType]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/reflect/GenericType.html -[CqlDuration]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/CqlDuration.html -[TupleValue]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/TupleValue.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/UdtValue.html -[SessionBuilder.addContactPoint()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoint-java.net.InetSocketAddress- -[SessionBuilder.addContactPoints()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoints-java.util.Collection- -[SessionBuilder.withLocalDatacenter()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withLocalDatacenter-java.lang.String- - -[CASSANDRA-10145]: https://issues.apache.org/jira/browse/CASSANDRA-10145 \ No newline at end of file +[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html +[CqlSession#builder()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html#builder-- +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html +[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html +[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/AccessibleByName.html +[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html +[CqlDuration]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/CqlDuration.html +[CqlVector]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/CqlVector.html +[TupleValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/TupleValue.html +[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html +[SessionBuilder.addContactPoint()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoint-java.net.InetSocketAddress- +[SessionBuilder.addContactPoints()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoints-java.util.Collection- +[SessionBuilder.withLocalDatacenter()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withLocalDatacenter-java.lang.String- + +[CASSANDRA-10145]: https://issues.apache.org/jira/browse/CASSANDRA-10145 diff --git a/manual/core/address_resolution/README.md 
b/manual/core/address_resolution/README.md
index 7ed2d77c8f4..5b2536feb18 100644
--- a/manual/core/address_resolution/README.md
+++ b/manual/core/address_resolution/README.md
@@ -1,5 +1,35 @@
+
+
 ## Address resolution
 
+### Quick overview
+
+The driver uses `system.peers.rpc-address` to connect to newly discovered nodes. For special network
+topologies, an address translation component can be plugged in.
+
+* `advanced.address-translator` in the configuration.
+* none by default. Also available: fixed hostname (for proxy setups), subnet-based, EC2-specific
+  (for deployments that span multiple regions), or write your own.
+
+-----
+
 Each node in the Cassandra cluster is uniquely identified by an IP address that the driver will use
 to establish connections.
 
@@ -88,6 +118,55 @@ datastax-java-driver.advanced.address-translator.class = com.mycompany.MyAddress
 Note: the contact points provided while creating the `CqlSession` are not translated, only
 addresses retrieved from or sent by Cassandra nodes are.
 
+### Fixed proxy hostname
+
+If your client applications access Cassandra through some kind of proxy (e.g. with AWS PrivateLink, where all Cassandra
+nodes are exposed via a single hostname pointing to an AWS endpoint), you can configure the driver with
+`FixedHostNameAddressTranslator` to translate all node addresses to that same proxy hostname, whatever IP
+address a node broadcasts, while still using each node's native transport port.
+
+To use it, specify the following in the [configuration](../configuration/):
+
+```
+datastax-java-driver.advanced.address-translator {
+  class = FixedHostNameAddressTranslator
+  advertised-hostname = proxyhostname
+}
+```
+
+### Fixed proxy hostname per subnet
+
+When running Cassandra in a private network and accessing it from outside of that private network via some kind of
+proxy, one option is to use `FixedHostNameAddressTranslator`. But for multi-datacenter Cassandra deployments, you
+often want more control over routing queries to a specific datacenter (e.g. to optimize latencies), which requires
+setting up a separate proxy per datacenter.
+
+Normally, the nodes of each Cassandra datacenter are deployed to a different subnet, both to support internode
+communication in the cluster and to avoid IP address collisions. So when Cassandra broadcasts its nodes' IP addresses,
+you can determine which datacenter a node belongs to by checking its IP address against that datacenter's subnet.
+
+For such scenarios, you can use `SubnetAddressTranslator` to translate node IPs to the proxy address
+associated with their datacenter.
+
+To use it, specify the following in the [configuration](../configuration/):
+
+```
+datastax-java-driver.advanced.address-translator {
+  class = SubnetAddressTranslator
+  subnet-addresses {
+    "100.64.0.0/15" = "cassandra.datacenter1.com:9042"
+    "100.66.0.0/15" = "cassandra.datacenter2.com:9042"
+    # IPv6 example:
+    # "::ffff:6440:0/111" = "cassandra.datacenter1.com:9042"
+    # "::ffff:6442:0/111" = "cassandra.datacenter2.com:9042"
+  }
+  # Optional. When configured, addresses not matching the configured subnets are translated to this address.
+  default-address = "cassandra.datacenter1.com:9042"
+  # Whether to resolve the addresses once on initialization (if true) or on each node (re-)connection (if false).
+  # If not configured, defaults to false.
+  resolve-addresses = false
+}
+```
+
+Such a setup is common when running Cassandra on Kubernetes with [k8ssandra](https://docs.k8ssandra.io/).
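+
+Both translators above, like the EC2-specific one below, ship with the driver. As mentioned earlier,
+you can also register your own implementation with `advanced.address-translator.class`. A minimal
+sketch (the class name and proxy hostname are hypothetical):
+
+```java
+import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator;
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import java.net.InetSocketAddress;
+
+public class MyAddressTranslator implements AddressTranslator {
+
+  // The driver instantiates the translator reflectively, passing the context to this constructor:
+  public MyAddressTranslator(DriverContext context) {}
+
+  @Override
+  public InetSocketAddress translate(InetSocketAddress address) {
+    // e.g. route every node through the same proxy, preserving the node's port:
+    return new InetSocketAddress("proxy.example.com", address.getPort());
+  }
+
+  @Override
+  public void close() {}
+}
+```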
+ ### EC2 multi-region If you deploy both Cassandra and client applications on Amazon EC2, and your cluster spans multiple regions, you'll have @@ -96,7 +175,7 @@ to configure your Cassandra nodes to broadcast public RPC addresses. However, this is not always the most cost-effective: if a client and a node are in the same region, it would be cheaper to connect over the private IP. Ideally, you'd want to pick the best address in each case. -The driver provides [Ec2MultiRegionAddressTranslator] which does exactly that. To use it, specify the following in +The driver provides `Ec2MultiRegionAddressTranslator` which does exactly that. To use it, specify the following in the [configuration](../configuration/): ``` @@ -113,8 +192,7 @@ Cassandra node: domain name of the target instance. Then it performs a forward DNS lookup of the domain name; the EC2 DNS does the private/public switch automatically based on location). -[AddressTranslator]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.html -[Ec2MultiRegionAddressTranslator]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/internal/core/addresstranslation/Ec2MultiRegionAddressTranslator.html +[AddressTranslator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/addresstranslation/AddressTranslator.html [cassandra.yaml]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html [rpc_address]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html?scroll=configCassandra_yaml__rpc_address diff --git a/manual/core/async/README.md b/manual/core/async/README.md index e6625704d83..5b4bac3dccf 100644 --- a/manual/core/async/README.md +++ b/manual/core/async/README.md @@ -1,5 +1,34 @@ + + ## Asynchronous programming +### Quick overview + +Async driver methods return Java 8's [CompletionStage]. + +* don't call synchronous methods from asynchronous callbacks (the driver detects that and throws). +* callbacks execute on I/O threads: consider providing your own executor for expensive computations. +* be careful not to accidentally ignore errors thrown from callbacks. + +----- + The driver exposes an asynchronous API that allows you to write fully non-blocking programs. Asynchronous methods return instances of the JDK's [CompletionStage], that can be conveniently chained and composed. @@ -51,8 +80,12 @@ resultStage.thenAccept(resultSet -> System.out.println(Thread.currentThread().ge // prints s0-io-n (I/O pool thread) ``` -As long as you use the asynchronous API, the driver never blocks. You can safely call a driver -method from inside a callback: +As long as you use the asynchronous API, the driver will behave in a non-blocking manner: its +internal threads will almost never block. There are a few exceptions to the rule though: see the +manual page on [non-blocking programming](../non_blocking) for details. + +Because the asynchronous API is non-blocking, you can safely call a driver method from inside a +callback, even when the callback's execution is triggered by a future returned by the driver: ```java // Get the department id for a given user: @@ -193,4 +226,4 @@ documentation for more details and an example. 
[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html \ No newline at end of file +[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html diff --git a/manual/core/authentication/README.md b/manual/core/authentication/README.md index b79a0a28801..516e47f558f 100644 --- a/manual/core/authentication/README.md +++ b/manual/core/authentication/README.md @@ -1,7 +1,58 @@ + + ## Authentication -Cassandra's binary protocol supports SASL-based authentication. To enable it, define an -`auth-provider` section in the [configuration](../configuration/) +### Quick overview + +* `advanced.auth-provider` in the configuration. +* disabled by default. Also available: plain-text credentials, GSSAPI (DSE only), or write your own. +* can also be defined programmatically: + [CqlSession.builder().withAuthCredentials][SessionBuilder.withAuthCredentials] or + [CqlSession.builder().withAuthProvider][SessionBuilder.withAuthProvider]. + +----- + +Cassandra's binary protocol supports [SASL]-based authentication. To use it, you must provide an +*auth provider* that will authenticate with the server every time a new connection gets established. + +This can be done in two ways: + +### In the configuration + +Define an `auth-provider` section in the [configuration](../configuration/): + +``` +datastax-java-driver { + advanced.auth-provider { + class = ... + } +} +``` + +The auth provider must be configured before opening a session, it cannot be changed at runtime. + +#### Plain text + +`PlainTextAuthProvider` supports simple username/password authentication (intended to work with the +server-side `PasswordAuthenticator`). The credentials can be changed at runtime, they will be used +for new connection attempts once the configuration gets reloaded. ``` datastax-java-driver { @@ -13,18 +64,195 @@ datastax-java-driver { } ``` -Authentication must be configured before opening a session, it cannot be changed at runtime. +When connecting to DSE, an optional `authorization-id` can also be specified. It will be used for +proxy authentication (logging in as another user or role). If you try to use this feature with an +authenticator that doesn't support it, the authorization id will be ignored. + +``` +datastax-java-driver { + advanced.auth-provider { + class = PlainTextAuthProvider + username = user + password = pass + authorization-id = otherUserOrRole + } +} +``` + +Note that, for backward compatibility with previous driver versions, you can also use the class name +`DsePlainTextAuthProvider` to enable this provider. + +#### GSSAPI (DSE only) + +`DseGssApiAuthProvider` supports GSSAPI authentication against a DSE cluster secured with Kerberos: + +``` +datastax-java-driver { + advanced.auth-provider { + class = DseGssApiAuthProvider + login-configuration { + principal = "user principal here ex cassandra@DATASTAX.COM" + useKeyTab = "true" + refreshKrb5Config = "true" + keyTab = "Path to keytab file here" + } + } + } +``` + +See the comments in [reference.conf] for more details. -[PlainTextAuthProvider] is provided out of the box, for simple username/password authentication -(intended to work with the server-side `PasswordAuthenticator`). The credentials can be changed at -runtime, they will be used for new connection attempts once the configuration gets reloaded. 
+#### Custom You can also write your own provider; it must implement [AuthProvider] and declare a public constructor with a [DriverContext] argument. +``` +datastax-java-driver { + advanced.auth-provider { + class = com.mycompany.MyCustomAuthProvider + ... // any custom options your provider might use + } +} +``` + +### Programmatically + +You can also pass an authenticator instance while building the session: + +```java +CqlSession session = + CqlSession.builder() + .withAuthProvider(new MyCustomAuthProvider()) + .build(); +``` + +The driver also offers a simple, built-in plain text authentication provider: +[ProgrammaticPlainTextAuthProvider]. The following is equivalent to using `PlainTextAuthProvider` in +the configuration: + +```java +AuthProvider authProvider = new ProgrammaticPlainTextAuthProvider("user", "pass"); + +CqlSession session = + CqlSession.builder() + .withAuthProvider(authProvider) + .build(); +``` + +For convenience, there are shortcuts that take the credentials directly: + +```java +CqlSession session = + CqlSession.builder() + .withAuthCredentials("user", "pass") + .build(); + +// With proxy authentication (DSE only) +CqlSession session = + CqlSession.builder() + .withAuthCredentials("user", "pass", "otherUserOrRole") + .build(); +``` + +One downside of the driver's built-in authentication providers is that the credentials are stored in +clear text in memory; this means they are vulnerable to an attacker who is able to perform memory +dumps. If this is not acceptable for you, consider writing your own [AuthProvider] implementation; +[PlainTextAuthProviderBase] is a good starting point. + +Similarly, [ProgrammaticDseGssApiAuthProvider] lets you configure GSSAPI programmatically: + +```java +import com.datastax.dse.driver.api.core.auth.DseGssApiAuthProviderBase.GssApiOptions; + +javax.security.auth.Subject subject = ...; // do your Kerberos configuration here + +GssApiOptions options = GssApiOptions.builder().withSubject(subject).build(); +CqlSession session = CqlSession.builder() + .withAuthProvider(new ProgrammaticDseGssApiAuthProvider(options)) + .build(); +``` + +For more complex needs (e.g. if building the options once and reusing them doesn't work for you), +you can subclass [DseGssApiAuthProviderBase]. + +### Proxy authentication + +DSE allows a user to connect as another user or role: + +``` +-- Allow bob to connect as alice: +GRANT PROXY.LOGIN ON ROLE 'alice' TO 'bob' +``` + +Once connected, all authorization checks will be performed against the proxy role (alice in this +example). + +To use proxy authentication with the driver, you need to provide the **authorization-id**, in other +words the name of the role you want to connect as. + +Example for plain text authentication: + +``` +datastax-java-driver { + advanced.auth-provider { + class = PlainTextAuthProvider + username = bob + password = bob's password + authorization-id = alice + } + } +``` + +With the GSSAPI (Kerberos) provider: + +``` +datastax-java-driver { + advanced.auth-provider { + class = DseGssApiAuthProvider + authorization-id = alice + login-configuration { + principal = "user principal here ex bob@DATASTAX.COM" + useKeyTab = "true" + refreshKrb5Config = "true" + keyTab = "Path to keytab file here" + } + } + } +``` + +### Proxy execution + +Proxy execution is similar to proxy authentication, but it applies to a single query, not the whole +session. 
+
+```
+-- Allow bob to execute queries as alice:
+GRANT PROXY.EXECUTE ON ROLE 'alice' TO 'bob'
+```
+
+For this scenario, you would **not** add the `authorization-id = alice` to your configuration.
+Instead, use [ProxyAuthentication.executeAs] to wrap your query with the correct authorization for
+the execution:
+
+```java
+import com.datastax.dse.driver.api.core.auth.ProxyAuthentication;
+
+SimpleStatement statement = SimpleStatement.newInstance("some query");
+// executeAs returns a new instance, you need to re-assign
+statement = ProxyAuthentication.executeAs("alice", statement);
+session.execute(statement);
+```

[SASL]: https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer
-[AuthProvider]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/auth/AuthProvider.html
-[DriverContext]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/context/DriverContext.html
-[PlainTextAuthProvider]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/internal/core/auth/PlainTextAuthProvider.html
+[AuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/auth/AuthProvider.html
+[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html
+[PlainTextAuthProviderBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/auth/PlainTextAuthProviderBase.html
+[ProgrammaticPlainTextAuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/auth/ProgrammaticPlainTextAuthProvider.html
+[DseGssApiAuthProviderBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/auth/DseGssApiAuthProviderBase.html
+[ProgrammaticDseGssApiAuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/auth/ProgrammaticDseGssApiAuthProvider.html
+[ProxyAuthentication.executeAs]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/auth/ProxyAuthentication.html#executeAs-java.lang.String-StatementT-
+[SessionBuilder.withAuthCredentials]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withAuthCredentials-java.lang.String-java.lang.String-
+[SessionBuilder.withAuthProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withAuthProvider-com.datastax.oss.driver.api.core.auth.AuthProvider-
+[reference.conf]: ../configuration/reference/

diff --git a/manual/core/bom/README.md b/manual/core/bom/README.md
new file mode 100644
index 00000000000..235edcf632c
--- /dev/null
+++ b/manual/core/bom/README.md
@@ -0,0 +1,126 @@
+
+## Bill of Materials (BOM)
+
+A "Bill Of Materials" is a special Maven descriptor that defines the versions of a set of related
+artifacts.
+
+To import the driver's BOM, add the following section in your application's own POM:
+
+```xml
+<project>
+  ...
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <groupId>org.apache.cassandra</groupId>
+        <artifactId>java-driver-bom</artifactId>
+        <version>4.17.0</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+</project>
+```
+
+This allows you to omit the version when you later reference the driver artifacts:
+
+```xml
+<project>
+  ...
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.cassandra</groupId>
+      <artifactId>java-driver-query-builder</artifactId>
+    </dependency>
+  </dependencies>
+</project>
+```
+
+The advantage is that this also applies to transitive dependencies.
For example, if there is a third-party library X that depends on `java-driver-core`, and you add a
+dependency on X in this project, `java-driver-core` will be set to the BOM version, regardless of
+which version X declares in its POM. The driver artifacts stay in sync, no matter how they were
+pulled into the project.
+
+### BOM and mapper processor
+
+If you are using the driver's [object mapper](../../mapper), our recommendation is to declare the
+mapper processor in the [annotationProcessorPaths](../../mapper/config/#maven) section of the
+compiler plugin configuration. Unfortunately, `<dependencyManagement>` versions don't work there;
+this is a known Maven issue ([MCOMPILER-391]).
+
+As a workaround, you can either declare the mapper processor as a regular dependency in the provided
+scope:
+
+```xml
+<dependencies>
+  <dependency>
+    <groupId>org.apache.cassandra</groupId>
+    <artifactId>java-driver-mapper-processor</artifactId>
+    <scope>provided</scope>
+  </dependency>
+</dependencies>
+```
+
+Or keep it in the compiler plugin, but repeat the version explicitly. In that case, it's probably a
+good idea to extract a property to keep it in sync with the BOM:
+
+```xml
+<project>
+  <properties>
+    <java-driver.version>4.17.0</java-driver.version>
+  </properties>
+
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <groupId>org.apache.cassandra</groupId>
+        <artifactId>java-driver-bom</artifactId>
+        <version>${java-driver.version}</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.cassandra</groupId>
+      <artifactId>java-driver-mapper-runtime</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <annotationProcessorPaths>
+            <path>
+              <groupId>org.apache.cassandra</groupId>
+              <artifactId>java-driver-mapper-processor</artifactId>
+              <version>${java-driver.version}</version>
+            </path>
+          </annotationProcessorPaths>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
+```
+
+[MCOMPILER-391]: https://issues.apache.org/jira/browse/MCOMPILER-391

diff --git a/manual/core/compression/README.md b/manual/core/compression/README.md
index 59c065ab7fb..9f7ae3c4854 100644
--- a/manual/core/compression/README.md
+++ b/manual/core/compression/README.md
@@ -1,5 +1,34 @@
+
 ## Compression
 
+### Quick overview
+
+Compress request and response bodies to save bandwidth.
+
+* `advanced.protocol.compression` in the configuration.
+* disabled by default. Also available: LZ4, Snappy.
+* your application **must** re-declare an explicit dependency on the compression library.
+
+-----
+
 Cassandra's binary protocol supports optional compression of requests and responses. This reduces
 network traffic at the cost of a slight CPU overhead, therefore it will likely be beneficial when
 you have larger payloads, such as:
@@ -17,8 +46,7 @@ datastax-java-driver {
 
 Compression must be set before opening a session, it cannot be changed at runtime.
 
-
-Two algorithms are supported out of the box: [LZ4](https://github.com/jpountz/lz4-java) and
+Two algorithms are supported out of the box: [LZ4](https://github.com/yawkat/lz4-java) and
 [Snappy](http://google.github.io/snappy/). The LZ4 implementation is a good first choice; it offers
 fallback implementations in case native libraries fail to load and
 [benchmarks](http://java-performance.info/performance-general-compression/) suggest that it offers
@@ -26,7 +54,8 @@ better performance and compression ratios over Snappy.
 
 Both implementations rely on third-party libraries, declared by the driver as *optional*
 dependencies; if you enable compression, you need to explicitly depend on the corresponding library
-to pull it into your project.
+to pull it into your project (see the [Integration>Driver
+dependencies](../integration/#driver-dependencies) section for more details).
 
 ### LZ4
 
@@ -34,9 +63,9 @@ Dependency:
 
 ```xml
 <dependency>
-  <groupId>org.lz4</groupId>
+  <groupId>at.yawk.lz4</groupId>
   <artifactId>lz4-java</artifactId>
-  <version>1.4.1</version>
+  <version>1.10.1</version>
 </dependency>
 ```
@@ -68,6 +97,8 @@ Dependency:
 ```
 
+**Important: Snappy is not supported when building a [GraalVM native image](../graalvm).**
+
 Always double-check the exact Snappy version needed; you can find it in the driver's [parent POM].
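+For reference, here is a minimal sketch of enabling compression in `application.conf` (as noted in
+the quick overview, the option is `advanced.protocol.compression`; `lz4` and `snappy` are the two
+built-in values):
+
+```
+datastax-java-driver {
+  advanced.protocol {
+    compression = lz4 // or snappy
+  }
+}
+```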
-[parent POM]: https://search.maven.org/#artifactdetails%7Ccom.datastax.oss%7Cjava-driver-parent%7C4.1.0%7Cpom
\ No newline at end of file
+[parent POM]: https://search.maven.org/search?q=g:com.datastax.oss%20AND%20a:java-driver-parent&core=gav

diff --git a/manual/core/configuration/README.md b/manual/core/configuration/README.md
index e87b0ed25fe..deefadbe3d4 100644
--- a/manual/core/configuration/README.md
+++ b/manual/core/configuration/README.md
@@ -1,13 +1,38 @@
+
 ## Configuration
 
+### Quick overview
+
 The driver's configuration is composed of options, organized in a hierarchical manner. Optionally,
 it can define *profiles* that customize a set of options for a particular kind of request.
 
-The default implementation is based on the Typesafe Config framework. It can be completely
-overridden if needed.
-
-For a complete list of built-in options, see the [reference configuration][reference.conf].
+* the default implementation is based on the Typesafe Config framework:
+  * the driver JAR comes with a [reference.conf] file that defines the defaults.
+  * you can add an `application.conf` file in the classpath (or an absolute path, or a URL). It
+    only needs to contain the options that you override.
+  * hot reloading is supported out of the box.
+* the config mechanism can be completely overridden by implementing a set of driver interfaces
+  ([DriverConfig], [DriverExecutionProfile] and [DriverConfigLoader]).
+
+-----
 
 ### Concepts
@@ -345,9 +370,28 @@ CqlSession session = CqlSession.builder().withConfigLoader(loader).build();
 
 If Typesafe Config doesn't work for you, it is possible to get rid of it entirely.
 
-You will need to provide your own implementations of [DriverConfig] and [DriverExecutionProfile].
-Then write a [DriverConfigLoader] and pass it to the session at initialization, as shown in the
-previous sections. Study the built-in implementation (package
+Start by excluding Typesafe Config from the list of dependencies required by the driver; if you are
+using Maven, this can be achieved as follows:
+
+```xml
+<dependencies>
+  <dependency>
+    <groupId>org.apache.cassandra</groupId>
+    <artifactId>java-driver-core</artifactId>
+    <version>...</version>
+    <exclusions>
+      <exclusion>
+        <groupId>com.typesafe</groupId>
+        <artifactId>config</artifactId>
+      </exclusion>
+    </exclusions>
+  </dependency>
+</dependencies>
+```
+
+Next, you will need to provide your own implementations of [DriverConfig] and
+[DriverExecutionProfile]. Then write a [DriverConfigLoader] and pass it to the session at
+initialization, as shown in the previous sections. Study the built-in implementation (package
 `com.datastax.oss.driver.internal.core.config.typesafe`) for reference.
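+For comparison, the driver's built-in programmatic loader (mentioned in the previous sections via
+[DriverConfigLoader.programmaticBuilder]) is a useful reference; here is a minimal sketch. Note
+that it still relies on Typesafe Config internally, so treat it as a starting point to study rather
+than a Typesafe-free replacement:
+
+```java
+import java.time.Duration;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
+
+// Build the configuration entirely in code; REQUEST_TIMEOUT is just an arbitrary example option.
+DriverConfigLoader loader =
+    DriverConfigLoader.programmaticBuilder()
+        .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5))
+        .build();
+CqlSession session = CqlSession.builder().withConfigLoader(loader).build();
+```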
Reloading is not mandatory: you can choose not to implement it, and the driver will simply keep @@ -495,16 +539,16 @@ config.getDefaultProfile().getString(MyCustomOption.ADMIN_EMAIL); config.getDefaultProfile().getInt(MyCustomOption.AWESOMENESS_FACTOR); ``` -[DriverConfig]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverConfig.html -[DriverExecutionProfile]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.html -[DriverContext]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/context/DriverContext.html -[DriverOption]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverOption.html -[DefaultDriverOption]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DefaultDriverOption.html -[DriverConfigLoader]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html -[DriverConfigLoader.fromClasspath]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromClasspath-java.lang.String- -[DriverConfigLoader.fromFile]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromFile-java.io.File- -[DriverConfigLoader.fromUrl]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromUrl-java.net.URL- -[DriverConfigLoader.programmaticBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#programmaticBuilder-- +[DriverConfig]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfig.html +[DriverExecutionProfile]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverExecutionProfile.html +[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html +[DriverOption]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverOption.html +[DefaultDriverOption]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DefaultDriverOption.html +[DriverConfigLoader]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html +[DriverConfigLoader.fromClasspath]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromClasspath-java.lang.String- +[DriverConfigLoader.fromFile]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromFile-java.io.File- +[DriverConfigLoader.fromUrl]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromUrl-java.net.URL- +[DriverConfigLoader.programmaticBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#programmaticBuilder-- [Typesafe Config]: https://github.com/typesafehub/config [config standard behavior]: https://github.com/typesafehub/config#standard-behavior diff --git a/manual/core/configuration/reference/README.rst b/manual/core/configuration/reference/README.rst index e6da9306a75..d4989ecf641 100644 --- a/manual/core/configuration/reference/README.rst +++ 
b/manual/core/configuration/reference/README.rst @@ -1,3 +1,21 @@ +.. + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + Reference configuration ----------------------- diff --git a/manual/core/control_connection/README.md b/manual/core/control_connection/README.md index 0c53a34a27e..38544797aed 100644 --- a/manual/core/control_connection/README.md +++ b/manual/core/control_connection/README.md @@ -1,3 +1,22 @@ + + ## Control connection The control connection is a dedicated connection used for administrative tasks: @@ -23,4 +42,4 @@ There are a few options to fine tune the control connection behavior in the `advanced.control-connection` and `advanced.metadata` sections; see the [metadata](../metadata/) pages and the [reference configuration](../configuration/reference/) for all the details. -[Node.getOpenConnections]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#getOpenConnections-- \ No newline at end of file +[Node.getOpenConnections]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getOpenConnections-- diff --git a/manual/core/custom_codecs/README.md b/manual/core/custom_codecs/README.md index 76edfcc9cb7..f3b7be1e3d9 100644 --- a/manual/core/custom_codecs/README.md +++ b/manual/core/custom_codecs/README.md @@ -1,5 +1,44 @@ + + ## Custom codecs +### Quick overview + +Define custom Java to CQL mappings. + +* implement the [TypeCodec] interface, or use one of the alternative codecs in `ExtraTypeCodecs`. +* registering a codec: + * at init time: [CqlSession.builder().addTypeCodecs()][SessionBuilder.addTypeCodecs] + * at runtime: + + ```java + MutableCodecRegistry registry = + (MutableCodecRegistry) session.getContext().getCodecRegistry(); + registry.register(myCodec); + ``` +* using a codec: + * if already registered: `row.get("columnName", MyCustomType.class)` + * otherwise: `row.get("columnName", myCodec)` + +----- + Out of the box, the driver comes with [default CQL to Java mappings](../#cql-to-java-type-mapping). For example, if you read a CQL `text` column, it is mapped to its natural counterpart `java.lang.String`: @@ -19,8 +58,238 @@ Sometimes you might want to use different mappings, for example: Custom codecs allow you to define those dedicated mappings, and plug them into your session. +### Using alternative codecs provided by the driver + +The first thing you can do is use one of the many alternative codecs shipped with the driver. They +are exposed on the [ExtraTypeCodecs] class. In this section we are going to introduce these codecs, +then you will see how to register and use them in the next sections. + +#### Mapping CQL blobs to Java arrays + +The driver default is [TypeCodecs.BLOB], which maps CQL `blob` to Java's [java.nio.ByteBuffer]. 
+Check out our [CQL blob example] to understand how to manipulate the `ByteBuffer` API correctly.
+
+If the `ByteBuffer` API is too cumbersome for you, an alternative is to use
+[ExtraTypeCodecs.BLOB_TO_ARRAY] which maps CQL blobs to Java's `byte[]`.
+
+#### Mapping CQL lists to Java arrays
+
+By default, the driver maps CQL `list` to Java's [java.util.List]. If you prefer to deal with
+arrays, the driver offers the following codecs:
+
+1. For primitive types:
+
+   | Codec | CQL type | Java type |
+   |---|---|---|
+   | [ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY] | `list<boolean>` | `boolean[]` |
+   | [ExtraTypeCodecs.BYTE_LIST_TO_ARRAY] | `list<tinyint>` | `byte[]` |
+   | [ExtraTypeCodecs.SHORT_LIST_TO_ARRAY] | `list<smallint>` | `short[]` |
+   | [ExtraTypeCodecs.INT_LIST_TO_ARRAY] | `list<int>` | `int[]` |
+   | [ExtraTypeCodecs.LONG_LIST_TO_ARRAY] | `list<bigint>` | `long[]` |
+   | [ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY] | `list<float>` | `float[]` |
+   | [ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY] | `list<double>` | `double[]` |
+
+2. For other types, you should use [ExtraTypeCodecs.listToArrayOf(TypeCodec)]; for example, to map
+   CQL `list<text>` to `String[]`:
+
+   ```java
+   TypeCodec<String[]> stringArrayCodec = ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT);
+   ```
+
+#### Mapping CQL timestamps to Java "instant" types
+
+By default, the driver maps CQL `timestamp` to Java's [java.time.Instant] \(using
+[TypeCodecs.TIMESTAMP]). This is the most natural mapping, since neither type contains any time zone
+information: they just represent absolute points in time.
+
+The driver also provides codecs to map to a Java `long` representing the number of milliseconds
+since the epoch (this is the raw form returned by `Instant.toEpochMilli`, and also how Cassandra
+stores the value internally).
+
+In either case, you can pick the time zone that the codec will use for its [format()] and [parse()]
+methods. Note that this is only relevant for these two methods (follow the links for more
+explanations on how the driver uses them); for regular encoding and decoding, like setting a value
+on a bound statement or reading a column from a row, the time zone does not matter.
+
+| Codec | CQL type | Java type | Time zone used by `format()` and `parse()` |
+|---|---|---|---|
+| [TypeCodecs.TIMESTAMP] | `timestamp` | `Instant` | System default |
+| [ExtraTypeCodecs.TIMESTAMP_UTC] | `timestamp` | `Instant` | UTC |
+| [ExtraTypeCodecs.timestampAt(ZoneId)] | `timestamp` | `Instant` | User-provided |
+| [ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM] | `timestamp` | `long` | System default |
+| [ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC] | `timestamp` | `long` | UTC |
+| [ExtraTypeCodecs.timestampMillisAt(ZoneId)] | `timestamp` | `long` | User-provided |
+
+For example, given the schema:
+
+```
+CREATE TABLE example (k int PRIMARY KEY, ts timestamp);
+INSERT INTO example(k, ts) VALUES (1, 0);
+```
+
+When reading column `ts`, all `Instant` codecs return `Instant.ofEpochMilli(0)`. But if asked to
+format it, they behave differently:
+
+* `ExtraTypeCodecs.TIMESTAMP_UTC` returns `'1970-01-01T00:00:00.000Z'`
+* `ExtraTypeCodecs.timestampAt(ZoneId.of("Europe/Paris"))` returns `'1970-01-01T01:00:00.000+01:00'`
+
+#### Mapping CQL timestamps to `ZonedDateTime`
+
+If your application works with a single, pre-determined time zone, then you probably would like
+the driver to map `timestamp` to [java.time.ZonedDateTime] with a fixed zone.
Use one of the following codecs:
+
+| Codec | CQL type | Java type | Time zone used by all codec operations |
+|---|---|---|---|
+| [ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM] | `timestamp` | `ZonedDateTime` | System default |
+| [ExtraTypeCodecs.ZONED_TIMESTAMP_UTC] | `timestamp` | `ZonedDateTime` | UTC |
+| [ExtraTypeCodecs.zonedTimestampAt(ZoneId)] | `timestamp` | `ZonedDateTime` | User-provided |
+
+This time, the zone matters for all codec operations, including encoding and decoding. For example,
+given the schema:
+
+```
+CREATE TABLE example (k int PRIMARY KEY, ts timestamp);
+INSERT INTO example(k, ts) VALUES (1, 0);
+```
+
+When reading column `ts`:
+
+* `ExtraTypeCodecs.ZONED_TIMESTAMP_UTC` returns the same value as
+  `ZonedDateTime.parse("1970-01-01T00:00Z")`
+* `ExtraTypeCodecs.zonedTimestampAt(ZoneId.of("Europe/Paris"))` returns the same value as
+  `ZonedDateTime.parse("1970-01-01T01:00+01:00[Europe/Paris]")`
+
+These are two distinct `ZonedDateTime` instances: although they represent the same absolute point in
+time, they do not compare as equal.
+
+#### Mapping CQL timestamps to `LocalDateTime`
+
+If your application works with a single, pre-determined time zone, but only exposes local
+date-times, then you probably would like the driver to map timestamps to [java.time.LocalDateTime]
+obtained from a fixed zone. Use one of the following codecs:
+
+| Codec | CQL type | Java type | Time zone used by all codec operations |
+|---|---|---|---|
+| [ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM] | `timestamp` | `LocalDateTime` | System default |
+| [ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC] | `timestamp` | `LocalDateTime` | UTC |
+| [ExtraTypeCodecs.localTimestampAt(ZoneId)] | `timestamp` | `LocalDateTime` | User-provided |
+
+Again, the zone matters for all codec operations, including encoding and decoding. For example,
+given the schema:
+
+```
+CREATE TABLE example (k int PRIMARY KEY, ts timestamp);
+INSERT INTO example(k, ts) VALUES (1, 0);
+```
+
+When reading column `ts`:
+
+* `ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC` returns `LocalDateTime.of(1970, 1, 1, 0, 0)`
+* `ExtraTypeCodecs.localTimestampAt(ZoneId.of("Europe/Paris"))` returns `LocalDateTime.of(1970, 1,
+  1, 1, 0)`
+
+#### Storing the time zone in Cassandra
+
+If your application needs to remember the time zone that each date was entered with, you need to
+store it in the database. We suggest using a `tuple<timestamp, text>`, where the second component
+holds the [zone id][java.time.ZoneId].
+
+If you follow this guideline, then you can use [ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED] to map
+the CQL tuple to [java.time.ZonedDateTime].
+
+For example, given the schema:
+
+```
+CREATE TABLE example(k int PRIMARY KEY, zts tuple<timestamp, text>);
+INSERT INTO example (k, zts) VALUES (1, (0, 'Z'));
+INSERT INTO example (k, zts) VALUES (2, (-3600000, 'Europe/Paris'));
+```
+
+When reading column `zts`, `ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED` returns:
+
+* `ZonedDateTime.parse("1970-01-01T00:00Z")` for the first row
+* `ZonedDateTime.parse("1970-01-01T00:00+01:00[Europe/Paris]")` for the second row
+
+Each value is read back in the time zone that it was written with. But note that you can still
+compare rows on an absolute timeline with the `timestamp` component of the tuple.
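+To make the round trip concrete, here is a short sketch, assuming
+`ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED` has been registered (registration is covered in "Using
+codecs" below) and reusing the `example` table above:
+
+```java
+ZonedDateTime paris = ZonedDateTime.parse("1970-01-01T00:00+01:00[Europe/Paris]");
+
+// Writing: both the instant and the zone end up in the tuple (BoundStatement is immutable,
+// hence the re-assignment):
+boundStatement = boundStatement.set("zts", paris, ZonedDateTime.class);
+
+// Reading: the value comes back in the zone it was written with
+Row row = session.execute("SELECT zts FROM example WHERE k = 2").one();
+ZonedDateTime zts = row.get("zts", ZonedDateTime.class); // 1970-01-01T00:00+01:00[Europe/Paris]
+```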
+
+#### Mapping to `Optional` instead of `null`
+
+If you prefer to deal with [java.util.Optional] in your application instead of nulls, then you can
+use [ExtraTypeCodecs.optionalOf(TypeCodec)]:
+
+```java
+TypeCodec<Optional<UUID>> optionalUuidCodec = ExtraTypeCodecs.optionalOf(TypeCodecs.UUID);
+```
+
+Note that because the CQL native protocol does not distinguish empty collections from null
+collection references, this codec will also map empty collections to [Optional.empty()].
+
+#### Mapping Java Enums
+
+Java [Enums] can be mapped to CQL in two ways:
+
+1. By name: [ExtraTypeCodecs.enumNamesOf(Class)] will create a codec for a given `Enum` class that
+maps its constants to their [programmatic names][Enum.name()]. The corresponding CQL column must be
+of type `text`. Note that this codec relies on the enum constant names; it is therefore vital that
+enum names never change.
+1. By ordinal: [ExtraTypeCodecs.enumOrdinalsOf(Class)] will create a codec for a given `Enum` class
+that maps its constants to their [ordinal value][Enum.ordinal()]. The corresponding CQL column must
+be of type `int`.
+
+   **We strongly recommend against this approach.** It is provided for compatibility with driver 3,
+   but relying on ordinals is a bad practice: any reordering of the enum constants, or insertion
+   of a new constant before the end, will change the ordinals. The codec won't fail, but it will
+   insert different codes and corrupt your data.
+
+   If you really want to use integer codes for storage efficiency, implement an explicit mapping
+   (for example with a `toCode()` method on your enum type). It is then fairly straightforward to
+   implement a codec with [MappingCodec](#creating-custom-java-to-cql-mappings-with-mapping-codec),
+   using `TypeCodecs#INT` as the "inner" codec.
+
+For example, assuming the following enum:
+
+```java
+public enum WeekDay {
+  MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY
+}
+```
+
+You can define codecs for it the following ways:
+
+```java
+// MONDAY will be persisted as "MONDAY", TUESDAY as "TUESDAY", etc.
+TypeCodec<WeekDay> weekDaysByNameCodec = ExtraTypeCodecs.enumNamesOf(WeekDay.class);
+
+// MONDAY will be persisted as 0, TUESDAY as 1, etc.
+TypeCodec<WeekDay> weekDaysByOrdinalCodec = ExtraTypeCodecs.enumOrdinalsOf(WeekDay.class);
+```
+
+#### Mapping Json
+
+The driver provides out-of-the-box support for mapping Java objects to CQL `text` using the popular
+Jackson library. The method [ExtraTypeCodecs.json(Class)] will create a codec for a given Java class
+that maps instances of that class to Json strings, using a newly-allocated, default [ObjectMapper].
+It is also possible to pass a custom `ObjectMapper` instance using [ExtraTypeCodecs.json(Class,
+ObjectMapper)] instead.
+
+#### Mapping CQL vectors to Java arrays
+
+By default, the driver maps CQL `vector` to the [CqlVector] value type. If you prefer to deal with
+arrays, the driver offers the following codec:
+
+| Codec | CQL type | Java type |
+|-------------------------------------------|--------------------|-----------|
+| [ExtraTypeCodecs.floatVectorToArray(int)] | `vector<float, n>` | `float[]` |
+
+This release only provides a codec for vectors containing float values.
+
+### Writing codecs
+
+If none of the driver's built-in codecs above suits you, it is also possible to roll your own.
+
+To write a custom codec, implement the [TypeCodec] interface.
Here is an example that maps a CQL `int` to a Java string containing its textual representation:
@@ -79,15 +348,41 @@ important points:
 
 ### Using codecs
 
-Once you have your codec, register it when building your session:
+Once you have your codec, register it when building your session. The following example registers
+`CqlIntToStringCodec` along with a few driver-supplied alternative codecs:
 
 ```java
-CqlSession session = CqlSession.builder()
-    .addTypeCodecs(new CqlIntToStringCodec())
+enum WeekDay { MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY }
+
+class Price {
+  ... // a custom POJO that will be serialized as JSON
+}
+
+CqlSession session =
+    CqlSession.builder()
+        .addTypeCodecs(
+            new CqlIntToStringCodec(),                      // user-created codec
+            ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED,      // tuple<timestamp, text> <-> ZonedDateTime
+            ExtraTypeCodecs.BLOB_TO_ARRAY,                  // blob <-> byte[]
+            ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT), // list<text> <-> String[]
+            ExtraTypeCodecs.enumNamesOf(WeekDay.class),     // text <-> WeekDay
+            ExtraTypeCodecs.json(Price.class),              // text <-> Price
+            ExtraTypeCodecs.optionalOf(TypeCodecs.UUID)     // uuid <-> Optional<UUID>
+        )
         .build();
 ```
 
-You can now use the new mapping in your code:
+You may also add codecs to an existing session at runtime:
+
+```java
+// The cast is required for backward compatibility reasons (registry mutability was introduced in
+// 4.3.0). It is safe as long as you didn't write a custom registry implementation.
+MutableCodecRegistry registry =
+    (MutableCodecRegistry) session.getContext().getCodecRegistry();
+
+registry.register(new CqlIntToStringCodec());
+```
+
+You can now use the new mappings in your code:
 
 ```java
 // cqlsh:ks> desc table test2;
@@ -103,35 +398,79 @@ session.execute(
         .build());
 ```
 
-Custom codecs are used not only for their base type, but also recursively in collections, tuples and
-UDTs. For example, once your `int <-> String` codec is registered, you can also read a CQL
-`list<int>` as a Java `List<String>`:
+In the above example, the driver will look up a codec for CQL `int` and Java `String` in the codec
+registry, and will transparently pick `CqlIntToStringCodec` for that.
+
+So far our examples have used a Java type with dedicated accessors in the driver: `getString` and
+`setString`. But sometimes you won't find suitable accessor methods; for example, there is no
+accessor for `ZonedDateTime` or for `Optional<UUID>`, and yet we registered codecs for these types.
+
+When you want to retrieve such objects, you need a way to tell the driver which Java type you want.
+You do so by using one of the generic `get` and `set` methods:
 
 ```java
-// cqlsh:ks> desc table test3;
-// CREATE TABLE ks.test2 (k int PRIMARY KEY, v list<int>)...
-ResultSet rs = session.execute("SELECT * FROM ks.test3 WHERE k = 1");
-List<String> v = rs.one().getList("v", String.class);
-```
-
-So far our examples have used a Java type with dedicated accessors in the driver: `getString` and
-`setString`. But you can also map your own Java types. For example, let's assume you have a `Price`
-class, and have registered a codec that maps it to a particular CQL type. When reading or writing
-values, you need a way to tell the driver which Java type you want; this is done with the generic
-`get` and `set` methods with an extra *type token* arguments:
+// Assuming that ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED was registered
+// Assuming that ExtraTypeCodecs.BLOB_TO_ARRAY was registered
+// Assuming that ExtraTypeCodecs.listToArrayOf(TypeCodecs.TEXT) was registered
+
+// Reading
+ZonedDateTime v1 = row.get("v1", ZonedDateTime.class); // assuming column is of type timestamp
+byte[] v2 = row.get("v2", byte[].class);               // assuming column is of type blob
+String[] v3 = row.get("v3", String[].class);           // assuming column is of type list<text>
+
+// Writing
+boundStatement.set("v1", v1, ZonedDateTime.class);
+boundStatement.set("v2", v2, byte[].class);
+boundStatement.set("v3", v3, String[].class);
+```
+
+This is also valid for arbitrary Java types. This is particularly useful when dealing with Enums and
+JSON mappings, for example our `WeekDay` and `Price` types:
+
+```java
+// Assuming that ExtraTypeCodecs.enumNamesOf(WeekDay.class) was registered
+// Assuming that ExtraTypeCodecs.json(Price.class) was registered
+
+// Reading
+WeekDay v1 = row.get("v1", WeekDay.class); // assuming column is of type text
+Price v2 = row.get("v2", Price.class);     // assuming column is of type text
+
+// Writing
+boundStatement.set("v1", v1, WeekDay.class);
+boundStatement.set("v2", v2, Price.class);
+```
+
+Note that, because the underlying CQL type is `text`, you can still retrieve the column's contents
+as a plain string:
 
 ```java
-GenericType<Price> priceType = GenericType.of(Price.class);
+
+// Reading
+String enumName = row.getString("v1");
+String priceJson = row.getString("v2");
+
+// Writing
+boundStatement.setString("v1", enumName);
+boundStatement.setString("v2", priceJson);
+```
+
+And finally, for `Optional<UUID>`, you will need the `get` and `set` methods with an extra *type
+token* argument, because `Optional` is a parameterized type:
+
+```java
+// Assuming that ExtraTypeCodecs.optionalOf(TypeCodecs.UUID) was registered
 
 // Reading
-Price price = row.get("v", priceType);
+Optional<UUID> opt = row.get("v", GenericType.optionalOf(UUID.class));
 
 // Writing
-boundStatement.set("v", price, priceType);
+boundStatement.set("v", opt, GenericType.optionalOf(UUID.class));
 ```
 
 Type tokens are instances of [GenericType]. They are immutable and thread-safe, you should store
-them as reusable constants. Generic Java types are fully supported, using the following pattern:
+them as reusable constants. The `GenericType` class itself has constants and factory methods to help
+create `GenericType` objects for common types. If you don't see the type you are looking for, a
+type token for any Java type can be created using the following pattern:
 
 ```java
 // Notice the '{}': this is an anonymous inner class
@@ -140,6 +479,25 @@ GenericType<Foo<Bar>> fooBarType = new GenericType<Foo<Bar>>(){};
 Foo<Bar> v = row.get("v", fooBarType);
 ```
 
+Custom codecs are used not only for their base type, but also recursively in collections, tuples and
+UDTs.
For example, once your Json codec for the `Price` class is registered, you can also read a CQL
+`list<text>` as a Java `List<Price>`:
+
+```java
+// Assuming that ExtraTypeCodecs.json(Price.class) was registered
+// Assuming that each element of the list column is a valid Json string
+
+// Reading
+List<Price> prices1 = row.getList("v", Price.class);
+// alternative method using the generic get method with type token argument:
+List<Price> prices2 = row.get("v", GenericType.listOf(Price.class));
+
+// Writing
+boundStatement.setList("v", prices1, Price.class);
+// alternative method using the generic set method with type token argument:
+boundStatement.set("v", prices2, GenericType.listOf(Price.class));
+```
+
 Whenever you read or write a value, the driver tries all the built-in mappings first, followed by
 custom codecs. If two codecs can process the same mapping, the one that was registered first is
 used. Note that this means that built-in mappings can't be overridden.
@@ -159,6 +517,115 @@ String s1 = row.getString("anIntColumn"); // int -> String, will decode
 String s2 = row.get("anIntColumn", specialCodec); // int -> String, will decode with specialCodec
 ```
 
+By doing so, you bypass the codec registry completely and instruct the driver to use the given
+codec. Note that it is your responsibility to ensure that the codec can handle the underlying CQL
+type (this cannot be enforced at compile-time).
+
+### Creating custom Java-to-CQL mappings with `MappingCodec`
+
+The above example, `CqlIntToStringCodec`, could be rewritten to leverage [MappingCodec], an abstract
+class that ships with the driver. This class has been designed for situations where you want to
+represent a CQL type with a different Java type than the one natively supported by the driver, and
+the conversion between the two is straightforward.
+
+All you have to do is extend `MappingCodec` and implement two methods that perform the conversion
+between the supported Java type -- or "inner" type -- and the target Java type -- or "outer" type:
+
+```java
+public class CqlIntToStringCodec extends MappingCodec<Integer, String> {
+
+  public CqlIntToStringCodec() {
+    super(TypeCodecs.INT, GenericType.STRING);
+  }
+
+  @Nullable
+  @Override
+  protected String innerToOuter(@Nullable Integer value) {
+    return value == null ? null : value.toString();
+  }
+
+  @Nullable
+  @Override
+  protected Integer outerToInner(@Nullable String value) {
+    return value == null ? null : Integer.parseInt(value);
+  }
+}
+```
+
+This technique is especially useful when mapping user-defined types to Java objects. For example,
+let's assume the following user-defined type:
+
+```
+CREATE TYPE coordinates (x int, y int);
+```
+
+And let's suppose that we want to map it to the following Java class:
+
+```java
+public class Coordinates {
+  public final int x;
+  public final int y;
+  public Coordinates(int x, int y) { this.x = x; this.y = y; }
+}
+```
+
+All you have to do is create a `MappingCodec` subclass that piggybacks on an existing
+`TypeCodec<UdtValue>` for the above user-defined type:
+
+```java
+public class CoordinatesCodec extends MappingCodec<UdtValue, Coordinates> {
+
+  public CoordinatesCodec(@NonNull TypeCodec<UdtValue> innerCodec) {
+    super(innerCodec, GenericType.of(Coordinates.class));
+  }
+
+  @NonNull @Override public UserDefinedType getCqlType() {
+    return (UserDefinedType) super.getCqlType();
+  }
+
+  @Nullable @Override protected Coordinates innerToOuter(@Nullable UdtValue value) {
+    return value == null ? null : new Coordinates(value.getInt("x"), value.getInt("y"));
+  }
+
+  @Nullable @Override protected UdtValue outerToInner(@Nullable Coordinates value) {
+    return value == null ? null : getCqlType().newValue().setInt("x", value.x).setInt("y", value.y);
+  }
+}
+```
+
+Then the new mapping codec could be registered as follows:
+
+```java
+CqlSession session = ...;
+CodecRegistry codecRegistry = session.getContext().getCodecRegistry();
+// The target user-defined type
+UserDefinedType coordinatesUdt =
+    session
+        .getMetadata()
+        .getKeyspace("...")
+        .flatMap(ks -> ks.getUserDefinedType("coordinates"))
+        .orElseThrow(IllegalStateException::new);
+// The "inner" codec that handles the conversions between the CQL type and UdtValue
+TypeCodec<UdtValue> innerCodec = codecRegistry.codecFor(coordinatesUdt);
+// The mapping codec that will handle the conversions between UdtValue and Coordinates
+CoordinatesCodec coordinatesCodec = new CoordinatesCodec(innerCodec);
+// Register the new codec
+((MutableCodecRegistry) codecRegistry).register(coordinatesCodec);
+```
+
+...and used just as explained above:
+
+```java
+BoundStatement stmt = ...;
+stmt.set("coordinates", new Coordinates(10, 20), Coordinates.class);
+
+Row row = ...;
+Coordinates coordinates = row.get("coordinates", Coordinates.class);
+```
+
+Note: if you need even more advanced mapping capabilities, consider adopting
+the driver's [object mapping framework](../../mapper/).
+
 ### Subtype polymorphism
 
 Suppose the following class hierarchy:
@@ -223,6 +690,59 @@ private static String formatRow(Row row) {
 }
 ```
 
-[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html
-[GenericType]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/reflect/GenericType.html
-[TypeCodec]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html
+[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html
+[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html
+[TypeCodec]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html
+[format()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html#format-JavaTypeT-
+[parse()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodec.html#parse-java.lang.String-
+[MappingCodec]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/MappingCodec.html
+[SessionBuilder.addTypeCodecs]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addTypeCodecs-com.datastax.oss.driver.api.core.type.codec.TypeCodec...-
+
+[Enums]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html
+[Enum.name()]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#name--
+[Enum.ordinal()]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#ordinal--
+[java.nio.ByteBuffer]: https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html
+[java.util.List]: https://docs.oracle.com/javase/8/docs/api/java/util/List.html
+[java.util.Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html
+[Optional.empty()]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html#empty--
+[java.time.Instant]: https://docs.oracle.com/javase/8/docs/api/java/time/Instant.html +[java.time.ZonedDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/ZonedDateTime.html +[java.time.LocalDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDateTime.html +[java.time.ZoneId]: https://docs.oracle.com/javase/8/docs/api/java/time/ZoneId.html + +[ExtraTypeCodecs]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html +[ExtraTypeCodecs.BLOB_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#BLOB_TO_ARRAY +[ExtraTypeCodecs.BOOLEAN_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#BOOLEAN_LIST_TO_ARRAY +[ExtraTypeCodecs.BYTE_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#BYTE_LIST_TO_ARRAY +[ExtraTypeCodecs.SHORT_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#SHORT_LIST_TO_ARRAY +[ExtraTypeCodecs.INT_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#INT_LIST_TO_ARRAY +[ExtraTypeCodecs.LONG_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#LONG_LIST_TO_ARRAY +[ExtraTypeCodecs.FLOAT_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#FLOAT_LIST_TO_ARRAY +[ExtraTypeCodecs.DOUBLE_LIST_TO_ARRAY]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#DOUBLE_LIST_TO_ARRAY +[ExtraTypeCodecs.listToArrayOf(TypeCodec)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#listToArrayOf-com.datastax.oss.driver.api.core.type.codec.TypeCodec- +[ExtraTypeCodecs.TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#TIMESTAMP_UTC +[ExtraTypeCodecs.timestampAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#timestampAt-java.time.ZoneId- +[ExtraTypeCodecs.TIMESTAMP_MILLIS_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#TIMESTAMP_MILLIS_SYSTEM +[ExtraTypeCodecs.TIMESTAMP_MILLIS_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#TIMESTAMP_MILLIS_UTC +[ExtraTypeCodecs.timestampMillisAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#timestampMillisAt-java.time.ZoneId- +[ExtraTypeCodecs.ZONED_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#ZONED_TIMESTAMP_SYSTEM +[ExtraTypeCodecs.ZONED_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#ZONED_TIMESTAMP_UTC +[ExtraTypeCodecs.zonedTimestampAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#zonedTimestampAt-java.time.ZoneId- 
+[ExtraTypeCodecs.LOCAL_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#LOCAL_TIMESTAMP_SYSTEM +[ExtraTypeCodecs.LOCAL_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#LOCAL_TIMESTAMP_UTC +[ExtraTypeCodecs.localTimestampAt(ZoneId)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#localTimestampAt-java.time.ZoneId- +[ExtraTypeCodecs.ZONED_TIMESTAMP_PERSISTED]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#ZONED_TIMESTAMP_PERSISTED +[ExtraTypeCodecs.optionalOf(TypeCodec)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#optionalOf-com.datastax.oss.driver.api.core.type.codec.TypeCodec- +[ExtraTypeCodecs.enumNamesOf(Class)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#enumNamesOf-java.lang.Class- +[ExtraTypeCodecs.enumOrdinalsOf(Class)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#enumOrdinalsOf-java.lang.Class- +[ExtraTypeCodecs.json(Class)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#json-java.lang.Class- +[ExtraTypeCodecs.json(Class, ObjectMapper)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#json-java.lang.Class-com.fasterxml.jackson.databind.ObjectMapper- +[ExtraTypeCodecs.floatVectorToArray(int)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/ExtraTypeCodecs.html#floatVectorToArray-int- + +[TypeCodecs.BLOB]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#BLOB +[TypeCodecs.TIMESTAMP]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#TIMESTAMP + + +[ObjectMapper]: http://fasterxml.github.io/jackson-databind/javadoc/2.10/com/fasterxml/jackson/databind/ObjectMapper.html + +[CQL blob example]: https://github.com/datastax/java-driver/blob/4.x/examples/src/main/java/com/datastax/oss/driver/examples/datatypes/Blobs.java diff --git a/manual/core/detachable_types/README.md b/manual/core/detachable_types/README.md index 9d406be586f..7968835dd8a 100644 --- a/manual/core/detachable_types/README.md +++ b/manual/core/detachable_types/README.md @@ -1,5 +1,31 @@ + + ## Detachable types +### Quick overview + +Advanced topic, only needed if you use Java serialization with driver rows or data types, or create +tuple or UDT types manually. + +----- + Some driver components need to keep an internal reference to their originating [Session]. Under specific circumstances, they can lose that reference, and you might need to reattach them. @@ -124,19 +150,19 @@ Even then, the defaults used by detached objects might be good enough for you: * the default codec registry works if you don't have any [custom codec](../custom_codecs/); * the binary encoding format is stable across modern protocol versions. The last changes were for - collection encoding from v2 to v3; Java driver 4 only supports v3 and above. When in doubt, check + collection encoding from v2 to v3; Java Driver 4 only supports v3 and above. 
When in doubt, check the "Changes" section of the [protocol specifications]. Otherwise, just make sure you reattach objects any time you deserialize them or create them from scratch. -[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html -[CodecRegistry#DEFAULT]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html#DEFAULT -[DataType]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/DataType.html -[Detachable]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/detach/Detachable.html -[Session]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html -[ColumnDefinition]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/ColumnDefinition.html -[Row]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/Row.html +[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html +[CodecRegistry#DEFAULT]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html#DEFAULT +[DataType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/DataType.html +[Detachable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/detach/Detachable.html +[Session]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html +[ColumnDefinition]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ColumnDefinition.html +[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html [Java serialization]: https://docs.oracle.com/javase/tutorial/jndi/objects/serial.html [protocol specifications]: https://github.com/datastax/native-protocol/tree/1.x/src/main/resources diff --git a/manual/core/dse/.nav b/manual/core/dse/.nav new file mode 100644 index 00000000000..c53a353fd33 --- /dev/null +++ b/manual/core/dse/.nav @@ -0,0 +1,2 @@ +graph +geotypes \ No newline at end of file diff --git a/manual/core/dse/README.md b/manual/core/dse/README.md new file mode 100644 index 00000000000..75abeafb3d7 --- /dev/null +++ b/manual/core/dse/README.md @@ -0,0 +1,30 @@ + + +## DSE-specific features + +Some driver features only work with DataStax Enterprise: + +* [Graph](graph/); +* [Geospatial types](geotypes/); +* Proxy and GSSAPI authentication (covered in the [Authentication](../authentication/) page). + +Note that, if you don't use these features, you might be able to exclude certain dependencies in +order to limit the number of JARs in your classpath. See the +[Integration](../integration/#driver-dependencies) page. diff --git a/manual/core/dse/geotypes/README.md b/manual/core/dse/geotypes/README.md new file mode 100644 index 00000000000..eb414de4f8d --- /dev/null +++ b/manual/core/dse/geotypes/README.md @@ -0,0 +1,194 @@ + + +## Geospatial types + +The driver comes with client-side representations of the DSE geospatial data types: [Point], +[LineString] and [Polygon]. + +Note: geospatial types require the [ESRI] library version 1.2 to be present on the classpath. 
The
+DSE driver has a non-optional dependency on that library, but if your application does not use
+geotypes at all, it is possible to exclude it to minimize the number of runtime dependencies (see
+the [Integration>Driver dependencies](../../integration/#driver-dependencies) section for
+more details). If the library cannot be found at runtime, geospatial types won't be available and a
+warning will be logged, but the driver will otherwise operate normally (this is also valid for OSGi
+deployments).
+
+### Usage in requests
+
+Geospatial types can be retrieved from query results like any other value; use the "typed" getter
+that takes the class as a second argument:
+
+```java
+// Schema: CREATE TABLE poi(id int PRIMARY KEY, location 'PointType', description text);
+
+CqlSession session = CqlSession.builder().build();
+
+Row row = session.execute("SELECT location FROM poi WHERE id = 1").one();
+Point location = row.get(0, Point.class);
+```
+
+The corresponding setter can be used for insertions:
+
+```java
+PreparedStatement pst =
+    session.prepare("INSERT INTO poi (id, location, description) VALUES (?, ?, ?)");
+session.execute(
+    pst.boundStatementBuilder()
+        .setInt("id", 2)
+        .set("location", Point.fromCoordinates(2.2945, 48.8584), Point.class)
+        .setString("description", "Eiffel Tower")
+        .build());
+```
+
+This also works with the vararg syntax where target CQL types are inferred:
+
+```java
+session.execute(pst.bind(2, Point.fromCoordinates(2.2945, 48.8584), "Eiffel Tower"));
+```
+
+### Client-side API
+
+The driver provides methods to create instances or inspect existing ones.
+
+[Point] is a trivial pair of coordinates:
+
+```java
+Point point = Point.fromCoordinates(2.2945, 48.8584);
+System.out.println(point.X());
+System.out.println(point.Y());
+```
+
+[LineString] is a series of 2 or more points:
+
+```java
+LineString lineString =
+    LineString.fromPoints(
+        Point.fromCoordinates(30, 10),
+        Point.fromCoordinates(10, 30),
+        Point.fromCoordinates(40, 40));
+
+for (Point point : lineString.getPoints()) {
+  System.out.println(point);
+}
+```
+
+[Polygon] is a planar surface in a two-dimensional XY-plane. You can build a simple polygon from a
+list of points:
+
+```java
+Polygon polygon =
+    Polygon.fromPoints(
+        Point.fromCoordinates(30, 10),
+        Point.fromCoordinates(10, 20),
+        Point.fromCoordinates(20, 40),
+        Point.fromCoordinates(40, 40));
+```
+
+In addition to its exterior boundary, a polygon can have an arbitrary number of interior rings,
+possibly nested (the first level defines "lakes" in the shape, the next level "islands" in those
+lakes, etc). To create such complex polygons, use the builder:
+
+```java
+Polygon polygon =
+    Polygon.builder()
+        .addRing(
+            Point.fromCoordinates(0, 0),
+            Point.fromCoordinates(0, 3),
+            Point.fromCoordinates(5, 3),
+            Point.fromCoordinates(5, 0))
+        .addRing(
+            Point.fromCoordinates(1, 1),
+            Point.fromCoordinates(1, 2),
+            Point.fromCoordinates(2, 2),
+            Point.fromCoordinates(2, 1))
+        .addRing(
+            Point.fromCoordinates(3, 1),
+            Point.fromCoordinates(3, 2),
+            Point.fromCoordinates(4, 2),
+            Point.fromCoordinates(4, 1))
+        .build();
+```
+
+You can then retrieve all the points with the following methods:
+
+```java
+List<Point> exteriorRing = polygon.getExteriorRing();
+
+for (List<Point> interiorRing : polygon.getInteriorRings()) {
+  ...
+}
+```
+
+Note that all rings (exterior or interior) are defined with the same builder method: you can provide
+them in any order, the implementation will figure out which is the exterior one.
In addition, points
+are always ordered counterclockwise for the exterior ring, clockwise for the first interior level,
+counterclockwise for the second level, etc. Again, this is done automatically, so you don't need to
+sort them beforehand; however, be prepared to get a different order when you read them back:
+
+```java
+Polygon polygon =
+    Polygon.fromPoints(
+        // Clockwise:
+        Point.fromCoordinates(0, 0),
+        Point.fromCoordinates(0, 3),
+        Point.fromCoordinates(5, 3),
+        Point.fromCoordinates(5, 0));
+
+System.out.println(polygon);
+// Counterclockwise:
+// POLYGON ((0 0, 5 0, 5 3, 0 3, 0 0))
+```
+
+All geospatial types interoperate with three standard formats:
+
+* [Well-known text]\:
+
+  ```java
+  Point point = Point.fromWellKnownText("POINT (0 1)");
+  System.out.println(point.asWellKnownText());
+  ```
+
+* [Well-known binary]\:
+
+  ```java
+  import com.datastax.oss.protocol.internal.util.Bytes;
+
+  Point point =
+      Point.fromWellKnownBinary(
+          Bytes.fromHexString("0x01010000000000000000000000000000000000f03f"));
+  System.out.println(Bytes.toHexString(point.asWellKnownBinary()));
+  ```
+
+* [GeoJSON]\:
+
+  ```java
+  Point point = Point.fromGeoJson("{\"type\":\"Point\",\"coordinates\":[0.0,1.0]}");
+  System.out.println(point.asGeoJson());
+  ```
+
+[ESRI]: https://github.com/Esri/geometry-api-java
+
+[LineString]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/data/geometry/LineString.html
+[Point]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/data/geometry/Point.html
+[Polygon]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/data/geometry/Polygon.html
+
+[Well-known text]: https://en.wikipedia.org/wiki/Well-known_text
+[Well-known binary]: https://en.wikipedia.org/wiki/Well-known_text#Well-known_binary
+[GeoJSON]: https://tools.ietf.org/html/rfc7946
diff --git a/manual/core/dse/graph/.nav b/manual/core/dse/graph/.nav
new file mode 100644
index 00000000000..d7f30c149fc
--- /dev/null
+++ b/manual/core/dse/graph/.nav
@@ -0,0 +1,4 @@
+script
+fluent
+options
+results
\ No newline at end of file
diff --git a/manual/core/dse/graph/README.md b/manual/core/dse/graph/README.md
new file mode 100644
index 00000000000..6bcacd44c4e
--- /dev/null
+++ b/manual/core/dse/graph/README.md
@@ -0,0 +1,100 @@
+
+
+## Graph
+
+The driver provides full support for DSE graph, the distributed graph database available in DataStax
+Enterprise. The [CqlSession] interface extends [GraphSession], which adds specialized methods to
+execute requests expressed in the [Gremlin] graph traversal language.
+
+*This manual only covers driver usage; for more information about server-side configuration and data
+modeling, refer to the [DSE developer guide].*
+
+Note: graph capabilities require the [Apache TinkerPop™] library to be present on the classpath. The
+driver has a non-optional dependency on that library, but if your application does not use graph at
+all, it is possible to exclude it to minimize the number of runtime dependencies (see the
+[Integration>Driver dependencies](../../integration/#driver-dependencies) section for more
+details). If the library cannot be found at runtime, graph queries won't be available and a warning
+will be logged, but the driver will otherwise operate normally (this is also valid for OSGi
+deployments).
+
+If you do use graph, it is important to use the exact TinkerPop version that the driver was built
+against: unlike the driver, TinkerPop does not follow semantic versioning, so even a patch version
+change (e.g. 3.3.0 vs 3.3.3) could introduce incompatibilities. See the
+[Integration>Driver dependencies](../../integration/#driver-dependencies) section for the
+recommended TinkerPop version for each driver release.
+
+### Overview
+
+There are 3 ways to execute graph requests:
+
+1. Passing a Gremlin script directly in a plain Java string. We'll refer to this as the
+   [script API](script/):
+
+   ```java
+   CqlSession session = CqlSession.builder().build();
+
+   String script = "g.V().has('name', name)";
+   ScriptGraphStatement statement =
+       ScriptGraphStatement.builder(script)
+           .withQueryParam("name", "marko")
+           .build();
+
+   GraphResultSet result = session.execute(statement);
+   for (GraphNode node : result) {
+     System.out.println(node.asVertex());
+   }
+   ```
+
+2. Building a traversal with the [TinkerPop fluent API](fluent/), and [executing it
+   explicitly](fluent/explicit/) with the session:
+
+   ```java
+   import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
+
+   GraphTraversal<Vertex, Vertex> traversal = g.V().has("name", "marko");
+   FluentGraphStatement statement = FluentGraphStatement.newInstance(traversal);
+
+   GraphResultSet result = session.execute(statement);
+   for (GraphNode node : result) {
+     System.out.println(node.asVertex());
+   }
+   ```
+
+3. Building a connected traversal with the fluent API, and [executing it
+   implicitly](fluent/implicit/) by invoking a terminal step:
+
+   ```java
+   GraphTraversalSource g = DseGraph.g
+       .withRemote(DseGraph.remoteConnectionBuilder(session).build());
+
+   List<Vertex> vertices = g.V().has("name", "marko").toList();
+   ```
+
+All execution modes rely on the same set of [configuration options](options/).
+
+The script and explicit fluent API return driver-specific [result sets](results/). The implicit
+fluent API returns Apache TinkerPop™ types directly.
+
+[Apache TinkerPop™]: http://tinkerpop.apache.org/
+
+[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html
+[GraphSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/GraphSession.html
+
+[DSE developer guide]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/graphTOC.html
+[Gremlin]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/dseGraphAbout.html#dseGraphAbout__what-is-cql
diff --git a/manual/core/dse/graph/fluent/.nav b/manual/core/dse/graph/fluent/.nav
new file mode 100644
index 00000000000..4be448834af
--- /dev/null
+++ b/manual/core/dse/graph/fluent/.nav
@@ -0,0 +1,2 @@
+explicit
+implicit
\ No newline at end of file
diff --git a/manual/core/dse/graph/fluent/README.md b/manual/core/dse/graph/fluent/README.md
new file mode 100644
index 00000000000..c1645fdb234
--- /dev/null
+++ b/manual/core/dse/graph/fluent/README.md
@@ -0,0 +1,137 @@
+
+
+## Fluent API
+
+The driver depends on [Apache TinkerPop™], a graph computing framework that provides a fluent API to
+build Gremlin traversals. This allows you to write your graph requests directly in Java, like you
+would in a Gremlin-groovy script:
+
+```java
+// How this is initialized will depend on the execution model, see details below
+GraphTraversalSource g = ...
+
+GraphTraversal<Vertex, Vertex> traversal = g.V().has("name", "marko");
+```
+
+### Execution models
+
+There are two ways to execute fluent traversals:
+
+* [explicitly](explicit/) by wrapping a traversal into a statement and passing it to
+  `session.execute`;
+* [implicitly](implicit/) by building the traversal from a connected source, and calling a
+  terminal step.
+
+### Common topics
+
+The following apply regardless of the execution model:
+
+#### Limitations
+
+At the time of writing (DSE 6.0 / driver 4.0), some types of queries cannot be executed through the
+fluent API:
+
+* system queries (e.g. creating / dropping a graph);
+* configuration;
+* DSE graph schema queries.
+
+You'll have to use the [script API](../script) for those use cases.
+
+#### Performance considerations
+
+Before sending a fluent graph statement over the network, the driver serializes the Gremlin
+traversal into a byte array. **Traversal serialization happens on the client thread, even in
+asynchronous mode**. In other words, it is done on:
+
+* the thread that calls `session.execute` or `session.executeAsync` for explicit execution;
+* the thread that calls the terminal step for implicit execution.
+
+In practice, this shouldn't be an issue, but we've seen it become problematic in some corner cases
+of our performance benchmarks: if a single thread issues a lot of `session.executeAsync` calls in a
+tight loop, traversal serialization can dominate CPU usage on that thread, and become a bottleneck
+for request throughput.
+
+If you believe that you're running into that scenario, start by profiling your application to
+confirm that the client thread maxes out its CPU core; to solve the problem, distribute your
+`session.executeAsync` calls onto more threads.
+
+#### Domain specific languages
+
+Gremlin can be extended with domain specific languages to make traversals more natural to write. For
+example, consider the following query:
+
+```java
+g.V().hasLabel("person").has("name", "marko").
+  out("knows").hasLabel("person").has("name", "josh");
+```
+
+A "social" DSL could be written to simplify it as:
+
+```java
+socialG.persons("marko").knows("josh");
+```
+
+TinkerPop provides an annotation processor to generate a DSL from an annotated interface. This is
+covered in detail in the [TinkerPop documentation][TinkerPop DSL].
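+
+As an illustration, here is a minimal sketch of what such an annotated interface could look like,
+loosely adapted from the TinkerPop reference documentation. The `SocialTraversalDsl` name and its
+`knows` step are illustrative, and start steps like `persons` require an additional traversal
+source DSL class (described in the TinkerPop docs); the annotation processor derives the generated
+class names from the interface name:
+
+```java
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.GremlinDsl;
+import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
+import org.apache.tinkerpop.gremlin.structure.Vertex;
+
+// Compiling this interface generates SocialTraversal, SocialTraversalSource and an anonymous
+// traversal class in the same package.
+@GremlinDsl
+public interface SocialTraversalDsl<S, E> extends GraphTraversal.Admin<S, E> {
+
+  // A custom step, expressed in terms of standard Gremlin steps:
+  default GraphTraversal<S, Vertex> knows(String personName) {
+    return out("knows").hasLabel("person").has("name", personName);
+  }
+}
+```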
+
+Once your custom traversal source is generated, here's how to use it:
+
+```java
+// Non-connected source for explicit execution:
+SocialTraversalSource socialG = DseGraph.g.getGraph().traversal(SocialTraversalSource.class);
+
+// Connected source for implicit execution:
+SocialTraversalSource socialG =
+    DseGraph.g
+        .withRemote(DseGraph.remoteConnectionBuilder(session).build())
+        .getGraph()
+        .traversal(SocialTraversalSource.class);
+```
+
+#### Search and geospatial predicates
+
+All the DSE predicates are available on the driver side:
+
+* for [search][DSE search], use the [Search] class:
+
+  ```java
+  GraphTraversal<Vertex, String> traversal =
+      g.V().has("recipe", "instructions", Search.token("Saute")).values("name");
+  ```
+
+* for [geospatial queries][DSE geo], use the [Geo] class:
+
+  ```java
+  GraphTraversal<Vertex, String> traversal =
+      g.V()
+          .has(
+              "location",
+              "point",
+              Geo.inside(Geo.point(2.352222, 48.856614), 4.2, Geo.Unit.DEGREES))
+          .values("name");
+  ```
+
+[Search]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/predicates/Search.html
+[Geo]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/predicates/Geo.html
+
+[Apache TinkerPop™]: http://tinkerpop.apache.org/
+[TinkerPop DSL]: http://tinkerpop.apache.org/docs/current/reference/#dsl
+[DSE search]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/using/useSearchIndexes.html
+[DSE geo]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/using/queryGeospatial.html
diff --git a/manual/core/dse/graph/fluent/explicit/README.md b/manual/core/dse/graph/fluent/explicit/README.md
new file mode 100644
index 00000000000..163180a4a8a
--- /dev/null
+++ b/manual/core/dse/graph/fluent/explicit/README.md
@@ -0,0 +1,132 @@
+
+
+## Explicit execution
+
+Fluent traversals can be wrapped into a [FluentGraphStatement] and passed to the session:
+
+```java
+// A "dummy", non-connected traversal source that is not meant to be iterated directly, but instead
+// serves as the basis to build fluent statements:
+import static com.datastax.dse.driver.api.core.graph.DseGraph.g;
+
+GraphTraversal<Vertex, Vertex> traversal = g.V().has("name", "marko");
+FluentGraphStatement statement = FluentGraphStatement.newInstance(traversal);
+
+GraphResultSet result = session.execute(statement);
+for (GraphNode node : result) {
+  System.out.println(node.asVertex());
+}
+```
+
+### Creating fluent statements
+
+#### Factory method
+
+As shown above, [FluentGraphStatement.newInstance] creates a statement from a traversal directly.
+
+The default implementation returned by the driver is **immutable**; if you call additional methods
+on the statement -- for example to set [options](../../options/) -- each method call will create a
+new copy:
+
+```java
+FluentGraphStatement statement = FluentGraphStatement.newInstance(traversal);
+FluentGraphStatement statement2 = statement.setTimeout(Duration.ofSeconds(10));
+
+assert statement2 != statement;
+```
+
+Immutability is good because it makes statements inherently **thread-safe**: you can share them in
+your application and access them concurrently without any risk.
+
+On the other hand, it means a lot of intermediary copies if you often call methods on your
+statements. Modern VMs are normally good at dealing with such short-lived objects, but if you're
+worried about the performance impact, consider using a builder instead.
+
+Note: contrary to driver statements, TinkerPop's `GraphTraversal` is mutable and therefore not
+thread-safe.
This is fine if you just wrap a traversal into a statement and never modify it +afterwards, but be careful not to share traversals and modify them concurrently. + +#### Builder + +Instead of creating a statement directly, you can pass your traversal to +[FluentGraphStatement.builder], chain method calls to set options, and finally call `build()`: + +```java +FluentGraphStatement statement1 = + FluentGraphStatement.builder(traversal) + .withTimeout(Duration.ofSeconds(10)) + .withIdempotence(true) + .build(); +``` + +The builder implementation is **mutable**: every method call returns the same object, only one +builder instance will be created no matter how many methods you call on it. As a consequence, the +builder object is **not thread-safe**. + +You can also initialize a builder from an existing statement: it will inherit all of its options. + +```java +FluentGraphStatement statement2 = + FluentGraphStatement.builder(statement1).withTimeout(Duration.ofSeconds(20)).build(); + +assert statement2.getTraversal().equals(statement1.getTraversal()); +assert statement2.getTimeout().equals(Duration.ofSeconds(20)); // overridden by the builder +assert statement2.isIdempotent(); // because statement1 was +``` + +### Batching traversals + +[BatchGraphStatement] allows you to execute multiple mutating traversals in the same transaction. +Like other types of statements, it is immutable and thread-safe, and can be created either with a +[factory method][BatchGraphStatement.newInstance] or a [builder][BatchGraphStatement.builder]: + +```java +GraphTraversal traversal1 = g.addV("person").property("name", "batch1").property("age", 1); +GraphTraversal traversal2 = g.addV("person").property("name", "batch2").property("age", 2); + +// Each method call creates a copy: +BatchGraphStatement batch1 = BatchGraphStatement.newInstance() + .addTraversal(traversal1) + .addTraversal(traversal2); + +// Uses a single, mutable builder instance: +BatchGraphStatement batch2 = BatchGraphStatement.builder() + .addTraversal(traversal1) + .addTraversal(traversal2) + .build(); +``` + +Traversal batches are only available with DSE 6.0 or above. + +### Prepared statements + +At the time of writing (DSE 6.0), prepared graph statements are not supported yet; they will be +added in a future version. + +----- + +See also the [parent page](../) for topics common to all fluent traversals. 
+
+[FluentGraphStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.html
+[FluentGraphStatement.newInstance]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.html#newInstance-org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal-
+[FluentGraphStatement.builder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/FluentGraphStatement.html#builder-org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal-
+[BatchGraphStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.html
+[BatchGraphStatement.newInstance]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.html#newInstance--
+[BatchGraphStatement.builder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/BatchGraphStatement.html#builder--
diff --git a/manual/core/dse/graph/fluent/implicit/README.md b/manual/core/dse/graph/fluent/implicit/README.md
new file mode 100644
index 00000000000..f838c376022
--- /dev/null
+++ b/manual/core/dse/graph/fluent/implicit/README.md
@@ -0,0 +1,71 @@
+
+
+## Implicit execution
+
+Instead of passing traversals to the driver, you can create a *remote traversal source* connected to
+the DSE cluster:
+
+```java
+CqlSession session = CqlSession.builder().build();
+
+GraphTraversalSource g =
+    AnonymousTraversalSource.traversal().withRemote(DseGraph.remoteConnectionBuilder(session).build());
+```
+
+Then build traversals from that source. Whenever you reach a [terminal step] \(such as `next()`,
+`toList()`...), the DSE driver will be invoked under the covers:
+
+```java
+List<Vertex> vertices = g.V().has("name", "marko").toList();
+```
+
+This lets you use the traversal as if it were working against a local graph; all the communication
+with DSE is done transparently. Note however that the returned objects (vertices, edges...) are
+completely *detached*: even though they contain the complete data, modifications made to them will
+not be reflected on the server side.
+
+Traversal sources with different configurations can easily be created through execution profiles in
+the [configuration](../../../../configuration/):
+
+```
+datastax-java-driver {
+  profiles {
+    graph-oltp {
+      basic.graph.traversal-source = a
+      basic.graph.timeout = 30 seconds
+    }
+  }
+}
+```
+
+Pass the profile name to the remote connection builder:
+
+```java
+GraphTraversalSource a = AnonymousTraversalSource.traversal().withRemote(
+    DseGraph.remoteConnectionBuilder(session)
+        .withExecutionProfileName("graph-oltp")
+        .build());
+```
+
+-----
+
+See also the [parent page](../) for topics common to all fluent traversals.
+
+[terminal step]: http://tinkerpop.apache.org/docs/current/reference/#terminal-steps
diff --git a/manual/core/dse/graph/options/README.md b/manual/core/dse/graph/options/README.md
new file mode 100644
index 00000000000..e4649ff34f3
--- /dev/null
+++ b/manual/core/dse/graph/options/README.md
@@ -0,0 +1,179 @@
+
+
+## Graph options
+
+There are various [configuration](../../../configuration/) options that control the execution of
+graph statements. They can also be overridden programmatically on individual statements.
+
+### Setting options
+
+Given the following configuration:
+
+```
+datastax-java-driver {
+
+  basic.graph.timeout = 3 seconds
+
+  profiles {
+    graph-oltp {
+      basic.graph.timeout = 30 seconds
+    }
+  }
+}
+```
+
+This statement inherits the timeout from the default profile:
+
+```java
+ScriptGraphStatement statement = ScriptGraphStatement.newInstance("g.V().next()");
+assert statement.getTimeout().equals(Duration.ofSeconds(3));
+```
+
+This statement inherits the timeout from a named profile:
+
+```java
+ScriptGraphStatement statement =
+    ScriptGraphStatement.newInstance("g.V().next()").setExecutionProfileName("graph-oltp");
+assert statement.getTimeout().equals(Duration.ofSeconds(30));
+```
+
+This statement overrides the timeout programmatically; that takes precedence over the configuration:
+
+```java
+ScriptGraphStatement statement =
+    ScriptGraphStatement.newInstance("g.V().next()").setTimeout(Duration.ofSeconds(5));
+```
+
+Programmatic overrides are also available in statement builders:
+
+```java
+ScriptGraphStatement statement =
+    ScriptGraphStatement.builder("g.V().next()").withTimeout(Duration.ofSeconds(5)).build();
+```
+
+Whether you use the configuration or programmatic API depends on the use case; in general, we
+recommend trying execution profiles first, if you can identify static categories of statements that
+share the same options. Resort to the API for specific options that only apply to a single
+statement, or if the value is only known at runtime.
+
+### Available options
+
+#### Graph name
+
+The `basic.graph.name` option defines the name of the graph you're querying.
+
+This doesn't have to be set all the time. In fact, some queries explicitly require no graph name,
+for example those that access the `system` object. If you try to execute them with a graph name set,
+you'll get an error:
+
+```java
+// Don't do this: executing a system query with the graph name set
+ScriptGraphStatement statement =
+    ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()")
+        .setGraphName("test");
+session.execute(statement);
+// InvalidQueryException: No such property: system for class: Script2
+```
+
+If you set the graph name globally in the configuration, you'll need to unset it for system queries.
+To do that, set it to `null`, or use the more explicit equivalent `is-system-query`:
+
+```
+datastax-java-driver {
+  basic.graph.name = my_graph
+
+  profiles {
+    graph-system {
+      # Don't inherit the graph name here
+      basic.graph.is-system-query = true
+    }
+  }
+}
+```
+
+```java
+ScriptGraphStatement statement =
+    ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()")
+        .setExecutionProfileName("graph-system");
+
+// Programmatic alternative:
+ScriptGraphStatement statement =
+    ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()")
+        .setSystemQuery(true);
+```
+
+#### Traversal source
+
+`basic.graph.traversal-source` defines the underlying engine used to create traversals.
+
+Set this to `g` for regular OLTP queries, or `a` for OLAP queries.
+
+#### Consistency level
+
+Graph statements use the same option as CQL: `basic.request.consistency`.
+
+However, DSE graph also provides a finer level of tuning: a single traversal may produce multiple
+internal storage queries, some of which are reads, and others writes. The read and write consistency
+levels can be configured independently with `basic.graph.read-consistency` and
+`basic.graph.write-consistency`.
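+
+For example, a profile dedicated to bulk ingestion could relax reads while keeping stronger
+guarantees for writes. This is only an illustrative sketch -- the profile name is made up, but the
+option names are the ones described above:
+
+```
+datastax-java-driver {
+  basic.request.consistency = LOCAL_QUORUM
+
+  profiles {
+    graph-ingest {
+      basic.graph.read-consistency = LOCAL_ONE
+      basic.graph.write-consistency = LOCAL_QUORUM
+    }
+  }
+}
+```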
+ +If any of these is set, it overrides the consistency level for that type of query; otherwise, the +global option is used. + +#### Timeout + +Graph statements have a dedicated timeout option: `basic.graph.timeout`. This is because the timeout +behaves a bit differently with DSE graph: by default, it is unset and the driver will wait until the +server replies (there are server-side timeouts that limit how long the request will take). + +If a timeout is defined on the client, the driver will fail the request after that time, without +waiting for a reply. But the timeout is also sent alongside the initial request, and the server will +adjust its own timeout to ensure that it doesn't keep working for a result that the client is no +longer waiting for. + +#### Graph protocol version + +DSE graph relies on the Cassandra native protocol, but it extends it with a sub-protocol that has +its own versioning scheme. + +`advanced.graph.sub-protocol` controls the graph protocol version to use for each statement. It is +unset by default, and you should almost never have to change it: the driver sets it automatically +based on the information it knows about the server. + +There is one exception: if you use the [script API](../script/) against a legacy DSE version (5.0.3 +or older), the driver infers the wrong protocol version. This manifests as a `ClassCastException` +when you try to deserialize complex result objects, such as vertices: + +```java +GraphResultSet result = + session.execute(ScriptGraphStatement.newInstance("g.V().next()")); +result.one().asVertex(); +// ClassCastException: java.util.LinkedHashMap cannot be cast to org.apache.tinkerpop.gremlin.structure.Vertex +``` + +If you run into that situation, force the sub-protocol to `graphson-1.0` for script statements +(that's not necessary for fluent statements). + +Currently, if the Graph sub-protocol version is not specified on a given GraphStatement, and it's +not explicitly set through `advanced.graph.sub-protocol` in configuration, the version of DSE to +which the driver is connected will determine the default sub-protocol version used by the driver. +For DSE 6.8.0 and later, the driver will pick "graph-binary-1.0" as the default sub-protocol +version. For DSE 6.7.x and older (or in cases where the driver can't determine the DSE version), the +driver will pick "graphson-2.0" as the default sub-protocol version. diff --git a/manual/core/dse/graph/results/README.md b/manual/core/dse/graph/results/README.md new file mode 100644 index 00000000000..3b4d25fa012 --- /dev/null +++ b/manual/core/dse/graph/results/README.md @@ -0,0 +1,163 @@ + + +## Handling graph results + +[Script queries](../script/) and [explicit fluent traversals](../fluent/explicit/) return graph +result sets, which are essentially iterables of [GraphNode]. + +### Synchronous / asynchronous result + +Like their CQL counterparts, graph result sets come in two forms, depending on the way the query +was executed. + +* `session.execute` returns a [GraphResultSet]. It can be iterated directly, and will return the + whole result set, triggering background fetches if the query is paged: + + ```java + for (GraphNode n : resultSet) { + System.out.println(n); + } + ``` + +* `session.executeAsync` returns an [AsyncGraphResultSet]. It only holds the current page of + results, accessible via the `currentPage()` method. If the query is paged, the next pages must be + fetched explicitly using the `hasMorePages()` and `fetchNextPage()` methods. 
See [Asynchronous
+  paging](../../../paging/#asynchronous-paging) for more details about how to work with async
+  types.
+
+*Note: at the time of writing (DSE 6.0), graph queries are never paged. Results are always returned
+as a single page. However, paging is on the roadmap for a future DSE version; the driver APIs
+reflect that, to avoid breaking changes when the feature is introduced.*
+
+Both types have a `one()` method, to use when you know there is exactly one node, or are only
+interested in the first one:
+
+```java
+GraphNode n = resultSet.one();
+```
+
+### Working with graph nodes
+
+[GraphNode] wraps the responses returned by the server. Use the `asXxx()` methods to coerce a node
+to a specific type:
+
+```java
+FluentGraphStatement statement = FluentGraphStatement.newInstance(g.V().count());
+GraphNode n = session.execute(statement).one();
+System.out.printf("The graph has %s vertices%n", n.asInt());
+```
+
+If the result is an array or "object" (in the JSON sense: a collection of named fields), you can
+iterate its children:
+
+```java
+if (n.isList()) {
+  for (int i = 0; i < n.size(); i++) {
+    GraphNode child = n.getByIndex(i);
+    System.out.printf("Element at position %d: %s%n", i, child);
+  }
+
+  // Alternatively, convert to a list:
+  List l = n.asList();
+}
+
+if (n.isMap()) {
+  for (Object key : n.keys()) {
+    System.out.printf("Element at key %s: %s%n", key, n.getByKey(key));
+  }
+
+  // Alternatively, convert to a map:
+  Map m = n.asMap();
+}
+```
+
+#### Graph structural types
+
+If the traversal returns graph elements (like vertices and edges), the results can be converted to
+the corresponding TinkerPop types:
+
+```java
+GraphNode n = session.execute(FluentGraphStatement.newInstance(
+    g.V().hasLabel("test_vertex")
+)).one();
+Vertex vertex = n.asVertex();
+
+n = session.execute(FluentGraphStatement.newInstance(
+    g.V().hasLabel("test_vertex").outE()
+)).one();
+Edge edge = n.asEdge();
+
+n = session.execute(FluentGraphStatement.newInstance(
+    g.V().hasLabel("test_vertex")
+        .outE()
+        .inV()
+        .path()
+)).one();
+Path path = n.asPath();
+
+n = session.execute(FluentGraphStatement.newInstance(
+    g.V().hasLabel("test_vertex")
+        .properties("name")
+)).one();
+// .properties() returns a list of properties, so we get the first one and convert it to a
+// VertexProperty
+VertexProperty vertexProperty = n.getByIndex(0).asVertexProperty();
+```
+
+#### Data type compatibility matrix
+
+DSE graph exposes several [data types][DSE data types] when defining a schema for a graph. They
+translate into specific Java classes when the data is returned from the server.
+ +Here is an exhaustive compatibility matrix (for DSE 6.0): + +| DSE graph | Java Driver | +|------------|---------------------| +| bigint | Long | +| blob | byte[] | +| boolean | Boolean | +| date | java.time.LocalDate | +| decimal | BigDecimal | +| double | Double | +| duration | java.time.Duration | +| float | Float | +| inet | InetAddress | +| int | Integer | +| linestring | LineString | +| point | Point | +| polygon | Polygon | +| smallint | Short | +| text | String | +| time | java.time.LocalTime | +| timestamp | java.time.Instant | +| uuid | UUID | +| varint | BigInteger | + +If a type doesn't have a corresponding `asXxx()` method, use the variant that takes a type token: + +```java +UUID uuid = graphNode.as(UUID.class); +``` + +[GraphNode]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/GraphNode.html +[GraphResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/GraphResultSet.html +[AsyncGraphResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/AsyncGraphResultSet.html + +[DSE data types]: https://docs.datastax.com/en/dse/6.0/dse-dev/datastax_enterprise/graph/reference/refDSEGraphDataTypes.html diff --git a/manual/core/dse/graph/script/README.md b/manual/core/dse/graph/script/README.md new file mode 100644 index 00000000000..cec8e4e94ef --- /dev/null +++ b/manual/core/dse/graph/script/README.md @@ -0,0 +1,125 @@ + + +## Script API + +The script API handles Gremlin-groovy requests provided as plain Java strings. To execute a script, +wrap it into a [ScriptGraphStatement] and pass it to the session: + +```java +CqlSession session = CqlSession.builder().build(); + +String groovyScript = "system.graph('demo').ifNotExists().create()"; +ScriptGraphStatement statement = ScriptGraphStatement.newInstance(groovyScript); +session.execute(statement); +``` + +### Creating script statements + +#### Factory method + +As demonstrated above, the simplest way to create a script statement is to pass the Gremlin-groovy +string to [ScriptGraphStatement.newInstance]. + +The default implementation returned by the driver is **immutable**; if you call additional methods +on the statement -- for example to set [options](../options/) -- each method call will create a new +copy: + +```java +ScriptGraphStatement statement = + ScriptGraphStatement.newInstance("system.graph('demo').ifNotExists().create()"); +ScriptGraphStatement statement2 = statement.setTimeout(Duration.ofSeconds(10)); + +assert statement2 != statement; +``` + +Immutability is good because it makes statements inherently **thread-safe**: you can share them in +your application and access them concurrently without any risk. + +On the other hand, it means a lot of intermediary copies if you often call methods on your +statements. Modern VMs are normally good at dealing with such short-lived objects, but if you're +worried about the performance impact, consider using a builder instead. 
+ +#### Builder + +Instead of creating a statement directly, you can pass your Gremlin-groovy string to +[ScriptGraphStatement.builder], chain method calls to set options, and finally call `build()`: + +```java +ScriptGraphStatement statement1 = + ScriptGraphStatement.builder("system.graph('demo').ifNotExists().create()") + .withTimeout(Duration.ofSeconds(10)) + .withIdempotence(true) + .build(); +``` + +The builder implementation is **mutable**: every method call returns the same object, only one +builder instance will be created no matter how many methods you call on it. As a consequence, the +builder object is **not thread-safe**. + +You can also initialize a builder from an existing statement: it will inherit all of its options. + +```java +ScriptGraphStatement statement2 = + ScriptGraphStatement.builder(statement1).withTimeout(Duration.ofSeconds(20)).build(); + +assert statement2.getScript().equals(statement1.getScript()); +assert statement2.getTimeout().equals(Duration.ofSeconds(20)); // overridden by the builder +assert statement2.isIdempotent(); // because statement1 was +``` + +### Parameters + +Gremlin-groovy scripts accept parameters, which are always named. Note that, unlike in CQL, +placeholders are not prefixed with ":". + +To manage parameters on an existing statement, use `setQueryParam` / `removeQueryParam`: + +```java +ScriptGraphStatement statement = + ScriptGraphStatement.newInstance("g.addV(label, vertexLabel)") + .setQueryParam("vertexLabel", "test_vertex_2"); +``` + +On the builder, use `withQueryParam` / `withoutQueryParams`: + +```java +ScriptGraphStatement statement = + ScriptGraphStatement.builder("g.addV(label, vertexLabel)") + .withQueryParam("vertexLabel", "test_vertex_2") + .build(); +``` + +Alternatively, `withQueryParams` takes multiple parameters as a map. + +### Use cases for the script API + +Building requests as Java strings can be unwieldy, especially for long scripts. Besides, the script +API is a bit less performant on the server side. Therefore we recommend the +[Fluent API](../fluent/) instead for graph traversals. + +Note however that some types of queries can only be performed through the script API: + +* system queries (e.g. creating / dropping a graph); +* configuration; +* DSE graph schema queries. + +[ScriptGraphStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.html +[ScriptGraphStatement.newInstance]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.html#newInstance-java.lang.String- +[ScriptGraphStatement.builder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/graph/ScriptGraphStatement.html#builder-java.lang.String- diff --git a/manual/core/graalvm/README.md b/manual/core/graalvm/README.md new file mode 100644 index 00000000000..d20fb739f19 --- /dev/null +++ b/manual/core/graalvm/README.md @@ -0,0 +1,334 @@ + + +## GraalVM native images + +### Quick overview + +* [GraalVM native images](https://www.graalvm.org/reference-manual/native-image/) can be built with + no additional configuration starting with driver 4.13.0. +* But extra configurations are required in a few cases: + * When using [reactive programming](../reactive); + * When using [Jackson](../integration#Jackson); + * When using LZ4 [compression](../compression/); + * Depending on the [logging backend](../logging) in use. +* DSE-specific features: + * [Geospatial types](../dse/geotypes) are supported. 
+  * [DSE Graph](../dse/graph) is not officially supported, although it may work.
+* The [shaded jar](../shaded_jar) is not officially supported, although it may work.
+
+-----
+
+### Concepts
+
+Starting with version 4.13.0, the driver ships with [embedded GraalVM configuration files] that
+allow GraalVM native images including the driver to be built without hassle, barring a few
+exceptions and caveats listed below.
+
+[embedded GraalVM configuration files]:https://www.graalvm.org/reference-manual/native-image/BuildConfiguration/#embedding-a-configuration-file
+
+### Classes instantiated by reflection
+
+The driver instantiates its components by reflection. The actual classes that will be instantiated
+in this way need to be registered for reflection. All built-in implementations of various driver
+components, such as `LoadBalancingPolicy` or `TimestampGenerator`, are automatically registered for
+reflection, along with a few other internal components that are also instantiated by reflection.
+_You don't need to manually register any of these built-in implementations_.
+
+But if you intend to use a custom implementation in lieu of a driver built-in class, then it is your
+responsibility to register that custom implementation for reflection.
+
+For example, assuming that you have the following load balancing policy implementation:
+
+```java
+
+package com.example.app;
+
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy;
+import java.util.Map;
+import java.util.Optional;
+import java.util.UUID;
+
+public class CustomLoadBalancingPolicy extends DefaultLoadBalancingPolicy {
+
+  public CustomLoadBalancingPolicy(DriverContext context, String profileName) {
+    super(context, profileName);
+  }
+  // rest of class omitted for brevity
+}
+```
+
+And assuming that you declared the above class in your application.conf file as follows:
+
+```hocon
+datastax-java-driver.basic {
+  load-balancing-policy.class = com.example.app.CustomLoadBalancingPolicy
+}
+```
+
+Then you will have to register that class for reflection:
+
+1. Create the following reflection.json file, or add the entry to an existing file:
+
+```json
+[
+  { "name": "com.example.app.CustomLoadBalancingPolicy", "allPublicConstructors": true }
+]
+```
+
+2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json`
+   flag and point it to the file created above.
+
+Note: some frameworks allow you to simplify the registration process. For example, Quarkus offers
+the `io.quarkus.runtime.annotations.RegisterForReflection` annotation that you can use to annotate
+your class:
+
+```java
+@RegisterForReflection
+public class CustomLoadBalancingPolicy extends DefaultLoadBalancingPolicy {
+  //...
+}
+```
+
+In this case, no other manual configuration is required for the above class to be correctly
+registered for reflection.
+
+### Configuration resources
+
+The default driver [configuration](../configuration) mechanism is based on the TypeSafe Config
+library. TypeSafe Config looks for a few classpath resources when initializing the configuration:
+`reference.conf`, `application.conf`, `application.json`, `application.properties`. _These classpath
+resources are all automatically included in the native image: you should not need to do it
+manually_.
See [Accessing Resources in Native Images] for more information on how classpath +resources are handled in native images. + +[Accessing Resources in Native Images]: https://www.graalvm.org/reference-manual/native-image/Resources/ + +### Configuring the logging backend + +When configuring [logging](../logging), the choice of a backend must be considered carefully, as +most logging backends resort to reflection during their configuration phase. + +By default, GraalVM native images provide support for the java.util.logging (JUL) backend. See +[this page](https://www.graalvm.org/reference-manual/native-image/Logging/) for more information. + +For other logging backends, please refer to the logging library documentation to find out if GraalVM +native images are supported. + +### Using reactive-style programming + +The [reactive execution model](../reactive) is compatible with GraalVM native images, but the +following configurations must be added: + +1. Create the following reflection.json file, or add the entry to an existing file: + +```json +[ + { "name": "org.reactivestreams.Publisher" } +] +``` + +2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` + flag and point it to the file created above. + +### Using the Jackson JSON library + +[Jackson](https://github.com/FasterXML/jackson) is used in [a few places](../integration#jackson) in +the driver, but is an optional dependency; if you intend to use Jackson, the following +configurations must be added: + +1. Create the following reflection.json file, or add these entries to an existing file: + +```json +[ + { "name": "com.fasterxml.jackson.core.JsonParser" }, + { "name": "com.fasterxml.jackson.databind.ObjectMapper" } +] +``` + +**Important**: when using the shaded jar – which is not officially supported on GraalVM native +images, see below for more details – replace the above entries with the below ones: + +```json +[ + { "name": "com.datastax.oss.driver.shaded.fasterxml.jackson.core.JsonParser" }, + { "name": "com.datastax.oss.driver.shaded.fasterxml.jackson.databind.ObjectMapper" } +] +``` +2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` + flag and point it to the file created above. + +### Enabling compression + +When using [compression](../compression/), only LZ4 can be enabled in native images. **Snappy +compression is not supported.** + +In order for LZ4 compression to work in a native image, the following additional GraalVM +configuration is required: + +1. 
Create the following reflection.json file, or add these entries to an existing file: + +```json +[ + { "name" : "net.jpountz.lz4.LZ4Compressor" }, + { + "name" : "net.jpountz.lz4.LZ4JNICompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4JavaSafeCompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4JavaUnsafeCompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4HCJavaSafeCompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4HCJavaUnsafeCompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4JavaSafeSafeDecompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4JavaSafeFastDecompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4JavaUnsafeSafeDecompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + }, + { + "name" : "net.jpountz.lz4.LZ4JavaUnsafeFastDecompressor", + "allDeclaredConstructors": true, + "allPublicFields": true + } +] +``` + +2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` + flag and point it to the file created above. + +### Native calls + +The driver performs a few [native calls](../integration#native-libraries) using +[JNR](https://github.com/jnr). + +Starting with driver 4.7.0, native calls are also possible in a GraalVM native image, without any +extra configuration. + +### Using DataStax Enterprise (DSE) features + +#### DSE Geospatial types + +DSE [Geospatial types](../dse/geotypes) are supported on GraalVM native images; the following +configurations must be added: + +1. Create the following reflection.json file, or add the entry to an existing file: + +```json +[ + { "name": "com.esri.core.geometry.ogc.OGCGeometry" } +] +``` + +**Important**: when using the shaded jar – which is not officially supported on GraalVM native +images, as stated above – replace the above entry with the below one: + +```json +[ + { "name": "com.datastax.oss.driver.shaded.esri.core.geometry.ogc.OGCGeometry" } +] +``` + +2. When invoking the native image builder, add a `-H:ReflectionConfigurationFiles=reflection.json` + flag and point it to the file created above. + +#### DSE Graph + +**[DSE Graph](../dse/graph) is not officially supported on GraalVM native images.** + +The following configuration can be used as a starting point for users wishing to build a native +image for a DSE Graph application. DataStax does not guarantee however that the below configuration +will work in all cases. If the native image build fails, a good option is to use GraalVM's +[Tracing Agent](https://www.graalvm.org/reference-manual/native-image/Agent/) to understand why. + +1. 
Create the following reflection.json file, or add these entries to an existing file:
+
+```json
+[
+  { "name": "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0" },
+  { "name": "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal" },
+  { "name": "org.apache.tinkerpop.gremlin.structure.Graph",
+    "allDeclaredConstructors": true,
+    "allPublicConstructors": true,
+    "allDeclaredMethods": true,
+    "allPublicMethods": true
+  },
+  { "name": "org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph",
+    "allDeclaredConstructors": true,
+    "allPublicConstructors": true,
+    "allDeclaredMethods": true,
+    "allPublicMethods": true
+  },
+  { "name": "org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph",
+    "allDeclaredConstructors": true,
+    "allPublicConstructors": true,
+    "allDeclaredMethods": true,
+    "allPublicMethods": true
+  },
+  { "name": "org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource",
+    "allDeclaredConstructors": true,
+    "allPublicConstructors": true,
+    "allDeclaredMethods": true,
+    "allPublicMethods": true
+  }
+]
+```
+
+2. When invoking the native image builder, add the following flags:
+
+```
+-H:ReflectionConfigurationFiles=reflection.json
+--initialize-at-build-time=org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0
+--initialize-at-build-time=org.apache.tinkerpop.shaded.jackson.databind.deser.std.StdDeserializer
+```
+
+### Using the shaded jar
+
+**The [shaded jar](../shaded_jar) is not officially supported in a GraalVM native image.**
+
+However, it has been reported that the shaded jar can be included in a GraalVM native image as a
+drop-in replacement for the regular driver jar for simple applications, without any extra GraalVM
+configuration.
diff --git a/manual/core/idempotence/README.md b/manual/core/idempotence/README.md
index 59d45d5113f..be784dfa40b 100644
--- a/manual/core/idempotence/README.md
+++ b/manual/core/idempotence/README.md
@@ -1,8 +1,35 @@
+
+
 ## Query idempotence
 
+### Quick overview
+
 A request is *idempotent* if executing it multiple times leaves the database in the same state as
 executing it only once.
 
+* `basic.request.default-idempotence` in the configuration (defaults to false).
+* can be overridden per statement with [Statement.setIdempotent] or [StatementBuilder.setIdempotence].
+* retries and speculative executions only happen for idempotent statements.
+
+-----
+
 For example:
 
 * `update my_table set list_col = [1] where pk = 1` is idempotent: no matter how many times it gets
@@ -37,7 +64,7 @@ SimpleStatement statement =
 ```
 
 If you don't, they default to the value defined in the [configuration](../configuration/) by the
-`request.default-idempotence` option; out of the box, it is set to `false`.
+`basic.request.default-idempotence` option; out of the box, it is set to `false`.
 
 When you prepare a statement, its idempotence carries over to bound statements:
 
@@ -51,3 +78,6 @@ assert bs.isIdempotent();
 
 The query builder tries to infer idempotence automatically; refer to
 [its manual](../../query_builder/idempotence/) for more details.
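+
+For reference, enabling idempotence by default would look like this in the configuration (a minimal
+sketch using the `basic.request.default-idempotence` option described above; only do this if you
+know that all your statements are safe to retry):
+
+```
+datastax-java-driver {
+  basic.request.default-idempotence = true
+}
+```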
+ +[Statement.setIdempotent]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setIdempotent-java.lang.Boolean- +[StatementBuilder.setIdempotence]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/StatementBuilder.html#setIdempotence-java.lang.Boolean- diff --git a/manual/core/integration/README.md b/manual/core/integration/README.md index e7cbcea9095..e2c7bc218ee 100644 --- a/manual/core/integration/README.md +++ b/manual/core/integration/README.md @@ -1,6 +1,118 @@ + + ## Integration -This page contains various information on how to integrate the driver in your application. +### Quick overview + +* sample project structures for Maven and Gradle. +* explanations about [driver dependencies](#driver-dependencies) and when they can be manually + excluded. + +Note: guidelines to build a GraalVM native image can be found [here](../graalvm). + +----- + +### Which artifact(s) should I use? + +There are multiple driver artifacts under the group id +[com.datastax.oss](https://search.maven.org/search?q=g:com.datastax.oss). Here's how to pick the +right dependencies: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Feature | Artifact(s) | Comments |
+|---------|-------------|----------|
+| Core functionality: executing queries with `CqlSession.execute()`, processing the results with `ResultSet`, etc. | java‑driver‑core | |
+| Same as the above, but without explicit dependencies to Netty, Jackson or ESRI. | java‑driver‑core‑shaded | Replaces java‑driver‑core. See this page. |
+| Query builder: generating CQL query strings programmatically. | java‑driver‑query‑builder | |
+| Object mapper: generating the boilerplate to execute queries and convert the results into your own domain classes. | java‑driver‑mapper‑processor<br/>java‑driver‑mapper‑runtime | Both artifacts are needed. See this page. |
+| Instrumenting the driver and gathering metrics using the Micrometer metrics library. | java‑driver‑metrics‑micrometer | See this page. |
+| Instrumenting the driver and gathering metrics using the MicroProfile Metrics library. | java‑driver‑metrics‑microprofile | See this page. |
+| "Bill Of Materials": can help manage versions if you use multiple driver artifacts. | java‑driver‑bom | See this page. |
+| Writing integration tests that run the driver against Cassandra or Simulacron. | java‑driver‑test‑infra | Those APIs are not covered in this manual, but you can look at the driver's contribution guidelines and internal tests for guidance. |
 ### Minimal project structure
@@ -37,9 +149,9 @@ dependencies, and tell Maven that we're going to use Java 8:
     <dependency>
-      <groupId>com.datastax.oss</groupId>
+      <groupId>org.apache.cassandra</groupId>
       <artifactId>java-driver-core</artifactId>
-      <version>4.1.0</version>
+      <version>${driver.version}</version>
     </dependency>
     <dependency>
       <groupId>ch.qos.logback</groupId>
@@ -144,7 +256,7 @@ You should see output similar to:
 [INFO] Nothing to compile - all classes are up to date
 [INFO]
 [INFO] --- exec-maven-plugin:1.3.1:java (default-cli) @ yourapp ---
-11:39:45.355 [Main.main()] INFO c.d.o.d.i.c.DefaultMavenCoordinates - DataStax Java driver for Apache Cassandra(R) (com.datastax.oss:java-driver-core) version 4.0.1
+11:39:45.355 [Main.main()] INFO c.d.o.d.i.c.DefaultMavenCoordinates - Apache Cassandra Java Driver (com.datastax.oss:java-driver-core) version 4.0.1
 11:39:45.648 [poc-admin-0] INFO c.d.o.d.internal.core.time.Clock - Using native clock for microsecond precision
 11:39:45.649 [poc-admin-0] INFO c.d.o.d.i.c.metadata.MetadataManager - [poc] No contact points provided, defaulting to /127.0.0.1:9042
 3.11.2
@@ -176,7 +288,7 @@ repositories {
 }
 
 dependencies {
-    compile group: 'com.datastax.oss', name: 'java-driver-core', version: '4.1.0'
+    compile group: 'com.datastax.oss', name: 'java-driver-core', version: '${driver.version}'
     compile group: 'ch.qos.logback', name: 'logback-classic', version: '1.2.3'
 }
 ```
@@ -214,7 +326,7 @@ $ ./gradlew execute
 :processResources
 :classes
 :execute
-13:32:25.339 [main] INFO c.d.o.d.i.c.DefaultMavenCoordinates - DataStax Java driver for Apache Cassandra(R) (com.datastax.oss:java-driver-core) version 4.0.1-alpha4-SNAPSHOT
+13:32:25.339 [main] INFO c.d.o.d.i.c.DefaultMavenCoordinates - Apache Cassandra Java Driver (com.datastax.oss:java-driver-core) version 4.0.1-alpha4-SNAPSHOT
 13:32:25.682 [poc-admin-0] INFO c.d.o.d.internal.core.time.Clock - Using native clock for microsecond precision
 13:32:25.683 [poc-admin-0] INFO c.d.o.d.i.c.metadata.MetadataManager - [poc] No contact points provided, defaulting to /127.0.0.1:9042
 3.11.2
@@ -230,6 +342,10 @@ If your build tool can't fetch dependencies from Maven central, we publish a bin
 
 The driver and its dependencies must be in the compile-time classpath. Application resources, such
 as `application.conf` and `logback.xml` in our previous examples, must be in the runtime classpath.
 
+### JPMS support
+
+All the driver's artifacts are JPMS automatic modules.
+
 ### Driver dependencies
 
 The driver depends on a number of third-party libraries; some of those dependencies are opt-in,
@@ -258,9 +374,9 @@ In that case, you can exclude the dependency:
 
 ```xml
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-core</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
   <exclusions>
     <exclusion>
       <groupId>com.typesafe</groupId>
       <artifactId>config</artifactId>
@@ -278,22 +394,18 @@
 The driver performs native calls with [JNR](https://github.com/jnr). This is used:
 
 * to get the process ID when generating [UUIDs][Uuids].
 
 In both cases, this is completely optional; if system calls are not available on the current
-platform, or the libraries fail to load for any reason, the driver falls back to pure Java
+platform, or the library fails to load for any reason, the driver falls back to pure Java
 workarounds.
If you don't want to use system calls, or already know (from looking at the driver's logs) that they
-are not available on your platform, you can exclude the following dependencies:
+are not available on your platform, you can exclude the following dependency:
 
 ```xml
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-core</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
   <exclusions>
-    <exclusion>
-      <groupId>com.github.jnr</groupId>
-      <artifactId>jnr-ffi</artifactId>
-    </exclusion>
     <exclusion>
       <groupId>com.github.jnr</groupId>
       <artifactId>jnr-posix</artifactId>
@@ -304,7 +416,7 @@ are not available on your platform, you can exclude the following dependencies:
 
 #### Compression libraries
 
-The driver supports compression with either [LZ4](https://github.com/jpountz/lz4-java) or
+The driver supports compression with either [LZ4](https://github.com/yawkat/lz4-java) or
 [Snappy](http://google.github.io/snappy/).
 
 These dependencies are optional; you have to add them explicitly in your application in order to
@@ -313,16 +425,17 @@ enable compression. See the [Compression](../compression/) page for more details
 
 #### Metrics
 
 The driver exposes [metrics](../metrics/) through the
-[Dropwizard](http://metrics.dropwizard.io/4.0.0/manual/index.html) library.
+[Dropwizard](http://metrics.dropwizard.io/4.1.2/) library.
 
-The dependency is declared as required, but metrics are optional. If you've disabled all metrics,
-and never call [Session.getMetrics] anywhere in your application, you can remove the dependency:
+The dependency is declared as required, but metrics are optional. If you've disabled all metrics, or
+if you are using a different metrics library, and you never call [Session.getMetrics] anywhere in
+your application, then you can remove the dependency:
 
 ```xml
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-core</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
   <exclusions>
     <exclusion>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
     </exclusion>
   </exclusions>
 </dependency>
 ```
 
-In addition, "timer" metrics use [HdrHistogram](http://hdrhistogram.github.io/HdrHistogram/) to
-record latency percentiles. At the time of writing, these metrics are: `cql-requests`,
-`throttling.delay` and `cql-messages`; you can also identify them by reading the comments in the
-[configuration reference](../configuration/reference/) (look for "exposed as a Timer").
+In addition, when using Dropwizard, "timer" metrics use
+[HdrHistogram](http://hdrhistogram.github.io/HdrHistogram/) to record latency percentiles. At the
+time of writing, these metrics are: `cql-requests`, `throttling.delay` and `cql-messages`; you can
+also identify them by reading the comments in the [configuration
+reference](../configuration/reference/) (look for "exposed as a Timer").
 
-If all of these metrics are disabled, you can remove the dependency:
+If all of these metrics are disabled, or if you use a different metrics library, you can remove the
+dependency:
 
 ```xml
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-core</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
   <exclusions>
     <exclusion>
       <groupId>org.hdrhistogram</groupId>
       <artifactId>HdrHistogram</artifactId>
     </exclusion>
   </exclusions>
 </dependency>
 ```
 
-#### Documenting annotations
+#### Jackson
 
-The driver team uses annotations to document certain aspects of the code:
+[Jackson](https://github.com/FasterXML/jackson) is used:
 
-* thread safety with [Java Concurrency in Practice](http://jcip.net/annotations/doc/index.html)
-  annotations `@Immutable`, `@ThreadSafe`, `@NotThreadSafe` and `@GuardedBy`;
-* nullability with [SpotBugs](https://spotbugs.github.io/) annotations `@Nullable` and `@NonNull`.
+
+* when connecting to [DataStax Astra](../../cloud/);
+* when Insights monitoring is enabled;
+* when [Json codecs](../custom_codecs) are being used.
+
+Jackson is declared as a required dependency, but the driver can operate normally without it. If you
+don't use any of the above features, you can safely exclude the dependency:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-core</artifactId>
+  <version>${driver.version}</version>
+  <exclusions>
+    <exclusion>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>*</artifactId>
+    </exclusion>
+  </exclusions>
+</dependency>
+```
+
+#### Esri
+
+Our [geospatial types](../dse/geotypes/) implementation is based on the [Esri Geometry
+API](https://github.com/Esri/geometry-api-java).
+
+For driver versions >= 4.4.0 and < 4.14.0, Esri is declared as a required dependency,
+although the driver can operate normally without it. If you don't use geospatial types
+anywhere in your application you can exclude the dependency:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-core</artifactId>
+  <version>${driver.version}</version>
+  <exclusions>
+    <exclusion>
+      <groupId>com.esri.geometry</groupId>
+      <artifactId>*</artifactId>
+    </exclusion>
+  </exclusions>
+</dependency>
+```
+
+Starting with driver 4.14.0, Esri has been changed to an optional dependency. You no longer have to
+explicitly exclude the dependency if it's not used, but if you do wish to make use of the Esri
+library, you must now explicitly specify it as a dependency:
+
+```xml
+<dependency>
+  <groupId>com.esri.geometry</groupId>
+  <artifactId>esri-geometry-api</artifactId>
+  <version>${esri.version}</version>
+</dependency>
+```
+
+In the dependency specification above, you should use any 1.2.x version of Esri (we recommend
+1.2.1). These versions are older than the current 2.x versions of the library but they are
+guaranteed to be fully compatible with DSE.
+
+#### TinkerPop
+
+[Apache TinkerPop™](http://tinkerpop.apache.org/) is used in our [graph API](../dse/graph/),
+introduced in the OSS driver in version 4.4.0 (it was previously a feature only available in the
+now-retired DSE driver).
+
+For driver versions ranging from 4.4.0 to 4.9.0 inclusive, TinkerPop is declared as a required
+dependency, but the driver can operate normally without it. If you don't use the graph API at all,
+you can exclude the TinkerPop dependencies:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-core</artifactId>
+  <version>${driver.version}</version>
+  <exclusions>
+    <exclusion>
+      <groupId>org.apache.tinkerpop</groupId>
+      <artifactId>*</artifactId>
+    </exclusion>
+  </exclusions>
+</dependency>
+```
+
+Starting with driver 4.10 however, TinkerPop switched to an optional dependency. Excluding TinkerPop
+explicitly is not required anymore if you don't use it. _If you do use the graph API though, you now
+need to explicitly include the dependencies below in your application_:
+
+```xml
+<dependency>
+  <groupId>org.apache.tinkerpop</groupId>
+  <artifactId>gremlin-core</artifactId>
+  <version>${tinkerpop.version}</version>
+</dependency>
+<dependency>
+  <groupId>org.apache.tinkerpop</groupId>
+  <artifactId>tinkergraph-gremlin</artifactId>
+  <version>${tinkerpop.version}</version>
+</dependency>
+```
+
+If you do use graph, it is important to use the exact TinkerPop version that the driver was built
+against: unlike the driver, TinkerPop does not follow semantic versioning, so even a patch version
+change (e.g. 3.3.0 vs 3.3.3) could introduce incompatibilities.
+
+Here are the recommended TinkerPop versions for each driver version:
+
| Driver version | TinkerPop version |
|----------------|-------------------|
| 4.17.0         | 3.5.3             |
| 4.16.0         | 3.5.3             |
| 4.15.0         | 3.5.3             |
| 4.14.1         | 3.5.3             |
| 4.14.0         | 3.4.10            |
| 4.13.0         | 3.4.10            |
| 4.12.0         | 3.4.10            |
| 4.11.0         | 3.4.10            |
| 4.10.0         | 3.4.9             |
| 4.9.0          | 3.4.8             |
| 4.8.0          | 3.4.5             |
| 4.7.0          | 3.4.5             |
| 4.6.0          | 3.4.5             |
| 4.5.0          | 3.4.5             |
| 4.4.0          | 3.3.3             |
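Not sure whether your application uses the graph API at all? It is only engaged through graph
statements, along the lines of this minimal sketch (assuming an existing `session`; the Gremlin
traversal is illustrative):

```java
import com.datastax.dse.driver.api.core.graph.GraphResultSet;
import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;

// If your code contains no graph statements like this one, you don't need the
// TinkerPop dependencies listed above.
GraphResultSet result =
    session.execute(ScriptGraphStatement.newInstance("g.V().limit(10)"));
// Each result is a GraphNode wrapping the returned Gremlin value.
result.forEach(System.out::println);
```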
+
+#### Reactive Streams
+
+[Reactive Streams](https://www.reactive-streams.org/) types are referenced in our [reactive
+API](../reactive/).
+
+The Reactive Streams API is declared as a required dependency, but the driver can operate normally
+without it. If you never call any of the `executeReactive` methods, you can exclude the dependency:
+
```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-core</artifactId>
+  <version>${driver.version}</version>
+  <exclusions>
    <exclusion>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs-annotations</artifactId>
+      <groupId>org.reactivestreams</groupId>
+      <artifactId>reactive-streams</artifactId>
    </exclusion>
  </exclusions>
</dependency>
```

+#### Documenting annotations
+
+The driver team uses annotations to document certain aspects of the code:
+
+* thread safety with [Java Concurrency in Practice](http://jcip.net/annotations/doc/index.html)
+  annotations `@Immutable`, `@ThreadSafe`, `@NotThreadSafe` and `@GuardedBy`;
+* nullability with [SpotBugs](https://spotbugs.github.io/) annotations `@Nullable` and `@NonNull`.
+
+This is mostly used during development; while these annotations are retained in class files, they
+serve no purpose at runtime. These annotation jars are optional dependencies of the driver. If you
+wish to make use of these annotations in your own code you have to explicitly depend on these jars:
+
+```xml
+<dependency>
+  <groupId>com.github.stephenc.jcip</groupId>
+  <artifactId>jcip-annotations</artifactId>
+  <version>1.0-1</version>
+</dependency>
+<dependency>
+  <groupId>com.github.spotbugs</groupId>
+  <artifactId>spotbugs-annotations</artifactId>
+  <version>3.1.12</version>
+</dependency>
+```
+
However, there is one case when excluding those dependencies won't work: if you use [annotation
processing] in your build, the Java compiler scans the entire classpath -- including the driver's
classes -- and tries to load all declared annotations. If it can't find the class for an annotation,
@@ -407,7 +671,7 @@ The remaining core driver dependencies are the only ones that are truly mandator

* the [native protocol](https://github.com/datastax/native-protocol) layer. This is essentially
  part of the driver code, but was externalized for reuse in other projects;
-* `java-driver-shaded-guava`, a shaded version of [Guava](https://github.com/google/guava). It is
+* `java-driver-guava-shaded`, a shaded version of [Guava](https://github.com/google/guava). It is
  relocated to a different package, and only used by internal driver code, so it should be
  completely transparent to third-party code;
* the [SLF4J](https://www.slf4j.org/) API for [logging](../logging/).
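Relatedly, if you're unsure whether the Reactive Streams exclusion above is safe for your
application: the dependency is only exercised by `executeReactive` calls, as in this minimal sketch
(assuming an existing `session`; the bare-bones subscriber is illustrative, any Reactive Streams
implementation can consume the publisher):

```java
import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet;
import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

// ReactiveResultSet is a Reactive Streams Publisher<ReactiveRow>:
ReactiveResultSet rs = session.executeReactive("SELECT release_version FROM system.local");
rs.subscribe(
    new Subscriber<ReactiveRow>() {
      @Override public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); }
      @Override public void onNext(ReactiveRow row) { System.out.println(row.getString(0)); }
      @Override public void onError(Throwable t) { t.printStackTrace(); }
      @Override public void onComplete() {}
    });
```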
@@ -419,6 +683,6 @@ The remaining core driver dependencies are the only ones that are truly mandator [guava]: https://github.com/google/guava/issues/2721 [annotation processing]: https://docs.oracle.com/javase/8/docs/technotes/tools/windows/javac.html#sthref65 -[Session.getMetrics]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html#getMetrics-- -[SessionBuilder.addContactPoint]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoint-java.net.InetSocketAddress- -[Uuids]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/uuid/Uuids.html +[Session.getMetrics]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#getMetrics-- +[SessionBuilder.addContactPoint]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addContactPoint-java.net.InetSocketAddress- +[Uuids]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/uuid/Uuids.html diff --git a/manual/core/load_balancing/README.md b/manual/core/load_balancing/README.md index e7dd4d8962e..3f391c14f56 100644 --- a/manual/core/load_balancing/README.md +++ b/manual/core/load_balancing/README.md @@ -1,5 +1,34 @@ + + ## Load balancing +### Quick overview + +Which nodes the driver talks to, and in which order they are tried. + +* `basic.load-balancing-policy` in the configuration. +* defaults to `DefaultLoadBalancingPolicy` (opinionated best practices). +* can have per-profile policies. + +----- + A Cassandra cluster is typically composed of multiple nodes; the *load balancing policy* (sometimes abbreviated LBP) is a central component that determines: @@ -27,6 +56,9 @@ For each node, the policy computes a *distance* that determines how connections Typically, the distance will reflect network topology (e.g. local vs. remote datacenter), although that is entirely up to each policy implementation. It can also change over time. +The driver built-in policies only ever assign the `LOCAL` or `IGNORED` distance, to avoid cross- +datacenter traffic (see below to understand how to change this behavior). + #### Query plan Each time the driver executes a query, it asks the policy to compute a *query plan*, in other words @@ -40,23 +72,40 @@ return plans that: * only contain nodes that are known to be able to process queries, i.e. neither ignored nor down; * favor local nodes over remote ones. -### Default policy +### Built-in policies In previous versions, the driver provided a wide variety of built-in load balancing policies; in addition, they could be nested into each other, yielding an even higher number of choices. In our experience, this has proven to be too complicated: it's not obvious which policy(ies) to choose for -a given use case, and nested policies can sometimes affect each other's effects in subtle and hard -to predict ways. - -In driver 4+, we are taking a more opinionated approach: we provide a single load balancing policy, -that we consider the best choice for most cases. You can still write a -[custom implementation](#custom-implementation) if you have special requirements. - -#### Local only - -The default policy **only connects to a single datacenter**. 
The rationale is that a typical
-multi-region deployment will collocate one or more application instances with each Cassandra
-datacenter:
+a given use case, and nested policies can sometimes affect each other's effects in subtle and hard-
+to-predict ways.
+
+In driver 4+, we are taking a different approach: we provide only a handful of load balancing
+policies that we consider the best choices for most cases:
+
+- `DefaultLoadBalancingPolicy` should almost always be used; it requires a local datacenter to be
+  specified either programmatically when creating the session, or via the configuration (see below).
+  It can also use a highly efficient slow replica avoidance mechanism, which is enabled by default.
+- `DcInferringLoadBalancingPolicy` is similar to `DefaultLoadBalancingPolicy`, but does not require
+  a local datacenter to be defined, in which case it will attempt to infer the local datacenter from
+  the provided contact points. If that's not possible, it will throw an error during session
+  initialization. This policy is intended mostly for ETL tools and is not recommended for normal
+  applications.
+- `BasicLoadBalancingPolicy` is similar to `DefaultLoadBalancingPolicy`, but does not have the slow
+  replica avoidance mechanism. More importantly, it is the only policy capable of operating without
+  a local datacenter defined, in which case it will consider nodes in the cluster in a
+  datacenter-agnostic way. Beware that this could cause spikes in cross-datacenter traffic! This
+  policy is provided mostly as a starting point for users wishing to implement their own load
+  balancing policy; it should not be used as is in normal applications.
+
+You can still write a [custom implementation](#custom-implementation) if you have special
+requirements.
+
+#### Datacenter locality
+
+By default, both `DefaultLoadBalancingPolicy` and `DcInferringLoadBalancingPolicy` **only connect to
+a single datacenter**. The rationale is that a typical multi-region deployment will collocate one or
+more application instances with each Cassandra datacenter:

*(diagram: one application instance per region, co-located with its local Cassandra datacenter;
app1 with DC1 in Region1, app2 with DC2 in Region2, behind a load balancer)*

@@ -86,14 +135,7 @@ datacenter:

-In previous driver versions, you could configure application-level failover, such as: "if all the
-Cassandra nodes in DC1 are down, allow app1 to connect to the nodes in DC2". We now believe that
-this is not the right place to handle this: if a whole datacenter went down at once, it probably
-means a catastrophic failure happened in Region1, and the application node is down as well.
-Failover should be cross-region instead (handled by the load balancer in this example).
-
-Therefore the default policy does not allow remote nodes; it only ever assigns the `LOCAL` or
-`IGNORED` distance. You **must** provide a local datacenter name, either in the configuration:
+When using these policies you **must** provide a local datacenter name, either in the configuration:

```
datastax-java-driver.basic.load-balancing-policy {
@@ -116,6 +158,121 @@ that case, the driver will connect to 127.0.0.1:9042, and use that node's datace

for a better out-of-the-box experience for users who have just downloaded the driver; beyond that
initial development phase, you should provide explicit contact points and a local datacenter.

+##### Finding the local datacenter
+
+To check which datacenters are defined in a given cluster, you can run [`nodetool status`]. It will
+print information about each node in the cluster, grouped by datacenters.
Here is an example: + +``` +$ nodetool status +Datacenter: DC1 +=============== +Status=Up/Down +|/ State=Normal/Leaving/Joining/Moving +-- Address Load Tokens Owns Host ID Rack +UN 1.5 TB 256 ? rack1 +UN 1.5 TB 256 ? rack2 +UN 1.5 TB 256 ? rack3 + +Datacenter: DC2 +=============== +Status=Up/Down +|/ State=Normal/Leaving/Joining/Moving +-- Address Load Tokens Owns Host ID Rack +UN 1.5 TB 256 ? rack1 +UN 1.5 TB 256 ? rack2 +UN 1.5 TB 256 ? rack3 +``` + +To find out which datacenter should be considered local, you need to first determine which nodes the +driver is going to be co-located with, then choose their datacenter as local. In case of doubt, you +can also use [cqlsh]; if cqlsh is co-located too in the same datacenter, simply run the command +below: + +``` +cqlsh> select data_center from system.local; + +data_center +------------- +DC1 +``` + +#### Cross-datacenter failover + +Since the driver by default only contacts nodes in the local datacenter, what happens if the whole +datacenter is down? Resuming the example shown in the diagram above, shouldn't the driver +temporarily allow app1 to connect to the nodes in DC2? + +We believe that, while appealing by its simplicity, such ability is not the right way to handle a +datacenter failure: resuming our example above, if the whole DC1 datacenter went down at once, it +probably means a catastrophic failure happened in Region1, and the application node is down as well. +Failover should be cross-region instead (handled by the load balancer in the above example). + +However, due to popular demand, starting with driver 4.10, we re-introduced cross-datacenter +failover in the driver built-in load balancing policies. + +Cross-datacenter failover is enabled with the following configuration option: + +``` +datastax-java-driver.advanced.load-balancing-policy.dc-failover { + max-nodes-per-remote-dc = 2 +} +``` + +The default for `max-nodes-per-remote-dc` is zero, which means that failover is disabled. Setting +this option to any value greater than zero will have the following effects: + +- The load balancing policies will assign the `REMOTE` distance to that many nodes *in each remote + datacenter*. +- The driver will then attempt to open connections to those nodes. The actual number of connections + to open to each one of those nodes is configurable, see [Connection pools](../pooling/) for + more details. By default, the driver opens only one connection to each node. +- Those remote nodes (and only those) will then become eligible for inclusion in query plans, + effectively enabling cross-datacenter failover. + +Beware that enabling such failover can result in cross-datacenter network traffic spikes, if the +local datacenter is down or experiencing high latencies! + +Cross-datacenter failover can also have unexpected consequences when using local consistency levels +(LOCAL_ONE, LOCAL_QUORUM and LOCAL_SERIAL). Indeed, a local consistency level may have different +semantics depending on the replication factor (RF) in use in each datacenter: if the local DC has +RF=3 for a given keyspace, but the remote DC has RF=1 for it, achieving LOCAL_QUORUM in the local DC +means 2 replicas required, but in the remote DC, only one will be required. + +For this reason, cross-datacenter failover for local consistency levels is disabled by default. 
If +you want to enable this and understand the consequences, then set the following option to true: + +``` +datastax-java-driver.advanced.load-balancing-policy.dc-failover { + allow-for-local-consistency-levels = true +} +``` + +##### Alternatives to driver-level cross-datacenter failover + +Before you jump into the failover technique explained above, please also consider the following +alternatives: + +1. **Application-level failover**: instead of letting the driver do the failover, implement the +failover logic in your application. Granted, this solution wouldn't be much better if the +application servers are co-located with the Cassandra datacenter itself. It's also a bit more work, +but at least, you would have full control over the failover procedure: you could for example decide, +based on the exact error that prevented the local datacenter from fulfilling a given request, +whether a failover would make sense, and which remote datacenter to use for that specific request. +Such a fine-grained logic is not possible with a driver-level failover. Besides, if you opt for this +approach, execution profiles can come in handy. See "Using multiple policies" below and also check +our [application-level failover example] for a good starting point. + +2. **Infrastructure-level failover**: in this scenario, the failover is handled by the +infrastructure. To resume our example above, if Region1 goes down, the load balancers in your +infrastructure would transparently switch all the traffic intended for that region to Region2, +possibly scaling up its bandwidth to cope with the network traffic spike. This is by far the best +solution for the cross-datacenter failover issue in general, but we acknowledge that it also +requires a purpose-built infrastructure. To help you explore this option, read our [white paper]. + +[application-level failover example]: https://github.com/datastax/java-driver/blob/4.x/examples/src/main/java/com/datastax/oss/driver/examples/failover/CrossDatacenterFailover.java +[white paper]: https://www.datastax.com/sites/default/files/content/whitepaper/files/2019-09/Designing-Fault-Tolerant-Applications-DataStax.pdf + #### Token-aware The default policy is **token-aware** by default: requests will be routed in priority to the @@ -193,49 +350,63 @@ routing information, you need to provide it manually. ##### Policy behavior When the policy computes a query plan, it first inspects the statement's routing information. If -there isn't any, the query plan is a simple round-robin shuffle of all connected nodes. +there isn't any, the query plan is a simple round-robin shuffle of all connected nodes that are +located in the local datacenter. + +If the statement has routing information, the policy uses it to determine the *local* replicas that +hold the corresponding data. Then it returns a query plan containing these replicas shuffled in +random order, followed by a round-robin shuffle of the rest of the nodes. -If the statement has routing information, the policy uses it to determine the replicas that hold the -corresponding data. Then it returns a query plan containing the replicas shuffled in random order, -followed by a round-robin shuffle of the rest of the nodes. +If cross-datacenter failover has been activated as explained above, some remote nodes may appear in +query plans as well. 
With the driver built-in policies, remote nodes always come after local nodes
+in query plans: this way, if the local datacenter is up, local nodes will be tried first, and remote
+nodes are unlikely to ever be queried. If the local datacenter goes down however, all the local
+nodes in query plans will likely fail, causing the query plans to eventually try remote nodes
+instead. If the local datacenter unavailability persists, local nodes will eventually be marked down
+and removed completely from query plans, until they are back up again.

-#### Optional node filtering
+#### Customizing node distance assignment

-Finally, the default policy accepts an optional node filter that gets applied just after the test
-for inclusion in the local DC. If a node doesn't pass this test, it will be set at distance
-`IGNORED` and the driver will never try to connect to it. This is a good way to exclude nodes on
-some custom criteria.
+Finally, all the driver built-in policies accept an optional node distance evaluator that gets
+invoked each time a node is added to the cluster or comes back up. If the evaluator returns a
+non-null distance for the node, that distance will be used; otherwise the driver will use its
+built-in logic to assign a default distance to it. This is a good way to exclude nodes or to adjust
+their distance according to custom, dynamic criteria.

-You can pass the filter through the configuration:
+You can pass the node distance evaluator through the configuration:

```
datastax-java-driver.basic.load-balancing-policy {
  class = DefaultLoadBalancingPolicy
  local-datacenter = datacenter1
-  filter-class = com.acme.MyNodeFilter
+  evaluator.class = com.acme.MyNodeDistanceEvaluator
}
```

-The filter class must implement `java.util.function.predicate`, and have a public constructor
-that takes a [DriverContext] argument: `public MyNodeFilter(DriverContext context)`.
+The node distance evaluator class must implement [NodeDistanceEvaluator], and have a public
+constructor that takes a [DriverContext] argument: `public MyNodeDistanceEvaluator(DriverContext
+context)`. A minimal evaluator is sketched at the end of this page.

-Sometimes it's more convenient to pass the filter programmatically; you can do that with
-`SessionBuilder.withNodeFilter`:
+Sometimes it's more convenient to pass the evaluator programmatically; you can do that with
+`SessionBuilder.withNodeDistanceEvaluator`:

```java
-List<Node> whiteList = ...
+Map<Node, NodeDistance> distances = ...
CqlSession session = CqlSession.builder()
-    .withNodeFilter(whiteList::contains)
+    .withNodeDistanceEvaluator((node, dc) -> distances.get(node))
    .build();
```

-If a programmatic filter is provided, the configuration option is ignored.
+If a programmatic node distance evaluator is provided, the configuration option is
+ignored.

### Custom implementation

You can use your own implementation by specifying its fully-qualified name in the configuration.

-Study the [LoadBalancingPolicy] interface and the default implementation for the low-level details.
+Study the [LoadBalancingPolicy] interface and the built-in [BasicLoadBalancingPolicy] for the
+low-level details. Feel free to extend `BasicLoadBalancingPolicy` and override only the methods
+that you wish to modify – but keep in mind that it may be simpler to just start from scratch.

### Using multiple policies
@@ -274,8 +445,12 @@ Then it uses the "closest" distance for any given node. For example:

* policy1 changes its suggestion to IGNORED. node1 is set to REMOTE;
* policy1 changes its suggestion to REMOTE. node1 stays at REMOTE.
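As a closing example for this page, here is a minimal sketch of a node distance evaluator suitable
for registration via the configuration, as described in "Customizing node distance assignment"
above. The class name and the rack-based rule are made up for illustration; the two-argument
`evaluateDistance` contract and the `DriverContext` constructor requirement are the ones documented
above:

```java
import com.datastax.oss.driver.api.core.context.DriverContext;
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistance;
import com.datastax.oss.driver.api.core.loadbalancing.NodeDistanceEvaluator;
import com.datastax.oss.driver.api.core.metadata.Node;

public class MyNodeDistanceEvaluator implements NodeDistanceEvaluator {

  // Public constructor taking a DriverContext: required when the evaluator
  // is registered through the configuration.
  public MyNodeDistanceEvaluator(DriverContext context) {}

  @Override
  public NodeDistance evaluateDistance(Node node, String localDc) {
    // Hypothetical rule: never connect to nodes in a rack named "legacy".
    if ("legacy".equals(node.getRack())) {
      return NodeDistance.IGNORED;
    }
    // Returning null defers to the policy's built-in distance logic.
    return null;
  }
}
```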
-[DriverContext]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/context/DriverContext.html -[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.html -[getRoutingKeyspace()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKeyspace-- -[getRoutingToken()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Request.html#getRoutingToken-- -[getRoutingKey()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKey-- \ No newline at end of file +[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html +[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/loadbalancing/LoadBalancingPolicy.html +[BasicLoadBalancingPolicy]: https://github.com/datastax/java-driver/blob/4.x/core/src/main/java/com/datastax/oss/driver/internal/core/loadbalancing/BasicLoadBalancingPolicy.java +[getRoutingKeyspace()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKeyspace-- +[getRoutingToken()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingToken-- +[getRoutingKey()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKey-- +[NodeDistanceEvaluator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.html +[`nodetool status`]: https://docs.datastax.com/en/dse/6.7/dse-dev/datastax_enterprise/tools/nodetool/toolsStatus.html +[cqlsh]: https://docs.datastax.com/en/dse/6.7/cql/cql/cql_using/startCqlshStandalone.html diff --git a/manual/core/logging/README.md b/manual/core/logging/README.md index 5d190373ec5..e3f8bfa7777 100644 --- a/manual/core/logging/README.md +++ b/manual/core/logging/README.md @@ -1,5 +1,34 @@ + + ## Logging +### Quick overview + +* based on SLF4J. +* config file examples for Logback and Log4J. + +**If you're looking for information about the request logger, see the [request +tracker](../request_tracker/#request-logger) page.** + +----- + The driver uses [SLF4J] as a logging facade. This allows you to plug in your preferred logging framework (java.util.logging, logback, log4j...) at deployment time. @@ -100,11 +129,6 @@ investigate an issue. Keep in mind that they are quite verbose, in particular TRACE. It's a good idea to only enable them on a limited set of categories. -### Logging request latencies - -The driver provides a built-in component to log the latency and outcome of every application -request. See the [request tracker](../request_tracker/#request-logger) page for more details. - ### Configuration examples #### Logback @@ -210,4 +234,4 @@ console). 
[SLF4J]: https://www.slf4j.org/ [binding]: https://www.slf4j.org/manual.html#swapping [Logback]: http://logback.qos.ch -[Log4J]: https://logging.apache.org/log4j \ No newline at end of file +[Log4J]: https://logging.apache.org/log4j diff --git a/manual/core/metadata/README.md b/manual/core/metadata/README.md index de47cd19b4a..73609ee0542 100644 --- a/manual/core/metadata/README.md +++ b/manual/core/metadata/README.md @@ -1,5 +1,35 @@ + + ## Metadata +### Quick overview + +[session.getMetadata()][Session#getMetadata]: node states, schema and token map. + +* immutable, provides a consistent view at a given point in time (e.g. token map always matches + schema). +* pitfall: holding onto a stale instance; must call `session.getMetadata()` again to observe + changes. + +----- + The driver exposes metadata about the Cassandra cluster via the [Session#getMetadata] method. It returns a [Metadata] object, which contains three types of information: @@ -42,6 +72,9 @@ Set tokenRanges = tokenMap.getTokenRanges(keyspace.getName(), node); This is a big improvement over previous versions of the driver, where it was possible to observe a new keyspace in the schema metadata before the token metadata was updated. -[Session#getMetadata]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html#getMetadata-- -[Metadata]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Metadata.html -[Node]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html \ No newline at end of file +Schema and node state events are debounced. This allows you to control how often the metadata gets +refreshed. See the [Performance](../performance/#debouncing) page for more details. + +[Session#getMetadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#getMetadata-- +[Metadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html +[Node]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html diff --git a/manual/core/metadata/node/README.md b/manual/core/metadata/node/README.md index a0d281ffcfd..fea04e5f262 100644 --- a/manual/core/metadata/node/README.md +++ b/manual/core/metadata/node/README.md @@ -1,5 +1,35 @@ + + ## Node metadata +### Quick overview + +[session.getMetadata().getNodes()][Metadata#getNodes]: all nodes known to the driver (even if not +actively connected). + +* [Node] instances are mutable, the fields will update in real time. +* getting notifications: + [CqlSession.builder().addNodeStateListener][SessionBuilder.addNodeStateListener]. + +----- + [Metadata#getNodes] returns all the nodes known to the driver when the metadata was retrieved; this includes down and ignored nodes (see below), so the fact that a node is in this list does not necessarily mean that the driver is connected to it. @@ -37,7 +67,52 @@ client. [Node#getDistance()] is set by the load balancing policy. The driver does not connect to `IGNORED` nodes. The exact definition of `LOCAL` and `REMOTE` is left to the interpretation of each policy, but in general it represents the proximity to the client, and `LOCAL` nodes will be prioritized as -coordinators. They also influence pooling options. +coordinators. They also influence pooling options. + +[Node#getExtras()] contains additional free-form properties. This is intended for future evolution +or custom driver extensions. 
In particular, if the driver is connected to DataStax Enterprise, the +map will contain additional information under the keys defined in [DseNodeProperties]: + +```java +Object rawDseVersion = node.getExtras().get(DseNodeProperties.DSE_VERSION); +Version dseVersion = (rawDseVersion == null) ? null : (Version) rawDseVersion; +``` + +### Notifications + +If you need to follow node state changes, you don't need to poll the metadata manually; instead, +you can register one or more listeners to get notified when changes occur: + +```java +NodeStateListener listener = + new NodeStateListenerBase() { + @Override + public void onUp(@NonNull Node node) { + System.out.printf("%s went UP%n", node); + } + }; +CqlSession session = CqlSession.builder() + .addNodeStateListener(listener) + .build(); +``` + +See [NodeStateListener] for the list of available methods. [NodeStateListenerBase] is a +convenience implementation with empty methods, for when you only need to override a few of them. + +It is also possible to register one or more listeners via the configuration: + +```hocon +datastax-java-driver { + advanced { + node-state-listener.classes = [com.example.app.MyNodeStateListener1,com.example.app.MyNodeStateListener2] + } +} +``` + +Listeners registered via configuration will be instantiated with reflection; they must have a public +constructor taking a `DriverContext` argument. + +The two registration methods (programmatic and via the configuration) can be used simultaneously. ### Advanced topics @@ -73,12 +148,17 @@ beyond the scope of this document; if you're interested, study the `TopologyMoni the source code. -[Metadata#getNodes]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Metadata.html#getNodes-- -[Node]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html -[Node#getState()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#getState-- -[Node#getDatacenter()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#getDatacenter-- -[Node#getRack()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#getRack-- -[Node#getDistance()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#getDistance-- -[Node#getOpenConnections()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#getOpenConnections-- -[Node#isReconnecting()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Node.html#isReconnecting-- -[NodeState]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/NodeState.html \ No newline at end of file +[Metadata#getNodes]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html#getNodes-- +[Node]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html +[Node#getState()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getState-- +[Node#getDatacenter()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getDatacenter-- +[Node#getRack()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getRack-- +[Node#getDistance()]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getDistance--
+[Node#getExtras()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getExtras--
+[Node#getOpenConnections()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#getOpenConnections--
+[Node#isReconnecting()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Node.html#isReconnecting--
+[NodeState]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/NodeState.html
+[NodeStateListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/NodeStateListener.html
+[NodeStateListenerBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/NodeStateListenerBase.html
+[SessionBuilder.addNodeStateListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addNodeStateListener-com.datastax.oss.driver.api.core.metadata.NodeStateListener-
+[DseNodeProperties]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/DseNodeProperties.html
diff --git a/manual/core/metadata/schema/README.md b/manual/core/metadata/schema/README.md
index c6308209669..20521d1def4 100644
--- a/manual/core/metadata/schema/README.md
+++ b/manual/core/metadata/schema/README.md
@@ -1,5 +1,39 @@
+
## Schema metadata

+### Quick overview
+
+[session.getMetadata().getKeyspaces()][Metadata#getKeyspaces]
+
+* immutable (must invoke again to observe changes).
+* getting notifications:
+  [CqlSession.builder().addSchemaChangeListener][SessionBuilder#addSchemaChangeListener].
+* enabling/disabling: `advanced.metadata.schema.enabled` in the configuration, or
+  [session.setSchemaMetadataEnabled()][Session#setSchemaMetadataEnabled].
+* filtering: `advanced.metadata.schema.refreshed-keyspaces` in the configuration.
+* schema agreement: wait for the schema to replicate to all nodes (may add latency to DDL
+  statements).
+
+-----
+
[Metadata#getKeyspaces] returns a client-side representation of the database schema:

```java
@@ -20,20 +54,54 @@ immutable; if you need to get the latest schema, be sure to call
reference).

+### DSE
+
+All schema metadata interfaces accessible through `Metadata.getKeyspaces()` have a DSE-specific
+subtype in the package [com.datastax.dse.driver.api.core.metadata.schema]. The objects returned by
+the DSE driver implement those types, so you can safely cast:
+
+```java
+for (KeyspaceMetadata keyspace : session.getMetadata().getKeyspaces().values()) {
+  DseKeyspaceMetadata dseKeyspace = (DseKeyspaceMetadata) keyspace;
+}
+```
+
+If you're calling a method that returns an optional and want to keep the result wrapped, use this
+pattern:
+
+```java
+Optional<DseFunctionMetadata> f =
+    session
+        .getMetadata()
+        .getKeyspace("ks")
+        .flatMap(ks -> ks.getFunction("f"))
+        .map(DseFunctionMetadata.class::cast);
+```
+
+For future extensibility, there is a `DseXxxMetadata` subtype for every OSS type. But currently (DSE
+6.7), the only types that really add extra information are:
+
+* [DseFunctionMetadata]: adds support for the `DETERMINISTIC` and `MONOTONIC` keywords;
+* [DseAggregateMetadata]: adds support for the `MONOTONIC` keyword.
+
+All other types (keyspaces, tables, etc.) are identical to their OSS counterparts.
+ ### Notifications If you need to follow schema changes, you don't need to poll the metadata manually; instead, -you can register a listener to get notified when changes occur: +you can register one or more listeners to get notified when changes occur: ```java SchemaChangeListener listener = - new SchemaChangeListenerBase() { - @Override - public void onTableCreated(TableMetadata table) { - System.out.println("New table: " + table.getName().asCql(true)); - } - }; -session.register(listener); + new SchemaChangeListenerBase() { + @Override + public void onTableCreated(TableMetadata table) { + System.out.println("New table: " + table.getName().asCql(true)); + } + }; +CqlSession session = CqlSession.builder() + .addSchemaChangeListener(listener) + .build(); session.execute("CREATE TABLE test.foo (k int PRIMARY KEY)"); ``` @@ -41,6 +109,20 @@ session.execute("CREATE TABLE test.foo (k int PRIMARY KEY)"); See [SchemaChangeListener] for the list of available methods. [SchemaChangeListenerBase] is a convenience implementation with empty methods, for when you only need to override a few of them. +It is also possible to register one or more listeners via the configuration: + +```hocon +datastax-java-driver { + advanced { + schema-change-listener.classes = [com.example.app.MySchemaChangeListener1,com.example.app.MySchemaChangeListener2] + } +} +``` + +Listeners registered via configuration will be instantiated with reflection; they must have a public +constructor taking a `DriverContext` argument. + +The two registration methods (programmatic and via the configuration) can be used simultaneously. ### Configuration @@ -83,7 +165,54 @@ You can also limit the metadata to a subset of keyspaces: datastax-java-driver.advanced.metadata.schema.refreshed-keyspaces = [ "users", "products" ] ``` -If the property is absent or the list is empty, it is interpreted as "all keyspaces". +Each element in the list can be one of the following: + +1. An exact name inclusion, for example `"Ks1"`. If the name is case-sensitive, it must appear in + its exact case. +2. An exact name exclusion, for example `"!Ks1"`. +3. A regex inclusion, enclosed in slashes, for example `"/^Ks.*/"`. The part between the slashes + must follow the syntax rules of [java.util.regex.Pattern]. The regex must match the entire + keyspace name (no partial matching). +4. A regex exclusion, for example `"!/^Ks.*/"`. + +If the list is empty, or the option is unset, all keyspaces will match. Otherwise: + +* If a keyspace matches an exact name inclusion, it is always included, regardless of what any other + rule says. +* Otherwise, if it matches an exact name exclusion, it is always excluded, regardless of what any + regex rule says. +* Otherwise, if there are regex rules: + + * if they're only inclusions, the keyspace must match at least one of them. + * if they're only exclusions, the keyspace must match none of them. + * if they're both, the keyspace must match at least one inclusion and none of the + exclusions. + +For example, given the keyspaces `system`, `ks1`, `ks2`, `data1` and `data2`, here's the outcome of +a few filters: + +|Filter|Outcome|Translation| +|---|---|---| +| `[]` | `system`, `ks1`, `ks2`, `data1`, `data2` | Include all. | +| `["ks1", "ks2"]` | `ks1`, `ks2` | Include ks1 and ks2 (recommended, see explanation below). | +| `["!system"]` | `ks1`, `ks2`, `data1`, `data2` | Include all except system. | +| `["/^ks.*/"]` | `ks1`, `ks2` | Include all that start with ks. 
| +| `["!/^ks.*/"]` | `system`, `data1`, `data2` | Exclude all that start with ks (and include everything else). | +| `["system", "/^ks.*/"]` | `system`, `ks1`, `ks2` | Include system, and all that start with ks. | +| `["/^ks.*/", "!ks2"]` | `ks1` | Include all that start with ks, except ks2. | +| `["!/^ks.*/", "ks1"]` | `system`, `ks1`, `data1`, `data2` | Exclude all that start with ks, except ks1 (and also include everything else). | +| `["/^s.*/", /^ks.*/", "!/.*2$/"]` | `system`, `ks1` | Include all that start with s or ks, except if they end with 2. | + + +If an element is malformed, or if its regex has a syntax error, a warning is logged and that single +element is ignored. + +The default configuration (see [reference.conf](../../configuration/reference/)) excludes all +Cassandra and DSE system keyspaces. + +Try to use only exact name inclusions if possible. This allows the driver to filter on the server +side with a `WHERE IN` clause. If you use any other rule, it has to fetch all system rows and filter +on the client side. Note that, if you change the list at runtime, `onKeyspaceAdded`/`onKeyspaceDropped` will be invoked on your schema listeners for the newly included/excluded keyspaces. @@ -200,18 +329,27 @@ practice anyway: if you're in the middle of a rolling upgrade, you're probably n changes at the same time. -#### Relation to token metadata +### Relation to token metadata Some of the data in the [token map](../token/) relies on keyspace metadata (any method that takes a `CqlIdentifier` argument). If schema metadata is disabled or filtered, token metadata will also be unavailable for the excluded keyspaces. +### Performing schema updates from the client + +If you issue schema-altering requests from the driver (e.g. `session.execute("CREATE TABLE ..")`), +take a look at the [Performance](../../performance/#schema-updates) page for a few tips. 
-[Metadata#getKeyspaces]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Metadata.html#getKeyspaces-- -[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.html -[SchemaChangeListenerBase]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.html -[Session#setSchemaMetadataEnabled]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html#setSchemaMetadataEnabled-java.lang.Boolean- -[Session#checkSchemaAgreementAsync]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html#checkSchemaAgreementAsync-- -[ExecutionInfo#isSchemaInAgreement]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#isSchemaInAgreement-- +[Metadata#getKeyspaces]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html#getKeyspaces-- +[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.html +[SchemaChangeListenerBase]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListenerBase.html +[Session#setSchemaMetadataEnabled]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#setSchemaMetadataEnabled-java.lang.Boolean- +[Session#checkSchemaAgreementAsync]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#checkSchemaAgreementAsync-- +[SessionBuilder#addSchemaChangeListener]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addSchemaChangeListener-com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener- +[ExecutionInfo#isSchemaInAgreement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#isSchemaInAgreement-- +[com.datastax.dse.driver.api.core.metadata.schema]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/schema/package-frame.html +[DseFunctionMetadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/schema/DseFunctionMetadata.html +[DseAggregateMetadata]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/metadata/schema/DseAggregateMetadata.html -[JAVA-750]: https://datastax-oss.atlassian.net/browse/JAVA-750 \ No newline at end of file +[JAVA-750]: https://datastax-oss.atlassian.net/browse/JAVA-750 +[java.util.regex.Pattern]: https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html diff --git a/manual/core/metadata/token/README.md b/manual/core/metadata/token/README.md index 9934400b2ef..4d7cd9252df 100644 --- a/manual/core/metadata/token/README.md +++ b/manual/core/metadata/token/README.md @@ -1,5 +1,34 @@ + + ## Token metadata +### Quick overview + +[session.getMetadata().getTokenMap()][Metadata#getTokenMap] + +* used for token-aware routing or analytics clients. +* immutable (must invoke again to observe changes). +* `advanced.metadata.token-map.enabled` in the configuration (defaults to true). + +----- + [Metadata#getTokenMap] returns information about the tokens used for data replication. 
It is used
internally by the driver to send requests to the optimal coordinator when token-aware routing is
enabled. Another typical use case is data analytics clients, for example fetching a large range of
@@ -159,5 +188,5 @@ on [schema metadata](../schema/). If schema metadata is disabled or filtered, to
also be unavailable for the excluded keyspaces.

-[Metadata#getTokenMap]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/Metadata.html#getTokenMap--
-[TokenMap]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/metadata/TokenMap.html \ No newline at end of file
+[Metadata#getTokenMap]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/Metadata.html#getTokenMap--
+[TokenMap]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/TokenMap.html
diff --git a/manual/core/metrics/README.md b/manual/core/metrics/README.md
index 468ea436bbe..ef5d9b453f0 100644
--- a/manual/core/metrics/README.md
+++ b/manual/core/metrics/README.md
@@ -1,30 +1,157 @@
+
## Metrics

-The driver exposes measurements of its internal behavior through the popular [Dropwizard Metrics]
-library. Application developers can select which metrics are enabled, and export them to a
-monitoring tool.
+### Quick overview
+
+* `advanced.metrics` in the configuration. All metrics disabled by default. To enable, select the
+  metrics library to use, then define which individual metrics to activate.
+* some metrics are per node, others global to the session, or both.
+* unlike driver 3, JMX is not provided out of the box. You need to add the dependency manually.
+
+-----
+
+The driver is able to report measurements of its internal behavior to a variety of metrics
+libraries, and ships with bindings for three popular ones: [Dropwizard Metrics], [Micrometer
+Metrics] and [MicroProfile Metrics].
+
+### Selecting a Metrics Library
+
+#### Dropwizard Metrics
+
+Dropwizard is the driver's default metrics library; there is no additional configuration nor any
+extra dependency to add if you wish to use Dropwizard.
+
+#### Micrometer
+
+To use Micrometer you must:
+
+1. Define `MicrometerMetricsFactory` as the metrics factory to use in the driver configuration:
+
+```
+datastax-java-driver.advanced.metrics {
+  factory.class = MicrometerMetricsFactory
+}
+```
+
+2. Add a dependency on `java-driver-metrics-micrometer` in your application. This separate driver
+module contains the actual bindings for Micrometer, and depends itself on the Micrometer library:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-metrics-micrometer</artifactId>
+  <version>${driver.version}</version>
+</dependency>
+```
+
+3. You should also exclude Dropwizard and HdrHistogram, which are two transitive dependencies of the
+driver, because they are not relevant when using Micrometer:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-core</artifactId>
+  <exclusions>
+    <exclusion>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </exclusion>
+    <exclusion>
+      <groupId>org.hdrhistogram</groupId>
+      <artifactId>HdrHistogram</artifactId>
+    </exclusion>
+  </exclusions>
+</dependency>
+```
+
+#### MicroProfile Metrics
+
+To use MicroProfile Metrics you must:
+
+1. Define `MicroProfileMetricsFactory` as the metrics factory to use in the driver configuration:
+
+```
+datastax-java-driver.advanced.metrics {
+  factory.class = MicroProfileMetricsFactory
+}
+```
+
+2. Add a dependency on `java-driver-metrics-microprofile` in your application. This separate driver
+module contains the actual bindings for MicroProfile, and depends itself on the MicroProfile Metrics
+library:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-metrics-microprofile</artifactId>
+  <version>${driver.version}</version>
+</dependency>
+```
+
+3. You should also exclude Dropwizard and HdrHistogram, which are two transitive dependencies of the
+driver, because they are not relevant when using MicroProfile Metrics:
+
+```xml
+<dependency>
+  <groupId>org.apache.cassandra</groupId>
+  <artifactId>java-driver-core</artifactId>
+  <exclusions>
+    <exclusion>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </exclusion>
+    <exclusion>
+      <groupId>org.hdrhistogram</groupId>
+      <artifactId>HdrHistogram</artifactId>
+    </exclusion>
+  </exclusions>
+</dependency>
+```
+
+#### Other Metrics libraries
+
+Other metrics libraries can also be used. However, you will need to provide a custom
+metrics factory. Simply implement the
+`com.datastax.oss.driver.internal.core.metrics.MetricsFactory` interface for your library of choice,
+then pass the fully-qualified name of that implementation class to the driver using the
+`advanced.metrics.factory.class` option. See the [reference configuration].
+
+You will also need to add the metrics library as a dependency of your application.
+It is also recommended to exclude Dropwizard and HdrHistogram, as shown above.
+
+### Enabling specific driver metrics

-### Structure
-
-There are two categories of metrics:
+Now that the metrics library is configured, you need to activate the driver metrics you are
+interested in.
+
+There are two categories of driver metrics:

* session-level: the measured data is global to a `Session` instance. For example, `connected-nodes`
  measures the number of nodes to which we have connections.
* node-level: the data is specific to a node (and therefore there is one metric instance per node).
  For example, `pool.open-connections` measures the number of connections open to this particular
  node.

-Metric names are path-like, dot-separated strings. The driver prefixes them with the name of the
-session (see `session-name` in the configuration), and in the case of node-level metrics, `nodes`
-followed by a textual representation of the node's address. For example:
-
-```
-s0.connected-nodes => 2
-s0.nodes.127_0_0_1:9042.pool.open-connections => 2
-s0.nodes.127_0_0_2:9042.pool.open-connections => 1
-```

-### Configuration
+To find out which metrics are available, see the [reference configuration]. It contains a
+commented-out line for each metric, with detailed explanations on its intended usage.

By default, all metrics are disabled. You can turn them on individually in the configuration, by
adding their name to these lists:
@@ -36,31 +163,139 @@ datastax-java-driver.advanced.metrics {
}
```

-To find out which metrics are available, see the [reference configuration]. It contains a
-commented-out line for each metric, with detailed explanations on its intended usage.
+If you specify a metric that doesn't exist, it will be ignored, and a warning will be logged.
+
+Finally, if you are using Dropwizard or Micrometer and enabled any metric of timer type, such as
+`cql-requests`, it is also possible to provide additional configuration to fine-tune the underlying
+histogram's characteristics and precision, such as its highest expected latency, its number of
+significant digits to use, and its refresh interval. Again, see the [reference configuration] for
+more details.
+
+### Selecting a metric identifier style
+
+Most metric libraries uniquely identify a metric by a name and, optionally, by a set of key-value
+pairs, usually called tags.
+
+The `advanced.metrics.id-generator.class` option is used to customize how the driver generates
+metric identifiers. The driver ships with two built-in implementations:
+
+- `DefaultMetricIdGenerator`: generates identifiers composed solely of (unique) metric names; it
+  does not generate tags. All metric names start with the name of the session (see `session-name` in
+  the configuration), and in the case of node-level metrics, this is followed by `.nodes.`, followed
+  by a textual representation of the node's address. All names end with the metric distinctive name.
+  See below for examples. This generator is mostly suitable for use with metrics libraries that do
+  not support tags, like Dropwizard.
+
+- `TaggingMetricIdGenerator`: generates identifiers composed of a name and one or two tags.
+  Session-level metric names start with the `session.` prefix followed by the metric distinctive
+  name; node-level metric names start with the `nodes.` prefix followed by the metric distinctive
+  name. Session-level tags will include a `session` tag whose value is the session name (see
+  `session-name` in the configuration); node-level tags will include the same `session` tag, and
+  also a `node` tag whose value is the node's address. See below for examples. This generator is
+  mostly suitable for use with metrics libraries that support tags, like Micrometer or MicroProfile
+  Metrics.
+
+For example, here is how each one of them generates identifiers for the session metric "bytes-sent",
+assuming that the session is named "s0":
+
+- `DefaultMetricIdGenerator`:
+  - name: `s0.bytes-sent`
+  - tags: `{}`
+- `TaggingMetricIdGenerator`:
+  - name: `session.bytes-sent`
+  - tags: `{ "session" : "s0" }`
+
+Here is how each one of them generates identifiers for the node metric "bytes-sent", assuming that
+the session is named "s0", and the node's broadcast address is 10.1.2.3:9042:

-If you specify a metric that doesn't exist, it will be ignored and a warning will be logged.

+- `DefaultMetricIdGenerator`:
+  - name: `s0.nodes.10_1_2_3:9042.bytes-sent`
+  - tags: `{}`
+- `TaggingMetricIdGenerator`:
+  - name: `nodes.bytes-sent`
+  - tags: `{ "session" : "s0", "node" : "/10.1.2.3:9042" }`

-The `metrics` section may also contain additional configuration for some specific metrics; again,
-see the [reference configuration] for more details.

+As shown above, both built-in implementations generate names that are path-like structures separated
+by dots. This is the format most commonly expected by reporting tools.

-### Export
+Finally, it is also possible to define a global prefix for all metric names; this can be done with
+the `advanced.metrics.id-generator.prefix` option.

-The Dropwizard `MetricRegistry` is exposed via `session.getMetrics()`. You can retrieve it and
-configure a `Reporter` to send the metrics to a monitoring tool.
+The prefix should neither start nor end with a dot or any other path separator; the following are
+two valid examples: `cassandra` or `myapp.prod.cassandra`.
-#### JMX +For example, if this prefix is set to `cassandra`, here is how the session metric "bytes-sent" would +be named, assuming that the session is named "s0": + +- with `DefaultMetricIdGenerator`: `cassandra.s0.bytes-sent` +- with `TaggingMetricIdGenerator`: `cassandra.session.bytes-sent` + +Here is how the node metric "bytes-sent" would be named, assuming that the session is named "s0", +and the node's broadcast address is 10.1.2.3:9042: + +- with `DefaultMetricIdGenerator`: `cassandra.s0.nodes.10_1_2_3:9042.bytes-sent` +- with `TaggingMetricIdGenerator`: `cassandra.nodes.bytes-sent` + +### Using an external metric registry + +Regardless of which metrics library is used, you can provide an external metric registry object when +building a session. This allows the driver to transparently export its operational metrics to +whatever reporting system you want to use. + +To pass a metric registry object to the session, use the `CqlSessionBuilder.withMetricRegistry()` +method: + +```java +CqlSessionBuilder builder = CqlSession.builder(); +builder.withMetricRegistry(myRegistryObject); +CqlSession session = builder.build(); +``` + +Beware that the driver does not inspect the provided object, it simply passes it to the metrics +factory in use; it is the user's responsibility to provide registry objects compatible with the +metrics library in use. For reference, here are the expected base types for the three built-in +metrics libraries: + +* Dropwizard: `com.codahale.metrics.MetricRegistry` +* Micrometer: `io.micrometer.core.instrument.MeterRegistry` +* MicroProfile: `org.eclipse.microprofile.metrics.MetricRegistry` + +**NOTE:** MicroProfile **requires** an external instance of its registry to be provided. For +Micrometer, if no registry object is provided, Micrometer's `globalRegistry` will be used. For +Dropwizard, if no registry object is provided, an instance of `MetricRegistry` will be created and +used (in which case, it can be retrieved programmatically if needed, see below). + +### Programmatic access to driver metrics + +Programmatic access to driver metrics is only available when using Dropwizard Metrics. Users of +other libraries are encouraged to provide an external registry when creating the driver session (see +above), then use it to gain programmatic access to the driver metrics. + +The Dropwizard `MetricRegistry` object is exposed in the driver API via +`session.getMetrics().getRegistry()`. You can retrieve it and, for example, configure a `Reporter` +to send the metrics to a monitoring tool. + +**NOTE:** Beware that `session.getMetrics()` is not available when using other metrics libraries, +and will throw a `NoClassDefFoundError` at runtime if accessed in such circumstances. + +### Exposing driver metrics with JMX Unlike previous driver versions, JMX support is not included out of the box. +The way to add JMX support to your application depends largely on the metrics library being used. We +show below instructions for Dropwizard only. Micrometer also has support for JMX: please refer to +its [official documentation][Micrometer JMX]. 
+#### Dropwizard Metrics
+
Add the following dependency to your application (make sure the version matches the `metrics-core`
dependency of the driver):

-```
+```xml
<dependency>
  <groupId>io.dropwizard.metrics</groupId>
  <artifactId>metrics-jmx</artifactId>
-  <version>4.0.2</version>
+  <version>4.1.2</version>
</dependency>
```

@@ -120,12 +355,14 @@ JmxReporter reporter =
reporter.start();
```

-#### Other protocols
+### Exporting metrics with other protocols

Dropwizard Metrics has built-in reporters for other output formats: JSON (via a servlet), stdout,
CSV files, SLF4J logs and Graphite. Refer to their [manual][Dropwizard manual] for more details.
-
-[Dropwizard Metrics]: http://metrics.dropwizard.io/4.0.0/manual/index.html
-[Dropwizard Manual]: http://metrics.dropwizard.io/4.0.0/getting-started.html#reporting-via-http
-[reference configuration]: ../configuration/reference/ \ No newline at end of file
+[Dropwizard Metrics]: https://metrics.dropwizard.io/4.1.2
+[Dropwizard Manual]: https://metrics.dropwizard.io/4.1.2/getting-started.html
+[Micrometer Metrics]: https://micrometer.io/docs
+[Micrometer JMX]: https://micrometer.io/docs/registry/jmx
+[MicroProfile Metrics]: https://github.com/eclipse/microprofile-metrics
+[reference configuration]: ../configuration/reference/
diff --git a/manual/core/native_protocol/README.md b/manual/core/native_protocol/README.md
index ff48e2b6252..42146e63f42 100644
--- a/manual/core/native_protocol/README.md
+++ b/manual/core/native_protocol/README.md
@@ -1,5 +1,38 @@
+
## Native protocol

+### Quick overview
+
+Low-level binary format. Mostly irrelevant for everyday use, only governs whether certain features
+are available.
+
+* setting the version:
+  * automatically negotiated during the connection (improved algorithm in driver 4, no longer an
+    issue in mixed clusters).
+  * or force with `advanced.protocol.version` in the configuration.
+* reading the version:
+  [session.getContext().getProtocolVersion()][AttachmentPoint.getProtocolVersion].
+
+-----
+
The native protocol defines the format of the binary messages exchanged between the driver and
Cassandra over TCP. As a driver user, you don't need to know the fine details (although the
[protocol spec] is available if you're curious); the most visible aspect is that some features are
@@ -7,20 +40,29 @@ only available with specific protocol versions.

### Compatibility matrix

-Java driver 4 supports protocol versions 3 to 5. By default, the version is negotiated with the
+Java Driver 4 supports protocol versions 3 to 5. By default, the version is negotiated with the
first node the driver connects to:

-| Cassandra version   | Negotiated protocol version with driver 4 ¹ |
-|---------------------|---------------------------------------------|
-| 2.1.x (DSE 4.7/4.8) | v3                                          |
-| 2.2.x               | v4                                          |
-| 3.x (DSE 5.0/5.1)   | v4                                          |
-| 4.x ²               | v5                                          |
+| Cassandra version | Negotiated protocol version with driver 4 ¹ |
+|-------------------|---------------------------------------------|
+| 2.1.x             | v3                                          |
+| 2.2.x             | v4                                          |
+| 3.x               | v4                                          |
+| 4.x               | v5                                          |

*(1) for previous driver versions, see the [3.x documentation][driver3]*

-*(2) at the time of writing, Cassandra 4 is not released yet. Protocol v5 support is still in beta,
-and must be enabled explicitly (negotiation will yield v4).*
+Since version 4.5.0, the driver can also use DSE protocols when all nodes are running a version of
+DSE.
The table below shows the protocol matrix for these cases:
+
+| DSE version | Negotiated protocol version with driver 4 |
+|-------------|-------------------------------------------|
+| 4.7/4.8     | v3                                        |
+| 5.0         | v4                                        |
+| 5.1         | DSE_V1 ²                                  |
+| 6.0/6.7/6.8 | DSE_V2 ²                                  |
+
+*(2) DSE protocols take precedence over the regular Cassandra protocols during negotiation.*
 
 ### Controlling the protocol version
 
@@ -36,16 +78,19 @@ the [configuration](../configuration/):
 ```
 datastax-java-driver {
   advanced.protocol {
-    version = v3
+    version = V3
   }
 }
 ```
 
+Note that the protocol version is case-sensitive and must be specified in uppercase: "V3" is
+correct, "v3" is not.
+
 If you force a version that is too high for the server, you'll get an error:
 
 ```
 Exception in thread "main" com.datastax.oss.driver.api.core.AllNodesFailedException:
-  All 1 node tried for the query failed (showing first 1, use getErrors() for more:
+  All 1 node tried for the query failed (showing first 1 nodes, use getAllErrors() for more:
   /127.0.0.1:9042: com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException:
   [/127.0.0.1:9042] Host does not support protocol version V5)
 ```
@@ -72,14 +117,17 @@ force the protocol version manually anymore.
 
 ### Debugging protocol negotiation
 
-The main steps are [logged](../logging/) at level `INFO`. If the driver downgrades while negotiating
-with the first node, you should see logs such as:
+You can observe the negotiation process in the [logs](../logging/).
+
+The versions tried while negotiating with the first node are logged at level `DEBUG` in the
+category `com.datastax.oss.driver.internal.core.channel.ChannelFactory`:
 
 ```
-INFO  ChannelFactory - Failed to connect with protocol v4, retrying with v3
+DEBUG ChannelFactory - Failed to connect with protocol v4, retrying with v3
 ```
 
-If it then detects a mixed cluster with lower versions, it will log:
+If a mixed cluster renegotiation happens, it is logged at level `INFO` in the category
+`com.datastax.oss.driver.internal.core.session.DefaultSession`:
 
 ```
 INFO  DefaultSession - Negotiated protocol version v4 for the initial contact point, but other nodes
@@ -104,7 +152,8 @@ If you want to see the details of mixed cluster negotiation, enable `DEBUG` leve
   in the face of schema changes
 
 [protocol spec]: https://github.com/datastax/native-protocol/tree/1.x/src/main/resources
-[driver3]: https://docs.datastax.com/en/developer/java-driver/3.5/manual/native_protocol/
+[driver3]: https://docs.datastax.com/en/developer/java-driver/3.10/manual/native_protocol/
 
-[ExecutionInfo.getWarnings]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getWarnings--
-[Request.getCustomPayload]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Request.html#getCustomPayload--
\ No newline at end of file
+[ExecutionInfo.getWarnings]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getWarnings--
+[Request.getCustomPayload]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getCustomPayload--
+[AttachmentPoint.getProtocolVersion]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/detach/AttachmentPoint.html#getProtocolVersion--
diff --git a/manual/core/non_blocking/README.md b/manual/core/non_blocking/README.md
new file mode 100644
index 00000000000..f320ffd13d2
--- /dev/null
+++ b/manual/core/non_blocking/README.md
@@ -0,0 +1,306 @@
+
+
+## Non-blocking programming
+
+### Quick overview
+
+With the advent of reactive programming, the demand for fully non-blocking libraries has grown
+among application developers. The recent availability of frameworks enforcing lock-freedom, such as
+[Vert.x] or [Reactor], along with tools for automatic detection of blocking calls like [BlockHound],
+has amplified this trend even further.
+
+[Vert.x]: https://vertx.io
+[Reactor]: https://projectreactor.io
+[BlockHound]: https://github.com/reactor/BlockHound
+
+**In summary, when used properly, the Java Driver offers non-blocking guarantees for most
+of its operations, and during most of the session lifecycle.**
+
+These guarantees and their exceptions are detailed below. A final chapter explains how to use the
+driver with BlockHound.
+
+The developer guide also has more information on driver internals and its
+[concurrency model](../../developer/common/concurrency).
+
+### Definition of "non-blocking"
+
+Since the term "non-blocking" is subject to interpretation, in this page the term should be
+understood as "[lock-free]": a program is non-blocking if at least one thread is guaranteed to make
+progress; such programs are implemented without locks, mutexes, or semaphores, using only low-level
+primitives such as atomic variables and CAS (compare-and-swap) instructions.
+
+A further distinction is generally established between "lock-free" and "wait-free" algorithms: the
+former allow progress of the overall system, while the latter allow each thread to make progress at
+any time. This distinction, however, is rather theoretical and outside the scope of this document.
+
+[lock-free]: https://www.baeldung.com/lock-free-programming
+
+### Driver lock-free guarantees
+
+#### Driver lock-free guarantees per execution model
+
+The driver offers many execution models. For the built-in ones, the lock-free guarantees are as
+follows:
+
+* The synchronous API is blocking and does not offer any lock-free guarantee.
+* The [asynchronous](../async) API is implemented with lock-free algorithms.
+* The [reactive](../reactive) API is implemented with lock-free algorithms (it's actually
+  wait-free).
+
+For example, calling any synchronous method declared in [`SyncCqlSession`], such as [`execute`],
+will block until the result is available. These methods should never be used in non-blocking
+applications.
+
+[`SyncCqlSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html
+[`execute`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html#execute-com.datastax.oss.driver.api.core.cql.Statement-
+
+However, the asynchronous methods declared in [`AsyncCqlSession`], such as [`executeAsync`], are
+all safe for use in non-blocking applications; the statement execution and asynchronous result
+delivery are guaranteed to never block.
+
+[`AsyncCqlSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.html
+[`executeAsync`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncCqlSession.html#executeAsync-com.datastax.oss.driver.api.core.cql.Statement-
+
+The same applies to the methods declared in [`ReactiveSession`] such as [`executeReactive`]: the
+returned publisher will never block when subscribed to, until the final results are delivered to
+the subscriber.
+
+[`ReactiveSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.html
+[`executeReactive`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.html#executeReactive-com.datastax.oss.driver.api.core.cql.Statement-
+
+There is one exception though: continuous paging queries (a feature specific to DSE) have a special
+execution model which uses internal locks for coordination. Although such locks are only held for
+extremely brief periods of time, and never under high contention, this execution model doesn't
+qualify as lock-free.
+
+As a consequence, none of the methods declared in [`ContinuousSession`] and
+[`ContinuousReactiveSession`] can be considered fully lock-free, not even those built on top of the
+asynchronous or reactive APIs like [`executeContinuouslyAsync`] and [`executeContinuouslyReactive`].
+In practice though, continuous paging is extremely efficient and can safely be used in most
+non-blocking contexts, unless strict lock-freedom is required.
+
+[`ContinuousSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.html
+[`ContinuousReactiveSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.html
+[`executeContinuouslyAsync`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/ContinuousSession.html#executeContinuouslyAsync-com.datastax.oss.driver.api.core.cql.Statement-
+[`executeContinuouslyReactive`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/continuous/reactive/ContinuousReactiveSession.html#executeContinuouslyReactive-com.datastax.oss.driver.api.core.cql.Statement-
+
+#### Driver lock-free guarantees per session lifecycle phase
+
+The guarantees vary according to three possible session states: initializing, running, and closing.
+
+Session initialization is a costly operation that performs many I/O operations, hitting both the
+local filesystem (configuration files) and the network (connection initialization). This procedure
+is triggered by a call to [`SessionBuilder.buildAsync()`] and happens partially on the calling
+thread, and partially asynchronously on an internal driver thread.
+
+* The creation of the [driver context] happens synchronously on the calling thread. The context
+  creation usually requires file I/O, mainly to read configuration files. A call to
+  `SessionBuilder.buildAsync()`, in spite of its name, is thus a blocking call and must be
+  dispatched to a thread that is allowed to block.
+* The rest of the initialization process will happen asynchronously, on an internal driver admin
+  thread. This process is mostly non-blocking, with a few exceptions listed below. Therefore,
+  the driver admin thread performing the initialization tasks must be allowed to block, at least
+  temporarily.
+
+[driver context]: ../../developer/common/context
+
+For the reasons above, the initialization phase doesn't qualify as lock-free. For non-blocking
+applications, it is generally advised to trigger session initialization during application startup,
+before strong non-blocking guarantees are enforced on application threads.
+
+Similarly, a call to [`SessionBuilder.build()`] should be considered blocking: it blocks the
+calling thread until the session is fully initialized. For this reason, calls to
+`SessionBuilder.build()` should be avoided in non-blocking applications.
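+
+As a sketch, a non-blocking application could dispatch the whole initialization to a thread pool
+that tolerates blocking calls (the `startupExecutor` below is a placeholder for such a pool):
+
+```java
+CompletionStage<CqlSession> sessionFuture =
+    CompletableFuture.supplyAsync(
+            () -> CqlSession.builder().buildAsync(), // may block briefly (configuration file I/O)
+            startupExecutor)
+        .thenCompose(stage -> stage); // flatten into a single CompletionStage<CqlSession>
+```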
+
+[`SessionBuilder.buildAsync()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#buildAsync--
+[`SessionBuilder.build()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#build--
+
+Once the session is initialized, however, the driver is guaranteed to be non-blocking during the
+session's lifecycle, and under normal operation, unless otherwise noted elsewhere in this document.
+
+Finally, closing the session is generally non-blocking, but the driver offers no strong guarantees
+during that phase. Therefore, calls to any method declared in [`AsyncAutoCloseable`], including the
+asynchronous ones like [`closeAsync()`], should preferably be deferred until the application is
+shut down and lock-freedom enforcement is disabled.
+
+[`AsyncAutoCloseable`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncAutoCloseable.html
+[`closeAsync()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncAutoCloseable.html#closeAsync--
+
+#### Driver lock-free guarantees for specific components
+
+Certain driver components are not implemented with lock-free algorithms.
+
+For example, [`SafeInitNodeStateListener`] is implemented with internal locks for coordination. It
+should not be used if strict lock-freedom is enforced.
+
+[`SafeInitNodeStateListener`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/metadata/SafeInitNodeStateListener.html
+
+Among the built-in [request throttlers], the `RateLimitingRequestThrottler` is currently blocking:
+it uses internal locks for coordination and, depending on how many requests are being executed in
+parallel, the thread contention on those locks can be high. The
+`ConcurrencyLimitingRequestThrottler`, on the other hand, is lock-free. In short, if your
+application enforces strict lock-freedom, you should not use the `RateLimitingRequestThrottler`.
+See the section about [throttling](../throttling) for details about these components.
+
+[request throttlers]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/throttling/RequestThrottler.html
+
+Other components may be lock-free, *except* for their first invocation. This is the case for the
+following items:
+
+* All built-in implementations of [`TimestampGenerator`], upon instantiation;
+* The utility method [`Uuids.timeBased()`].
+
+[`TimestampGenerator`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/time/TimestampGenerator.html
+[`Uuids.timeBased()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/uuid/Uuids.html#timeBased--
+
+Both components need to access native libraries when they are initialized, and this may involve
+hitting the local filesystem, thus turning the initialization into a blocking call.
+
+Timestamp generators are automatically created when the session is initialized, and are thus
+generally safe to use afterwards.
+
+`Uuids.timeBased()`, however, is a convenience method that the driver doesn't use internally. For
+this reason, it is advised to call this method once during application startup, so that it is safe
+to use afterwards in a non-blocking context.
+
+Alternatively, it's possible to disable client-side timestamp generation and/or the use of native
+libraries.
See the manual sections on [query timestamps](../query_timestamps) and
+[integration](../integration) for more information.
+
+One component, the codec registry, can block when its [`register`] method is called; it is
+therefore advised to register codecs during application startup only. See the
+[custom codecs](../custom_codecs) section for more details about registering codecs.
+
+[`register`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/MutableCodecRegistry.html#register-com.datastax.oss.driver.api.core.type.codec.TypeCodec-
+
+Finally, a few internal components also use locks, but only during session initialization; once the
+session is ready, they are either discarded, or don't use locks anymore for the rest of the
+session's lifecycle.
+
+These components are safe to use once the session is ready, although they could be reported by
+lock-freedom monitoring tools. They are listed below in case their exclusion is necessary:
+
+* `com.datastax.oss.driver.internal.core.context.DefaultNettyOptions`
+* `com.datastax.oss.driver.internal.core.util.concurrent.LazyReference`
+* `com.datastax.oss.driver.internal.core.util.concurrent.ReplayingEventFilter`
+
+#### Driver lock-free guarantees on topology and status events
+
+Topology and status events can cause the driver to use locks temporarily.
+
+When a node gets added to the cluster, or when a node state changes (DOWN to UP or vice versa), the
+driver needs to notify a few components: the load balancing policies need to coordinate in order to
+assign a new distance to the node (LOCAL, REMOTE or IGNORED); and the node connection pool will have
+to be resized either to accommodate new connections, or to close existing ones.
+
+These operations use internal locks for coordination. Again, the locks are only held for extremely
+brief periods of time, and never under high contention. Note that this behavior cannot be disabled
+or changed; if you need to enforce strict lock-freedom, and topology or status changes are being
+reported as infringements, consider adding exceptions for the following method calls:
+
+* `com.datastax.oss.driver.internal.core.pool.ChannelSet#add(DriverChannel)`
+* `com.datastax.oss.driver.internal.core.pool.ChannelSet#remove(DriverChannel)`
+* `com.datastax.oss.driver.internal.core.metadata.LoadBalancingPolicyWrapper$SinglePolicyDistanceReporter#setDistance(Node,NodeDistance)`
+
+#### Driver lock-free guarantees on random UUID generation
+
+Until driver 4.9, the [`Uuids.random()`] method was a blocking call. Because of that, this method
+could not be used in non-blocking contexts, making UUID generation a difficult issue to solve.
+
+Moreover, this method is used in a few places internally. This situation was unfortunate because
+lock-freedom enforcement tools could report calls to that method, but it was impossible to suppress
+these calls. Thanks to [JAVA-2449], released with driver 4.10.0, `Uuids.random()` became a
+non-blocking call and random UUIDs can now be safely generated in non-blocking applications.
+
+[`Uuids.random()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/uuid/Uuids.html#random--
+[JAVA-2449]: https://datastax-oss.atlassian.net/browse/JAVA-2449
+
+#### Driver lock-free guarantees when reloading the configuration
+
+The driver has a pluggable configuration mechanism built around the [`DriverConfigLoader`]
+interface.
Implementors may choose to support [hot-reloading] of configuration files; the built-in
+implementation has this feature enabled by default.
+
+Beware that with the default configuration mechanism, hot-reloading is performed on an internal
+driver admin thread. If hot-reloading is enabled, this might be reported by lock-freedom
+infringement detectors. If that is the case, it is advised to disable hot-reloading by setting the
+`datastax-java-driver.basic.config-reload-interval` option to 0. See the manual page on
+[configuration](../configuration) for more information.
+
+[`DriverConfigLoader`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html
+[hot-reloading]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#supportsReloading--
+
+#### Driver lock-free guarantees when connecting to DSE
+
+When connecting to clusters running recent DSE versions, the driver automatically enables periodic
+status reporting. When preparing the status report, the driver has to hit the local filesystem, and
+because of that, the status reporting process does not qualify as lock-free.
+
+If lock-freedom is being enforced, then automatic status reporting must be disabled by setting the
+`datastax-java-driver.advanced.monitor-reporting.enabled` property to false in the driver
+configuration.
+
+### Driver mechanism for detection of blocking calls
+
+The driver has its own mechanism for detecting blocking calls happening on an internal driver
+thread. This mechanism is capable of detecting and reporting blatant cases of misuse of the
+asynchronous and reactive APIs, e.g. when the synchronous API is invoked inside a future or callback
+produced by the asynchronous execution of a statement. See the core manual page on the
+[asynchronous](../async) API or the developer manual page on
+[driver concurrency](../../developer/common/concurrency) for details.
+
+The driver is not capable, however, of detecting low-level lock-freedom infringements, such as the
+usage of locks. You must use an external tool to achieve that; see below for how to use BlockHound
+with the driver.
+
+### Using the driver with Reactor BlockHound
+
+[Reactor]'s tool for automatic detection of blocking calls, [BlockHound], is capable of detecting
+and reporting any sort of blocking call, including I/O, locks, `Thread.sleep`, etc.
+
+When used with the driver, BlockHound can report some calls that, for the reasons explained above,
+can safely be considered false positives.
+
+For this reason, the driver, since version 4.10, ships with a custom `DriverBlockHoundIntegration`
+class which is automatically discovered by BlockHound through the Service Loader mechanism. It
+contains BlockHound customizations that target most of the cases detailed above, and prevent them
+from being reported as blocking calls.
+
+More specifically, the following items are currently declared to be allowed:
+
+* Loading of native libraries during startup (`TimestampGenerator`);
+* Locks held during startup only (`DefaultNettyOptions`, `LazyReference`, `ReplayingEventFilter`);
+* Locks held during startup and topology and status events processing (`ChannelSet`,
+  `DistanceReporter`);
+* Locks held when executing continuous paging queries;
+* Locks held during calls to `MutableCodecRegistry.register()` and `Uuids.timeBased()`.
+ +The following items are NOT declared to be allowed and are likely to be reported by BlockHound if +used: + +* Request throttlers; +* Automatic status reporting; +* `SafeInitNodeStateListener`. + +Note that other blocking startup steps, e.g. loading of configuration files, are also not declared +to be allowed, because these are genuine blocking I/O calls. For this reason, if BlockHound is being +used, the loading of the driver context, performed by the thread calling `SessionBuilder.build()` +or `SessionBuilder.buildAsync()`, must be allowed to perform blocking calls. diff --git a/manual/core/paging/README.md b/manual/core/paging/README.md index 444f249ff5f..2df92bd69d1 100644 --- a/manual/core/paging/README.md +++ b/manual/core/paging/README.md @@ -1,5 +1,40 @@ + + ## Paging +### Quick overview + +How the server splits large result sets into multiple network responses. + +* `basic.request.page-size` in the configuration. +* transparent in the synchronous API (`session.execute`): the driver fetches new pages in the + background as you iterate. +* explicit in the asynchronous API (`session.executeAsync`): + [AsyncResultSet.hasMorePages()][AsyncPagingIterable.hasMorePages] and + [AsyncResultSet.fetchNextPage()][AsyncPagingIterable.fetchNextPage]. +* paging state: record the current position and reuse it later (forward only). +* offset queries: emulated client-side with [OffsetPager] \(**this comes with important performance + trade-offs, make sure you read and understand the full documentation below**). + +----- + When a query returns many rows, it would be inefficient to return them as a single response message. Instead, the driver breaks the results into *pages* which get returned as they are needed. @@ -64,57 +99,61 @@ for (Row row : rs) { |<------------------------ | | ``` -By default, the background fetch happens at the last moment, when there are no more "local" rows -available. If you need finer control, [ResultSet] provides the following methods: - -* `getAvailableWithoutFetching()` and `isFullyFetched()` to check the current state; -* `fetchMoreResults()` to force a page fetch. - -Here's how you could use these methods to pre-fetch the next page in advance, in order to avoid the -performance hit at the end of each page: - -```java -ResultSet rs = session.execute("your query"); -for (Row row : rs) { - // Fetch when there's only half a page left: - if (rs.getAvailableWithoutFetching() == 10 && !rs.isFullyFetched()) { - rs.fetchMoreResults(); // this is asynchronous - } - // Process the row... -} -``` - ### Asynchronous paging In previous versions of the driver, the synchronous and asynchronous APIs returned the same `ResultSet` type. This made asynchronous paging very tricky, because it was very easy to accidentally trigger background synchronous queries (which would defeat the whole purpose of async, -or potentially introduce deadlocks). +and potentially introduce deadlocks). To avoid this problem, the driver's asynchronous API now returns a dedicated [AsyncResultSet]; -iteration only yields the current page, and the next page must be explicitly fetched. Here's how -that translates to our example: +iteration only yields the current page, and the next page must be fetched explicitly. To iterate a +result set in a fully asynchronous manner, you need to compose page futures using the methods of +[CompletionStage]. 
Here's an example that prints each row on the command line:
 
 ```java
-CompletionStage<AsyncResultSet> futureRs =
+CompletionStage<AsyncResultSet> resultSetFuture =
     session.executeAsync("SELECT * FROM myTable WHERE id = 1");
-futureRs.whenComplete(this::processRows);
+// The returned stage will complete once all the rows have been printed:
+CompletionStage<Void> printRowsFuture = resultSetFuture.thenCompose(this::printRows);
 
-void processRows(AsyncResultSet rs, Throwable error) {
-  if (error != null) {
-    // The query failed, process the error
+private CompletionStage<Void> printRows(AsyncResultSet resultSet) {
+  for (Row row : resultSet.currentPage()) {
+    System.out.println(row.getFormattedContents());
+  }
+  if (resultSet.hasMorePages()) {
+    return resultSet.fetchNextPage().thenCompose(this::printRows);
   } else {
-    for (Row row : rs.currentPage()) {
-      // Process the row...
-    }
-    if (rs.hasMorePages()) {
-      rs.fetchNextPage().whenComplete(this::processRows);
-    }
+    return CompletableFuture.completedFuture(null);
   }
 }
 ```
 
+If you need to propagate state throughout the iteration, add parameters to the callback. Here's an
+example that counts the number of rows (this is contrived: you would use `SELECT COUNT(*)` instead
+of counting client-side, but it illustrates the basic principle):
+
+```java
+CompletionStage<AsyncResultSet> resultSetFuture =
+    session.executeAsync("SELECT * FROM myTable WHERE id = 1");
+CompletionStage<Integer> countFuture = resultSetFuture.thenCompose(rs -> countRows(rs, 0));
+
+private CompletionStage<Integer> countRows(AsyncResultSet resultSet, int previousPagesCount) {
+  int count = previousPagesCount;
+  for (Row row : resultSet.currentPage()) {
+    count += 1;
+  }
+  if (resultSet.hasMorePages()) {
+    int finalCount = count; // need a final variable to use in the lambda below
+    return resultSet.fetchNextPage().thenCompose(rs -> countRows(rs, finalCount));
+  } else {
+    return CompletableFuture.completedFuture(count);
+  }
+}
+```
+
+See [Asynchronous programming](../async/) for more tips about the async API.
 
 ### Saving and reusing the paging state
 
@@ -129,53 +168,117 @@ The driver exposes a *paging state* for that:
 ResultSet rs = session.execute("your query");
 ByteBuffer pagingState = rs.getExecutionInfo().getPagingState();
 
+// Finish processing the current page
+while (rs.getAvailableWithoutFetching() > 0) {
+  Row row = rs.one();
+  // process the row
+}
+
 // Later:
 SimpleStatement statement =
     SimpleStatement.builder("your query").setPagingState(pagingState).build();
 session.execute(statement);
 ```
 
+Note the loop to finish the current page after we extract the state. The new statement will start at
+the beginning of the next page, so we want to make sure we don't leave a gap of unprocessed rows.
+
 The paging state can only be reused with the exact same statement (same query string, same
 parameters). It is an opaque value that is only meant to be collected, stored and re-used. If you
 try to modify its contents or reuse it with a different statement, the results are unpredictable.
 
+If you want additional safety, the driver also provides a "safe" wrapper around the raw value:
+[PagingState].
+
+```java
+PagingState pagingState = rs.getExecutionInfo().getSafePagingState();
+```
+
+It works in the exact same manner, except that it will throw an `IllegalStateException` if you try
+to reinject it in the wrong statement. This allows you to detect the error early, without a
+roundtrip to the server.
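+
+For example, here is a minimal sketch of a complete save-and-reuse cycle with the safe wrapper (the
+query string is a placeholder):
+
+```java
+ResultSet rs = session.execute("SELECT ... WHERE k = 1");
+PagingState pagingState = rs.getExecutionInfo().getSafePagingState();
+
+// Later: reinject the state into an identical statement to resume from the next page.
+// Reinjecting it into a different statement throws IllegalStateException immediately,
+// instead of failing on the server side.
+SimpleStatement statement =
+    SimpleStatement.newInstance("SELECT ... WHERE k = 1").setPagingState(pagingState);
+session.execute(statement);
+```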
+ +Note that, if you use a simple statement and one of the bound values requires a [custom +codec](../custom_codecs), you have to provide a reference to the session when reinjecting the paging +state: + +```java +CustomType value = ... +SimpleStatement statement = SimpleStatement.newInstance("query", value); +// session required here, otherwise you will get a CodecNotFoundException: +statement = statement.setPagingState(pagingState, session); +``` + +This is a small corner case because checking the state requires encoding the values, and a simple +statement doesn't have a reference to the codec registry. If you don't use custom codecs, or if the +statement is a bound statement, you can use the regular `setPagingState(pagingState)`. ### Offset queries -Saving the paging state works well when you only let the user move from one page to the next. But it -doesn't allow random jumps (like "go directly to page 10"), because you can't fetch a page unless -you have the paging state of the previous one. Such a feature would require *offset queries*, but -they are not natively supported by Cassandra (see -[CASSANDRA-6511](https://issues.apache.org/jira/browse/CASSANDRA-6511)). The rationale is that -offset queries are inherently inefficient (the performance will always be linear in the number of -rows skipped), so the Cassandra team doesn't want to encourage their use. - -If you really want offset queries, you can emulate them client-side. You'll still get linear -performance, but maybe that's acceptable for your use case. For example, if each page holds 10 rows -and you show at most 20 pages, it means that in the worst case you'll fetch 190 extra rows, which is -probably not a big performance hit. - -For example, if the page size is 10, the fetch size is 50, and the user asks for page 12 (rows 110 -to 119): - -* execute the statement a first time (the result set contains rows 0 to 49, but you're not going to - use them, only the paging state); -* execute the statement a second time with the paging state from the first query; -* execute the statement a third time with the paging state from the second query. The result set now - contains rows 100 to 149; -* skip the first 10 rows of the iterator. Read the next 10 rows and discard the remaining ones. - -You'll want to experiment with the fetch size to find the best balance: too small means many -background queries; too big means bigger messages and too many unneeded rows returned (we picked 50 -above for the sake of example, but it's probably too small -- the default is 5000). - -Again, offset queries are inefficient by nature. Emulating them client-side is a compromise when you -think you can get away with the performance hit. We recommend that you: - -* test your code at scale with the expected query patterns, to make sure that your assumptions are - correct; -* set a hard limit on the highest possible page number, to prevent malicious clients from triggering - queries that would skip a huge amount of rows. - -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/ResultSet.html -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html +Saving the paging state works well when you only let the user move from one page to the next. But in +most Web UIs and REST services, you need paginated results with random access, for example: "given a +page size of 20 elements, fetch page 5". 
+
+Cassandra does not support this natively (see
+[CASSANDRA-6511](https://issues.apache.org/jira/browse/CASSANDRA-6511)), because such queries are
+inherently linear: the database would have to restart from the beginning every time, and skip
+unwanted rows until it reaches the desired offset.
+
+However, random pagination is a real need for many applications, and linear performance can be a
+reasonable trade-off if the cardinality stays low. The driver provides a utility to emulate offset
+queries on the client side: [OffsetPager].
+
+#### Performance considerations
+
+For each page that you want to retrieve:
+
+* you need to re-execute the query, in order to start with a fresh result set;
+* you then pass the result to `OffsetPager`, which starts iterating from the beginning, and skips
+  rows until it reaches the desired offset.
+
+```java
+String query = "SELECT ...";
+OffsetPager pager = new OffsetPager(20);
+
+// Get page 2: start from a fresh result set, throw away rows 1-20, then return rows 21-40
+ResultSet rs = session.execute(query);
+OffsetPager.Page<Row> page2 = pager.getPage(rs, 2);
+
+// Get page 5: start from a fresh result set, throw away rows 1-80, then return rows 81-100
+rs = session.execute(query);
+OffsetPager.Page<Row> page5 = pager.getPage(rs, 5);
+```
+
+Note that `getPage` can also process the entity iterables returned by the [mapper](../../mapper/).
+
+#### Establishing application-level guardrails
+
+Linear performance should be fine for the values typically encountered in real-world applications:
+for example, if the page size is 25 and users never go past page 10, the worst case is only 250
+rows, which is a very small result set. However, we strongly recommend that you implement hard
+limits in your application code: if the page number is exposed to the user (for example, as a URL
+parameter), validate it and enforce a maximum, so that an attacker can't inject a large value and
+trigger queries that skip millions of rows (see the sketch at the end of this page).
+
+#### Relation with protocol-level paging
+
+Offset paging has no direct relation to `basic.request.page-size`. Protocol-level paging happens
+under the hood, and is completely transparent for offset paging: `OffsetPager` will work the same
+no matter how many network roundtrips were needed to fetch the result. You don't need to set the
+protocol page size and the logical page size to the same value.
+
+-----
+
+The [driver examples] include two complete web service implementations demonstrating forward-only
+and offset paging.
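+
+To make the guardrail advice above concrete, here is a sketch of a validated page lookup (the
+handler shape and the `MAX_PAGE` bound are illustrative, not part of the driver API):
+
+```java
+static final int MAX_PAGE = 20; // hard limit enforced by the application
+
+OffsetPager.Page<Row> getValidatedPage(CqlSession session, String query, int pageNumber) {
+  if (pageNumber < 1 || pageNumber > MAX_PAGE) {
+    throw new IllegalArgumentException("Invalid page number: " + pageNumber);
+  }
+  OffsetPager pager = new OffsetPager(25);
+  return pager.getPage(session.execute(query), pageNumber);
+}
+```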
+
+[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html
+[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html
+[AsyncPagingIterable.hasMorePages]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncPagingIterable.html#hasMorePages--
+[AsyncPagingIterable.fetchNextPage]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AsyncPagingIterable.html#fetchNextPage--
+[OffsetPager]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/paging/OffsetPager.html
+[PagingState]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/PagingState.html
+
+[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html
+
+[driver examples]: https://github.com/datastax/java-driver/tree/4.x/examples/src/main/java/com/datastax/oss/driver/examples/paging
diff --git a/manual/core/performance/README.md b/manual/core/performance/README.md
index 34635c9ed5f..3afb321968e 100644
--- a/manual/core/performance/README.md
+++ b/manual/core/performance/README.md
@@ -1,3 +1,22 @@
+
+
 ## Performance
 
 This page is intended as a checklist for everything related to driver performance. Most of the
@@ -300,9 +319,9 @@ You should group your schema changes as much as possible.
 
 Every change made from a client will be pushed to all other clients, causing them to refresh their
 metadata. If you have multiple client instances, it might be a good idea to
-[deactivate the metadata](../metadata/schema/#enabling-disabling) on other clients while you apply
-the updates, and reactivate it at the end. Reactivating will trigger an immediate refresh, so you
-can even ramp this up to avoid a "thundering herd" effect.
+[deactivate the metadata](../metadata/schema/#enabling-disabling) on all clients while you apply the
+updates, and reactivate it at the end (reactivating triggers an immediate refresh, so you might want
+to stagger the reactivations across clients to avoid a "thundering herd" effect; see the sketch below).
 
 Schema changes have to replicate to all nodes in the cluster. To minimize the chance of schema
 disagreement errors:
@@ -345,8 +364,8 @@ possible to reuse the same event loop group for I/O, admin tasks, and even your
 (the driver's internal code is fully asynchronous so it will never block any thread). The timer is
 the only one that will have to stay on a separate thread.
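+
+To make the schema metadata toggle described above concrete, here is a minimal sketch
+(`applySchemaChanges` is a hypothetical helper that runs your DDL statements):
+
+```java
+// Stop refreshing schema metadata while a batch of schema changes is applied:
+session.setSchemaMetadataEnabled(false);
+applySchemaChanges(session);
+// Re-enabling triggers an immediate refresh of the metadata:
+session.setSchemaMetadataEnabled(true);
+```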
-[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/AccessibleByName.html -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlIdentifier.html -[CqlSession.prepare(SimpleStatement)]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlSession.html#prepare-com.datastax.oss.driver.api.core.cql.SimpleStatement- -[GenericType]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/reflect/GenericType.html -[Statement.setNode()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/Statement.html#setNode-com.datastax.oss.driver.api.core.metadata.Node- \ No newline at end of file +[AccessibleByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/AccessibleByName.html +[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html +[CqlSession.prepare(SimpleStatement)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html#prepare-com.datastax.oss.driver.api.core.cql.SimpleStatement- +[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html +[Statement.setNode()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setNode-com.datastax.oss.driver.api.core.metadata.Node- diff --git a/manual/core/pooling/README.md b/manual/core/pooling/README.md index e734deebdbb..578de6b4abd 100644 --- a/manual/core/pooling/README.md +++ b/manual/core/pooling/README.md @@ -1,5 +1,38 @@ + + ## Connection pooling +### Quick overview + +One connection pool per node. **Many concurrent requests** per connection (don't tune like a JDBC +pool). + +* `advanced.connection` in the configuration: `max-requests-per-connection`, `pool.local.size`, + `pool.remote.size`. +* metrics (per node): `pool.open-connections`, `pool.in-flight`, `pool.available-streams`, + `pool.orphaned-streams`. +* heartbeat: driver-level keepalive, prevents idle connections from being dropped; + `advanced.heartbeat` in the configuration. + +----- + ### Basics The driver communicates with Cassandra over TCP, using the Cassandra binary protocol. This protocol @@ -45,6 +78,9 @@ datastax-java-driver.advanced.connection { } ``` +Do not change those values unless informed by concrete performance measurements; see the +[Tuning](#tuning) section at the end of this page. + Unlike previous versions of the driver, pools do not resize dynamically. However you can adjust the options at runtime, the driver will detect and apply the changes. @@ -55,8 +91,9 @@ If connections stay idle for too long, they might be dropped by intermediate net keepalive settings might be impractical in some environments. The driver provides application-side keepalive in the form of a connection heartbeat: when a -connection does receive incoming reads for a given amount of time, the driver will simulate activity -by writing a dummy request to it. If that request fails, the connection is trashed and replaced. +connection does not receive incoming reads for a given amount of time, the driver will simulate +activity by writing a dummy request to it. If that request fails, the connection is trashed and +replaced. This feature is enabled by default. 
Here are the default values in the configuration: @@ -116,20 +153,28 @@ In particular, it's a good idea to keep an eye on those two metrics: connections from opening (either configuration or network issues, or a server-side limitation -- see [CASSANDRA-8086]); * `pool.available-streams`: if this is often close to 0, it's a sign that the pool is getting - saturated. Maybe `max-requests-per-connection` is too low, or more connections should be added. + saturated. Consider adding more connections per node. ### Tuning The driver defaults should be good for most scenarios. +#### Number of requests per connection + In our experience, raising `max-requests-per-connection` above 1024 does not bring any significant improvement: the server is only going to service so many requests at a time anyway, so additional requests are just going to pile up. -Similarly, 1 connection per node is generally sufficient. However, it might become a bottleneck in -very high performance scenarios: all I/O for a connection happens on the same thread, so it's -possible for that thread to max out its CPU core. In our benchmarks, this happened with a -single-node cluster and a high throughput (approximately 80K requests / second / connection). +Lowering the value is not a good idea either. If your goal is to limit the global throughput of the +driver, a [throttler](../throttling) is a better solution. + +#### Number of connections per node + +1 connection per node (`pool.local.size` or `pool.remote.size`) is generally sufficient. However, it +might become a bottleneck in very high performance scenarios: all I/O for a connection happens on +the same thread, so it's possible for that thread to max out its CPU core. In our benchmarks, this +happened with a single-node cluster and a high throughput (approximately 80K requests / second / +connection). It's unlikely that you'll run into this issue: in most real-world deployments, the driver connects to more than one node, so the load will spread across more I/O threads. However if you suspect that @@ -138,11 +183,11 @@ you experience the issue, here's what to look out for: * the driver throughput plateaus but the process does not appear to max out any system resource (in particular, overall CPU usage is well below 100%); * one of the driver's I/O threads maxes out its CPU core. You can see that with a profiler, or - OS-level tools like `pidstat -tu` on Linux. With the default configuration, I/O threads are called + OS-level tools like `pidstat -tu` on Linux. By default, I/O threads are named `-io-`. Try adding more connections per node. Thanks to the driver's hot-reload mechanism, you can do that -at runtime and see the effects immediately. +at runtime and see the effects immediately. -[CqlSession]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlSession.html -[CASSANDRA-8086]: https://issues.apache.org/jira/browse/CASSANDRA-8086 \ No newline at end of file +[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html +[CASSANDRA-8086]: https://issues.apache.org/jira/browse/CASSANDRA-8086 diff --git a/manual/core/query_timestamps/README.md b/manual/core/query_timestamps/README.md index 615bd282c7a..4498afe21c4 100644 --- a/manual/core/query_timestamps/README.md +++ b/manual/core/query_timestamps/README.md @@ -1,5 +1,37 @@ + + ## Query timestamps +### Quick overview + +Defines the order in which mutations are applied on the server. 
Ways to set it (in order of
+precedence, highest priority first):
+
+* `USING TIMESTAMP` in the query string.
+* programmatically with [Statement.setQueryTimestamp()].
+* timestamp generator: `advanced.timestamp-generator` in the configuration. Defaults to session-wide
+  monotonic, also available: per-thread monotonic, server-side, or write your own.
+* if the generator didn't set it, assigned server-side.
+
+-----
+
 In Cassandra, each mutation has a microsecond-precision timestamp, which is used to order operations
 relative to each other.
@@ -174,8 +206,9 @@ Here is the order of precedence of all the methods described so far:
 3. otherwise, if the timestamp generator assigned a timestamp, use it;
 4. otherwise, let the server assign the timestamp.
 
-[TimestampGenerator]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/time/TimestampGenerator.html
+[TimestampGenerator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/time/TimestampGenerator.html
 [gettimeofday]: http://man7.org/linux/man-pages/man2/settimeofday.2.html
-[JNR]: https://github.com/jnr/jnr-ffi
+[JNR]: https://github.com/jnr/jnr-posix
 [Lightweight transactions]: https://docs.datastax.com/en/dse/6.0/cql/cql/cql_using/useInsertLWT.html
+[Statement.setQueryTimestamp()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setQueryTimestamp-long-
diff --git a/manual/core/reactive/README.md b/manual/core/reactive/README.md
new file mode 100644
index 00000000000..37a2e3411b8
--- /dev/null
+++ b/manual/core/reactive/README.md
@@ -0,0 +1,410 @@
+
+
+## Reactive Style Programming
+
+The driver provides built-in support for reactive queries. The [CqlSession] interface extends
+[ReactiveSession], which adds specialized methods to execute requests expressed in [reactive
+streams].
+
+Notes:
+
+* Reactive capabilities require the [Reactive Streams API] to be present on the classpath. The
+  driver has a dependency on that library, but if your application does not use reactive queries at
+  all, it is possible to exclude it to minimize the number of runtime dependencies. If the library
+  cannot be found at runtime, reactive queries won't be available, and a warning will be logged, but
+  the driver will otherwise operate normally (this is also valid for OSGi deployments).
+* For historical reasons, reactive-related driver types reside in a package prefixed with `dse`;
+  however, reactive queries also work with regular Cassandra.
+* The reactive execution model is implemented in a non-blocking fashion: see the manual page on
+  [non-blocking programming](../non_blocking) for details.
+
+### Overview
+
+`ReactiveSession` exposes two public methods:
+
+```java
+ReactiveResultSet executeReactive(String query);
+ReactiveResultSet executeReactive(Statement<?> statement);
+```
+
+Both methods return a [ReactiveResultSet], which is the reactive streams version of a regular
+[ResultSet]. In other words, a `ReactiveResultSet` is a [Publisher] for query results.
+
+When subscribing to and consuming from a `ReactiveResultSet`, there are two important caveats to
+bear in mind:
+
+1. By default, all `ReactiveResultSet` implementations returned by the driver are cold, unicast,
+   single-subscription-only publishers. In other words, they do not support multiple subscribers;
+   consider caching the results produced by such publishers if you need to consume them by more than
+   one downstream subscriber. We provide a few examples of caching further in this document.
+2. Also, note that reactive result sets may emit items to their subscribers on an internal driver IO
+   thread. Subscriber implementors are encouraged to abide by [Reactive Streams Specification rule
+   2.2] and avoid performing heavy computations or blocking calls inside `onNext` calls, as doing so
+   could slow down the driver and impact performance. Instead, they should asynchronously dispatch
+   received signals to their processing logic.
+
+### Basic usage
+
+The examples in this page make use of [Reactor], a popular reactive library, but they should be
+easily adaptable to any other library implementing the concepts of reactive streams.
+
+#### Reading in reactive style
+
+The following example reads from a table and prints all the returned rows to the console. In case of
+error, a `DriverException` is thrown and its stack trace is printed to standard error:
+
+```java
+try (CqlSession session = ...) {
+  Flux.from(session.executeReactive("SELECT ..."))
+      .doOnNext(System.out::println)
+      .blockLast();
+} catch (DriverException e) {
+  e.printStackTrace();
+}
+```
+
+#### Writing in reactive style
+
+The following example inserts rows into a table after printing the queries to the console, stopping
+at the first error, if any. Again, in case of error, a `DriverException` is thrown:
+
+```java
+try (CqlSession session = ...) {
+  Flux.just("INSERT ...", "INSERT ...", "INSERT ...", ...)
+      .doOnNext(System.out::println)
+      .flatMap(session::executeReactive)
+      .blockLast();
+} catch (DriverException e) {
+  e.printStackTrace();
+}
+```
+
+Note that when a statement is executed reactively, the actual request is only triggered when the
+`ReactiveResultSet` is subscribed to; in other words, when the `executeReactive` method returns,
+_nothing has been executed yet_. This is why the write example above uses a `flatMap` operator,
+which takes care of subscribing to each `ReactiveResultSet` returned by successive calls to
+`session.executeReactive`. A common pitfall is to use an operator that silently ignores the returned
+`ReactiveResultSet`; for example, the code below seems correct, but will not execute any query:
+
+```java
+// DON'T DO THIS
+Flux.just("INSERT INTO ...")
+    // The returned ReactiveResultSet is not subscribed to
+    .doOnNext(session::executeReactive)
+    .blockLast();
+```
+
+Since a write query does not return any rows, it may appear difficult to count the number of
+statements that were actually executed. Fortunately, most reactive libraries have operators that
+are useful in these scenarios. The following example demonstrates how to achieve this goal with
+Reactor:
+
+```java
+Flux<Statement<?>> stmts = ...;
+long count =
+    stmts
+        .flatMap(
+            stmt ->
+                Flux.from(session.executeReactive(stmt))
+                    // dummy cast, since result sets are always empty for write queries
+                    .cast(Integer.class)
+                    // flow will always be empty, so '1' will be emitted for each query
+                    .defaultIfEmpty(1))
+        .count()
+        .block();
+System.out.printf("Executed %d write statements%n", count);
+```
+
+### Accessing query metadata
+
+`ReactiveResultSet` exposes useful information about request execution and query metadata:
+
+```java
+Publisher<? extends ColumnDefinitions> getColumnDefinitions();
+Publisher<? extends ExecutionInfo> getExecutionInfos();
+Publisher<Boolean> wasApplied();
+```
+
+Refer to the javadocs of [getColumnDefinitions], [getExecutionInfos] and [wasApplied] for more
+information on these methods.
+
+To inspect the contents of the above publishers, simply subscribe to them.
Note that these
+publishers cannot complete before the query itself completes; if the query fails, then these
+publishers will fail with the same error.
+
+The following example executes a query, then prints all the available metadata to the console:
+
+```java
+ReactiveResultSet rs = session.executeReactive("SELECT ...");
+// execute the query first
+Flux.from(rs).blockLast();
+// then retrieve query metadata
+System.out.println("Column definitions: ");
+Mono.from(rs.getColumnDefinitions()).doOnNext(System.out::println).block();
+System.out.println("Execution infos: ");
+Flux.from(rs.getExecutionInfos()).doOnNext(System.out::println).blockLast();
+System.out.println("Was applied: ");
+Mono.from(rs.wasApplied()).doOnNext(System.out::println).block();
+```
+
+Note that it is also possible to inspect query metadata at row level. Each row returned by a
+reactive query execution implements [`ReactiveRow`][ReactiveRow], the reactive equivalent of a
+[`Row`][Row].
+
+`ReactiveRow` exposes the same kind of query metadata and execution info found in
+`ReactiveResultSet`, but for each individual row:
+
+```java
+ColumnDefinitions getColumnDefinitions();
+ExecutionInfo getExecutionInfo();
+boolean wasApplied();
+```
+
+Refer to the javadocs of [`getColumnDefinitions`][ReactiveRow.getColumnDefinitions],
+[`getExecutionInfo`][ReactiveRow.getExecutionInfo] and [`wasApplied`][ReactiveRow.wasApplied] for
+more information on these methods.
+
+The following example executes a query and, for each row returned, prints the coordinator that
+served that row, then retrieves all the coordinators that were contacted to fulfill the query and
+prints them to the console:
+
+```java
+Iterable<Node> coordinators = Flux.from(session.executeReactive("SELECT ..."))
+    .doOnNext(
+        row ->
+            System.out.printf(
+                "Row %s was obtained from coordinator %s%n",
+                row,
+                row.getExecutionInfo().getCoordinator()))
+    .map(ReactiveRow::getExecutionInfo)
+    // dedup by coordinator (note: this is dangerous on a large result set)
+    .groupBy(ExecutionInfo::getCoordinator)
+    .map(GroupedFlux::key)
+    .toIterable();
+System.out.println("Contacted coordinators: " + coordinators);
+```
+
+### Advanced topics
+
+#### Applying backpressure
+
+One of the key features of reactive programming is backpressure.
+
+Unfortunately, the Cassandra native protocol does not offer proper support for exchanging
+backpressure information between client and server over the network. Cassandra is able, since
+version 3.10, to [throttle clients](https://issues.apache.org/jira/browse/CASSANDRA-9318) but at the
+time of writing, there is no proper [client-facing backpressure
+mechanism](https://issues.apache.org/jira/browse/CASSANDRA-11380) available.
+
+When reading from Cassandra, however, this shouldn't be a problem for most applications. Indeed, in
+a read scenario, Cassandra acts as a producer, and the driver is a consumer; in such a setup, if a
+downstream subscriber is not able to cope with the throughput, the driver will progressively adjust
+the rate at which it requests more pages from the server, thus effectively regulating the server
+throughput to match the subscriber's. The only caveat is if the subscriber is really too slow, which
+could eventually trigger a query timeout, be it on the client side (`DriverTimeoutException`), or on
+the server side (`ReadTimeoutException`).
+
+When writing to Cassandra, the lack of backpressure communication between client and server is more
+problematic.
Indeed, in a write scenario, the driver acts as a producer,
+and Cassandra is a consumer; in such a setup, if an upstream producer generates too much data, the
+driver would blindly send the write statements to the server as quickly as possible, eventually
+causing the cluster to become overloaded or even crash. This usually manifests itself with errors
+like `WriteTimeoutException` or `OverloadedException`.
+
+Users are strongly advised to limit the concurrency at which write statements are executed in
+write-intensive scenarios. A simple way to achieve this is to use the `flatMap` operator, which, in
+most reactive libraries, has an overloaded form with a parameter controlling the desired amount of
+concurrency. The following example executes a flow of statements with a maximum concurrency of 10,
+leveraging the `concurrency` parameter of Reactor's `flatMap` operator:
+
+```java
+Flux<Statement<?>> stmts = ...;
+stmts.flatMap(session::executeReactive, 10).blockLast();
+```
+
+In the example above, the `flatMap` operator will subscribe to at most 10 `ReactiveResultSet`
+instances simultaneously, effectively limiting the number of concurrent in-flight requests to 10.
+This is usually enough to prevent data from being written too fast. More sophisticated operators are
+capable of rate-limiting or throttling the execution of a flow; for example, Reactor offers a
+`delayElements` operator that rate-limits the throughput of its upstream publisher. Consult the
+documentation of your reactive library for more information.
+
+As a last resort, it is also possible to limit concurrency at driver level, for example using the
+driver's built-in [request throttling] mechanism, although this is usually not required in reactive
+applications. See "[Managing concurrency in asynchronous query execution]" in the Developer Guide
+for a few examples.
+
+#### Caching query results
+
+As stated above, a `ReactiveResultSet` can only be subscribed to once. This is an intentional design
+decision, because otherwise users could inadvertently trigger a spurious execution of the same query
+when subscribing a second time to the same `ReactiveResultSet`.
+
+Let's suppose that we want to compute both the average and the sum of all values from a table
+column. The most naive approach would be to create two flows and subscribe to both:
+
+```java
+// DON'T DO THIS
+ReactiveResultSet rs = session.executeReactive("SELECT n FROM ...");
+double avg = Flux.from(rs)
+    .map(row -> row.getLong(0))
+    .collect(Collectors.averagingLong(Long::longValue))
+    .block();
+// will fail with IllegalStateException
+long sum = Flux.from(rs)
+    .map(row -> row.getLong(0))
+    .reduce(0L, Long::sum)
+    .block();
+```
+
+Unfortunately, the second `Flux` above will terminate immediately with an `onError` signal
+encapsulating an `IllegalStateException`, since `rs` was already subscribed to.
+
+To circumvent this limitation, while still avoiding querying the table twice, the easiest technique
+is to use the `cache` operator that most reactive libraries offer:
+
+```java
+Flux<Long> rs = Flux.from(session.executeReactive("SELECT n FROM ..."))
+    .map(row -> row.getLong(0))
+    .cache();
+double avg = rs
+    .collect(Collectors.averagingLong(Long::longValue))
+    .block();
+long sum = rs
+    .reduce(0L, Long::sum)
+    .block();
+```
+
+The above example works just fine.
+
+The `cache` operator will subscribe at most once to the `ReactiveResultSet`, cache the results, and
+serve the cached results to downstream subscribers.
This is obviously only possible if your result
+set is small and can fit entirely in memory.
+
+If caching is not an option, most reactive libraries also offer operators that multicast their
+upstream subscription to many downstream subscribers on the fly.
+
+The above example could be rewritten with this approach as follows:
+
+```java
+Flux<Long> rs = Flux.from(session.executeReactive("SELECT n FROM ..."))
+    .map(row -> row.getLong(0))
+    .publish()       // multicast upstream to all downstream subscribers
+    .autoConnect(2); // wait until two subscribers subscribe
+long sum = rs
+    .reduce(0L, (a, b) -> a + b)
+    .block();
+double avg = rs
+    .reduce(0d, (a, b) -> (a + b) / 2.0)
+    .block();
+```
+
+In the above example, the `publish` operator multicasts every `onNext` signal to all of its
+subscribers, and the `autoConnect(2)` operator instructs `publish` to wait until it gets 2
+subscriptions before subscribing to its upstream source (and triggering the actual query execution).
+
+This approach should be the preferred one for large result sets, since it does not involve caching
+results in memory.
+
+#### Resuming from and retrying after failed queries
+
+When executing a flow of statements, any failed query execution will trigger an `onError` signal
+and terminate the subscription immediately, potentially preventing subsequent queries from being
+executed at all.
+
+If this behavior is not desired, it is possible to mimic the behavior of a fail-safe system,
+usually with operators such as `onErrorReturn` or `onErrorResume`. Consult your reactive library
+documentation to find out which operators allow you to intercept failures.
+
+The following example executes a flow of statements; for each failed execution, the stack trace is
+printed to standard error and, thanks to the `onErrorResume` operator, the error is completely
+ignored and the flow execution resumes normally:
+
+```java
+Flux<Statement<?>> stmts = ...;
+stmts.flatMap(
+        statement ->
+            Flux.from(session.executeReactive(statement))
+                .doOnError(Throwable::printStackTrace)
+                .onErrorResume(error -> Mono.empty()))
+    .blockLast();
+```
+
+The following example expands on the previous one: for each failed execution, at most 3 retries are
+attempted if the error was an `UnavailableException`; if the query still fails after retrying, a
+message is logged. Finally, all the errors are collected and the total number of failed queries is
+printed to the console:
+
+```java
+Flux<Statement<?>> statements = ...;
+long failed = statements.flatMap(
+    stmt ->
+        Flux.defer(() -> session.executeReactive(stmt))
+            // retry at most 3 times on Unavailable
+            .retry(3, UnavailableException.class::isInstance)
+            // handle errors
+            .doOnError(
+                error -> {
+                  System.err.println("Statement failed: " + stmt);
+                  error.printStackTrace();
+                })
+            // discard all returned rows, emit one 1L per failed statement
+            .ignoreElements()
+            .cast(Long.class)
+            .onErrorReturn(1L))
+    .reduce(0L, Long::sum)
+    .block();
+System.out.println("Total failed queries: " + failed);
+```
+
+The example above uses `Flux.defer()` to wrap the call to `session.executeReactive()`. This is
+required because, as mentioned above, the driver always creates single-subscription-only publishers.
+Such publishers are not compatible with operators like `retry` because these operators sometimes
+subscribe more than once to the upstream publisher, thus causing the driver to throw an exception.
+
+Fortunately, it's easy to solve this issue, and that's exactly what the `defer` operator is
+designed for: each subscription to the `defer` operator triggers a distinct call to
+`session.executeReactive()`, thus causing the session to re-execute the query and return a
+brand-new publisher at every retry.
+
+Note that the driver already has a [built-in retry mechanism] that can transparently retry failed
+queries; the above example should be seen as a demonstration of application-level retries, for when
+more fine-grained control over what should be retried, and how, is required.
+
+[CqlSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html
+[ReactiveSession]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveSession.html
+[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html
+[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html
+[ReactiveRow]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html
+[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html
+[getColumnDefinitions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html#getColumnDefinitions--
+[getExecutionInfos]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html#getExecutionInfos--
+[wasApplied]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html#wasApplied--
+[ReactiveRow.getColumnDefinitions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html#getColumnDefinitions--
+[ReactiveRow.getExecutionInfo]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html#getExecutionInfo--
+[ReactiveRow.wasApplied]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveRow.html#wasApplied--
+
+[built-in retry mechanism]: ../retries/
+[request throttling]: ../throttling/
+
+[Managing concurrency in asynchronous query execution]: https://docs.datastax.com/en/devapp/doc/devapp/driverManagingConcurrency.html
+[Publisher]: https://www.reactive-streams.org/reactive-streams-1.0.2-javadoc/org/reactivestreams/Publisher.html
+[reactive streams]: https://en.wikipedia.org/wiki/Reactive_Streams
+[Reactive Streams API]: https://github.com/reactive-streams/reactive-streams-jvm
+[Reactive Streams Specification rule 2.2]: https://github.com/reactive-streams/reactive-streams-jvm#2.2
+[Reactor]: https://projectreactor.io/
diff --git a/manual/core/reconnection/README.md b/manual/core/reconnection/README.md
index 97f342c5017..3eb6dad9c05 100644
--- a/manual/core/reconnection/README.md
+++ b/manual/core/reconnection/README.md
@@ -1,7 +1,40 @@
+
+
 ## Reconnection
 
-If the driver loses a connection to a node, it tries to re-establish it according to a configurable
-policy. This is used in two places:
+### Quick overview
+
+When a connection is lost, try to reestablish it at configured intervals.
+
+* `advanced.reconnection-policy` in the configuration; defaults to exponential backoff, also
+  available: constant delay, or write your own.
+* applies to connection pools and the control connection.
+* `advanced.reconnect-on-init` (false by default) controls whether the session tries to reconnect
+  when it is first created.
+
+-----
+
+### At runtime
+
+If a running session loses a connection to a node, it tries to re-establish it according to a
+configurable policy. This is used in two places:
 
 * [connection pools](../pooling/): for each node, a session has a fixed-size pool of connections
   to execute user requests. If one or more connections drop, a reconnection gets started for the pool;
@@ -54,9 +87,23 @@ is the exponential one with the default values, and the control connection is
 in [load balancing policy](../load_balancing/) to get a query plan, which happens to start with node4.
 The connection succeeds, node4 is now the control node and the reconnection stops;
 * [t = 3] node2's pool tries to open the last missing connection, which succeeds. The pool is back
-  to its expected size, node2's reconnection stops.
+  to its expected size, node2's reconnection stops.
+
+### At init time
+
+If a session fails to connect when it is first created, the default behavior is to abort and throw
+an error immediately.
+
+If you prefer to retry, you can set the configuration option `advanced.reconnect-on-init` to true.
+Instead of failing, the driver will keep attempting to initialize the session at regular intervals,
+according to the reconnection policy, until at least one contact point replies. This can be useful
+when dealing with containers and microservices.
+
+Note that the session is not accessible until it is fully ready: the `CqlSessionBuilder.build()`
+call (or the future returned by `buildAsync()`) will not complete until the connection has been
+established.
 
-[ConstantReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.html
-[DriverContext]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/context/DriverContext.html
-[ExponentialReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.html
-[ReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.html
\ No newline at end of file
+[ConstantReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/internal/core/connection/ConstantReconnectionPolicy.html
+[DriverContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/context/DriverContext.html
+[ExponentialReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/internal/core/connection/ExponentialReconnectionPolicy.html
+[ReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/ReconnectionPolicy.html
diff --git a/manual/core/request_id/README.md b/manual/core/request_id/README.md
new file mode 100644
index 00000000000..a766a4419af
--- /dev/null
+++ b/manual/core/request_id/README.md
@@ -0,0 +1,48 @@
+
+
+## Request ID
+
+### Quick overview
+
+Users can inject an identifier for each individual CQL request; such an ID can be written into the
+[custom payload](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v5.spec) to
+correlate a request across the driver and the Apache Cassandra server.
+
+A request ID generator needs to generate both:
+
+* Session request ID: an identifier for an entire `session.execute()` call.
+* Node request ID: an identifier for the execution of a CQL statement against a particular node.
+  There can be one or more node requests for a single session request, due to retries or
+  speculative executions.
+
+Usage:
+
+* Inject an ID generator: set the desired `RequestIdGenerator` in
+  `advanced.request-id.generator.class`.
+* Add the ID to the custom payload: the default behavior of a `RequestIdGenerator` is to add the
+  request ID into the custom payload with the key `request-id`. Override
+  `RequestIdGenerator.getDecoratedStatement` to customize the behavior.
+
+### Request ID generator configuration
+
+The request ID generator can be declared in the [configuration](../configuration/) as follows:
+
+```
+datastax-java-driver.advanced.request-id.generator {
+  class = com.example.app.MyGenerator
+}
+```
+
+To register your own request ID generator, specify the name of the class that implements
+`RequestIdGenerator`.
+
+The generated ID will be added to the log messages of `CqlRequestHandler`, and propagated to other
+classes, e.g. the request trackers.
\ No newline at end of file
diff --git a/manual/core/request_tracker/README.md b/manual/core/request_tracker/README.md
index 2498b07c7ad..c135abfe53f 100644
--- a/manual/core/request_tracker/README.md
+++ b/manual/core/request_tracker/README.md
@@ -1,32 +1,68 @@
+
+
 ## Request tracker
 
+### Quick overview
+
+Callback that gets invoked for every request: success or error, globally and for every tried node.
+
+* `advanced.request-tracker` in the configuration; defaults to none, also available: request logger,
+  or write your own.
+* or programmatically:
+  [CqlSession.builder().addRequestTracker()][SessionBuilder.addRequestTracker].
+
+-----
+
 The request tracker is a session-wide component that gets notified of the latency and outcome of
 every application request. The driver comes with an optional implementation that logs requests.
 
 ### Configuration
 
-The tracker is enabled in the [configuration](../configuration/). The default implementation does
-nothing:
+Request trackers can be declared in the [configuration](../configuration/) as follows:
 
 ```
 datastax-java-driver.advanced.request-tracker {
-  class = NoopRequestTracker
+  classes = [com.example.app.MyTracker1, com.example.app.MyTracker2]
 }
 ```
 
-To use a different tracker, specify the name of a class that implements [RequestTracker]. One such
-class is the built-in request logger (see the next section), you can also create your own
-implementation.
+By default, no tracker is registered. To register your own trackers, specify the names of classes
+that implement [RequestTracker]. One such class is the built-in request logger (see the next
+section), but you can also create your own implementations.
+
+Also, trackers registered via configuration are instantiated via reflection; they must have a
+public constructor taking a `DriverContext` argument.
 
 Sometimes you have a tracker instance already in your code, and need to pass it programmatically
 instead of referencing a class.
The session builder has a method for that:
 
 ```java
-RequestTracker myTracker = ...;
-CqlSession session = CqlSession.builder().withRequestTracker(myTracker).build();
+RequestTracker myTracker1 = ...;
+RequestTracker myTracker2 = ...;
+CqlSession session = CqlSession.builder()
+    .addRequestTracker(myTracker1)
+    .addRequestTracker(myTracker2)
+    .build();
 ```
 
-When you provide the tracker in this manner, the configuration will be ignored.
+The two registration methods (programmatic and via the configuration) can be used simultaneously.
 
 ### Request logger
 
@@ -35,7 +71,7 @@ requests as "slow" above a given threshold, limit the line size for large querie
 
 ```
 datastax-java-driver.advanced.request-tracker {
-  class = RequestLogger
+  classes = [RequestLogger]
 
   logs {
     # Whether to log successful requests.
@@ -106,4 +142,5 @@ all FROM users WHERE user_id=? [v0=42]
 com.datastax.oss.driver.api.core.servererrors.InvalidQueryException: Undefined column name all
 ```
 
-[RequestTracker]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/tracker/RequestTracker.html
+[RequestTracker]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/tracker/RequestTracker.html
+[SessionBuilder.addRequestTracker]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#addRequestTracker-com.datastax.oss.driver.api.core.tracker.RequestTracker-
diff --git a/manual/core/retries/README.md b/manual/core/retries/README.md
index cc6ed2f2fde..e92f8e214aa 100644
--- a/manual/core/retries/README.md
+++ b/manual/core/retries/README.md
@@ -1,20 +1,81 @@
+
+
 ## Retries
 
+### Quick overview
+
+What to do when a request fails on a node: retry (same or other node), rethrow, or ignore.
+
+* `advanced.retry-policy` in the configuration. The default policy retries at most once, in cases
+  that have a high chance of success; you can also write your own.
+* can have per-profile policies.
+* only kicks in if the query is [idempotent](../idempotence).
+
+-----
+
 When a query fails, it sometimes makes sense to retry it: the error might be temporary, or the query
 might work on a different node. The driver uses a *retry policy* to determine when and how to retry.
-It is defined in the [configuration](../configuration/):
-
+
+### Built-in retry policies
+
+The driver ships with two retry policies: `DefaultRetryPolicy` (the default) and
+`ConsistencyDowngradingRetryPolicy`.
+
+The default retry policy should be preferred in most cases, as it only retries when *it is perfectly
+safe to do so* and when *the chances of success are high enough* to warrant a retry.
+
+`ConsistencyDowngradingRetryPolicy` is provided for cases where the application can tolerate a
+temporary degradation of its consistency guarantees. Its general behavior is as follows: if, based
+on the information the coordinator returns, retrying the operation with the initially requested
+consistency has a chance to succeed, do it. Otherwise, if, based on this information, we know that
+the initially requested consistency level *cannot be achieved currently*, then:
+
+* For writes, ignore the exception *if we know the write has been persisted on at least one
+  replica*.
+* For reads, try reading again at a weaker consistency level.
+
+Keep in mind that this may break invariants! For example, if your application relies on immediate
+write visibility by writing and reading at QUORUM only, downgrading a write to ONE could cause that
+write to go unnoticed by subsequent reads at QUORUM.
Furthermore, this policy doesn't always respect +datacenter locality; for example, it may downgrade LOCAL_QUORUM to ONE, and thus could accidentally +send a write that was intended for the local datacenter to another datacenter. In summary: **only +use this retry policy if you understand the consequences.** + +Since `DefaultRetryPolicy` is already the driver's default retry policy, no special configuration +is required to activate it. To use `ConsistencyDowngradingRetryPolicy` instead, the following +option must be declared in the driver [configuration](../configuration/): + ``` -datastax-java-driver.advanced.retry-policy { - class = DefaultRetryPolicy -} +datastax-java-driver.advanced.retry-policy.class = ConsistencyDowngradingRetryPolicy ``` -The behavior of the default policy will be detailed in the sections below. You can also use your -own policy by specifying the fully-qualified name of a class that implements [RetryPolicy]. +You can also use your own policy by specifying for the above option the fully-qualified name of a +class that implements [RetryPolicy]. + +### Behavior + +The behavior of both policies will be detailed in the sections below. -The policy has several methods that cover different error cases. Each method returns a decision to -indicate what to do next: +The policy has several methods that cover different error cases. Each method returns a +[RetryVerdict]. A retry verdict essentially provides the driver with a [RetryDecision] to indicate +what to do next. There are four possible retry decisions: * retry on the same node; * retry on the next node in the [query plan](../load_balancing/) for this statement; @@ -22,7 +83,7 @@ indicate what to do next: using the asynchronous API); * ignore the exception. That is, mark the request as successful, and return an empty result set. -### onUnavailable +#### `onUnavailableVerdict` A request reached the coordinator, but there weren't enough live replicas to achieve the requested consistency level. The coordinator replied with an `UNAVAILABLE` error. @@ -37,7 +98,14 @@ rationale is that the first coordinator might have been network-isolated from al (thinking they're down), but still able to communicate with the client; in that case, retrying on the same node has almost no chance of success, but moving to the next node might solve the issue. -### onReadTimeout +`ConsistencyDowngradingRetryPolicy` also triggers a maximum of one retry, but instead of trying the +next node, it will downgrade the initial consistency level, if possible, and retry *the same node*. +Note that if it is not possible to downgrade, this policy will rethrow the exception. For example, +if the original consistency level was QUORUM, and 2 replicas were required to achieve a quorum, but +only one replica is alive, then the query will be retried with consistency ONE. If no replica was +alive however, there is no point in downgrading, and the policy will rethrow. + +#### `onReadTimeoutVerdict` A read request reached the coordinator, which initially believed that there were enough live replicas to process it. But one or several replicas were too slow to answer within the predefined @@ -62,7 +130,12 @@ retrieval, not having detected that replica as dead yet. The reasoning is that b the timeout, the dead replica will likely have been detected as dead and the retry has a high chance of success. -### onWriteTimeout +`ConsistencyDowngradingRetryPolicy` behaves like the default policy when enough replicas responded. 
+If not enough replicas responded however, it will attempt to downgrade the initial consistency +level, and retry *the same node*. If it is not possible to downgrade, this policy will rethrow the +exception. + +#### `onWriteTimeoutVerdict` This is similar to `onReadTimeout`, but for write operations. The reason reads and writes are handled separately is because a read is obviously a non mutating operation, whereas a write is @@ -80,7 +153,20 @@ small subset of nodes in the local datacenter; a timeout usually means that none alive but the coordinator hadn't detected them as dead yet. By the time we get the timeout, the dead nodes will likely have been detected as dead, and the retry has a high chance of success. -### onRequestAborted +`ConsistencyDowngradingRetryPolicy` also triggers a maximum of one retry, but behaves differently: + +* For `SIMPLE` and `BATCH` write types: if at least one replica acknowledged the write, the policy + will assume that the write will be eventually replicated, and decide to ignore the error; in other + words, it will consider that the write already succeeded, albeit with weaker consistency + guarantees: retrying is therefore useless. If no replica acknowledged the write, the policy will + rethrow the error. +* For `UNLOGGED_BATCH` write type: since only part of the batch could have been persisted, the + policy will attempt to downgrade the consistency level and retry *on the same node*. If + downgrading is not possible, the policy will rethrow. +* For `BATCH_LOG` write type: the policy will retry the same node, for the reasons explained above. +* For other write types: the policy will always rethrow. + +#### `onRequestAbortedVerdict` The request was aborted before we could get a response from the coordinator. This can happen in two cases: @@ -93,10 +179,10 @@ cases: This method is only invoked for [idempotent](../idempotence/) statements. Otherwise, the driver bypasses the retry policy and always rethrows the error. -The default policy retries on the next node if the connection was closed, and rethrows (assuming a -driver bug) in all other cases. +Both the default policy and `ConsistencyDowngradingRetryPolicy` retry on the next node if the +connection was closed, and rethrow (assuming a driver bug) in all other cases. -### onErrorResponse +#### `onErrorResponseVerdict` The coordinator replied with an error other than `READ_TIMEOUT`, `WRITE_TIMEOUT` or `UNAVAILABLE`. Namely, this covers [OverloadedException], [ServerError], [TruncateException], @@ -105,7 +191,8 @@ Namely, this covers [OverloadedException], [ServerError], [TruncateException], This method is only invoked for [idempotent](../idempotence/) statements. Otherwise, the driver bypasses the retry policy and always rethrows the error. -The default policy rethrows read and write failures, and retries other errors on the next node. +Both the default policy and `ConsistencyDowngradingRetryPolicy` rethrow read and write failures, +and retry other errors on the next node. ### Hard-coded rules @@ -163,20 +250,21 @@ configuration). Each request uses its declared profile's policy. If it doesn't declare any profile, or if the profile doesn't have a dedicated policy, then the default profile's policy is used. 
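+
+For instance, the following sketch keeps the default policy for regular requests, while letting
+selected statements opt into consistency downgrading via a dedicated profile (the profile name
+`downgrading` is purely illustrative):
+
+```
+datastax-java-driver {
+  advanced.retry-policy.class = DefaultRetryPolicy
+  profiles {
+    downgrading {
+      advanced.retry-policy.class = ConsistencyDowngradingRetryPolicy
+    }
+  }
+}
+```
+
+```java
+// Only statements executed with this profile use the downgrading policy:
+SimpleStatement statement =
+    SimpleStatement.newInstance("SELECT * FROM users WHERE id = ?", 42)
+        .setExecutionProfileName("downgrading");
+session.execute(statement);
+```
+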
-[AllNodesFailedException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/AllNodesFailedException.html -[ClosedConnectionException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.html -[DriverTimeoutException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/DriverTimeoutException.html -[FunctionFailureException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.html -[HeartbeatException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/connection/HeartbeatException.html -[ProtocolError]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/ProtocolError.html -[OverloadedException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/OverloadedException.html -[QueryValidationException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.html -[ReadFailureException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.html -[ReadTimeoutException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.html -[RetryDecision]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/retry/RetryDecision.html -[RetryPolicy]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/retry/RetryPolicy.html -[ServerError]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/ServerError.html -[TruncateException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/TruncateException.html -[UnavailableException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/UnavailableException.html -[WriteFailureException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.html -[WriteTimeoutException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.html +[AllNodesFailedException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AllNodesFailedException.html +[ClosedConnectionException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/ClosedConnectionException.html +[DriverTimeoutException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/DriverTimeoutException.html +[FunctionFailureException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/FunctionFailureException.html +[HeartbeatException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/HeartbeatException.html +[ProtocolError]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ProtocolError.html +[OverloadedException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/OverloadedException.html +[QueryValidationException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/QueryValidationException.html +[ReadFailureException]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ReadFailureException.html
+[ReadTimeoutException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ReadTimeoutException.html
+[RetryDecision]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/retry/RetryDecision.html
+[RetryPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/retry/RetryPolicy.html
+[RetryVerdict]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/retry/RetryVerdict.html
+[ServerError]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/ServerError.html
+[TruncateException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/TruncateException.html
+[UnavailableException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/UnavailableException.html
+[WriteFailureException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/WriteFailureException.html
+[WriteTimeoutException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/servererrors/WriteTimeoutException.html
diff --git a/manual/core/shaded_jar/README.md b/manual/core/shaded_jar/README.md
index 377b2e0c7cf..8e183c0efb5 100644
--- a/manual/core/shaded_jar/README.md
+++ b/manual/core/shaded_jar/README.md
@@ -1,39 +1,86 @@
+
+
 ## Using the shaded JAR
 
-The default driver JAR depends on [Netty](http://netty.io/), which is
-used internally for networking.
+The default `java-driver-core` JAR depends on a number of [third party
+libraries](../integration/#driver-dependencies). This can create conflicts if your application
+already uses other versions of those same dependencies.
 
-This explicit dependency can be a problem if your application already
-uses another Netty version. To avoid conflicts, we provide a "shaded"
-version of the JAR, which bundles the Netty classes under a different
-package name:
+To avoid this, we provide an alternative core artifact that shades [Netty](../integration/#netty),
+[Jackson](../integration/#jackson) and [ESRI](../integration/#esri). To use it, replace the
+dependency on `java-driver-core` with:
 
 ```xml
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-core-shaded</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
 </dependency>
 ```
 
-If you also use the query-builder or some other library that depends on java-driver-core, you need to remove its
-dependency to the non-shaded JAR:
+If you also use the query-builder, mapper or some other library that depends on java-driver-core,
+you need to remove its dependency on the non-shaded JAR:
 
 ```xml
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-core-shaded</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
 </dependency>
+
 <dependency>
-  <groupId>com.datastax.oss</groupId>
+  <groupId>org.apache.cassandra</groupId>
   <artifactId>java-driver-query-builder</artifactId>
-  <version>4.1.0</version>
+  <version>${driver.version}</version>
   <exclusions>
     <exclusion>
-      <groupId>com.datastax.oss</groupId>
+      <groupId>org.apache.cassandra</groupId>
       <artifactId>java-driver-core</artifactId>
     </exclusion>
   </exclusions>
 </dependency>
 ```
+
+Notes:
+
+* the shading process works by moving the libraries under a different package name, and bundling
+  them directly into the driver JAR. This should be transparent for client applications: the
+  impacted dependencies are purely internal, their types are not surfaced in the driver's public
+  API.
+* the driver is compatible with all Netty versions in the range `[4.1.7, 4.2.0)` (equal to or
+  higher than 4.1.7, and less than 4.2.0).
If you just need a specific version in that range, you can
+  avoid the need for the shaded JAR by declaring an explicit dependency in your POM:
+
+  ```xml
+  <dependency>
+    <groupId>org.apache.cassandra</groupId>
+    <artifactId>java-driver-core</artifactId>
+    <version>${driver.version}</version>
+  </dependency>
+
+  <dependency>
+    <groupId>io.netty</groupId>
+    <artifactId>netty-handler</artifactId>
+    <version>4.1.39.Final</version>
+  </dependency>
+  ```
+
+  This only works with Netty: for Jackson and ESRI, only the exact version declared in the driver
+  POM is supported.
diff --git a/manual/core/speculative_execution/README.md b/manual/core/speculative_execution/README.md
index 22d71264acd..5666d6a1363 100644
--- a/manual/core/speculative_execution/README.md
+++ b/manual/core/speculative_execution/README.md
@@ -1,5 +1,36 @@
+
+
 ## Speculative query execution
 
+### Quick overview
+
+Pre-emptively query another node if the current one takes too long to respond.
+
+* `advanced.speculative-execution-policy` in the configuration.
+* disabled by default. Also available: constant delay, or write your own policy.
+* can have per-profile policies.
+* only kicks in if the query is idempotent.
+* creates more traffic: tune your pool and provision your cluster accordingly.
+
+-----
+
 Sometimes a Cassandra node might be experiencing difficulties (ex: long GC pause) and take longer
 than usual to reply. Queries sent to that node will experience bad latency.
 
@@ -238,4 +269,4 @@ profiles have the same configuration).
 Each request uses its declared profile's policy. If it doesn't declare any profile, or if the
 profile doesn't have a dedicated policy, then the default profile's policy is used.
 
-[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.html
\ No newline at end of file
+[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/specex/SpeculativeExecutionPolicy.html
diff --git a/manual/core/ssl/README.md b/manual/core/ssl/README.md
index 9d28cf315e1..913c7bc6c9a 100644
--- a/manual/core/ssl/README.md
+++ b/manual/core/ssl/README.md
@@ -1,6 +1,37 @@
+
+
 ## SSL
 
-You can secure traffic between the driver and Cassandra with SSL. There are two aspects to that:
+### Quick overview
+
+Secure the traffic between the driver and Cassandra.
+
+* `advanced.ssl-engine-factory` in the configuration; defaults to none, also available:
+  config-based, or write your own.
+* or programmatically:
+  [CqlSession.builder().withSslEngineFactory()][SessionBuilder.withSslEngineFactory] or
+  [CqlSession.builder().withSslContext()][SessionBuilder.withSslContext].
+
+-----
+
+There are two aspects to SSL:
 
 * **client-to-node encryption**, where the traffic is encrypted, and the client verifies the
   identity of the Cassandra nodes it connects to;
 
@@ -63,11 +94,13 @@ If you're using a CA, sign the client certificate with it (see the blog post lin
 this page). Then the nodes' truststores only need to contain the CA's certificate (which should
 already be the case if you've followed the steps for inter-node encryption).
 
+`DefaultSslEngineFactory` supports client keystore reloading; see the
+`advanced.ssl-engine-factory.keystore-reload-interval` property.
 
 ### Driver configuration
 
 By default, the driver's SSL support is based on the JDK's built-in implementation: JSSE (Java
-Secure Socket Extension),.
+Secure Socket Extension).
 
 To enable it, you need to define an engine factory in the [configuration](../configuration/).
@@ -95,6 +128,12 @@ datastax-java-driver { // truststore-password = password123 // keystore-path = /path/to/client.keystore // keystore-password = password123 + + # The duration between attempts to reload the keystore from the contents of the file specified + # by `keystore-path`. This is mainly relevant in environments where certificates have short + # lifetimes and applications are restarted infrequently, since an expired client certificate + # will prevent new connections from being established until the application is restarted. + // keystore-reload-interval = 30 minutes } } ``` @@ -110,7 +149,7 @@ use [JSSE system properties]: -Djavax.net.ssl.keyStorePassword=password123 ``` -#### JSSE, programmatic +#### JSSE, custom factory If you need more control than what system properties allow, you need to write your own engine factory. If you just need specific configuration on the `SSLEngine`, you can extend the default @@ -145,27 +184,53 @@ datastax-java-driver { } ``` -#### Netty +#### JSSE, programmatic -Netty provides a more efficient SSL implementation based on native OpenSSL support. It's possible to -customize the driver to use it instead of JSSE. +You can also provide a factory instance programmatically. This will take precedence over the +configuration: + +```java +SslEngineFactory yourFactory = ... +CqlSession session = CqlSession.builder() + .withSslEngineFactory(yourFactory) + .build(); +``` + +If you are reusing code that configures SSL programmatically, you can use +[ProgrammaticSslEngineFactory] as an easy way to wrap that into a factory instance: + +```java +SSLContext sslContext = ... +String[] cipherSuites = ... +boolean requireHostNameValidation = ... +CqlSession session = + CqlSession.builder() + .withSslEngineFactory( + new ProgrammaticSslEngineFactory( + sslContext, cipherSuites, requireHostNameValidation)) + .build(); +``` + +Finally, there is a convenient shortcut on the session builder if you just need to pass an +`SSLContext`: + +```java +SSLContext sslContext = ... +CqlSession session = CqlSession.builder() + .withSslContext(sslContext) + .build(); +``` -This is an advanced topic and beyond the scope of this document, but here is an overview: +#### Netty-tcnative -1. add a dependency to Netty-tcnative: follow - [these instructions](http://netty.io/wiki/forked-tomcat-native.html); -2. write your own implementation of the driver's `SslHandlerFactory`. This is a higher-level - abstraction than `SslEngineFactory`, that returns a Netty `SslHandler`. You'll build this handler - with Netty's own `SslContext`; -3. write a subclass of `DefaultDriverContext` that overrides `buildSslHandlerFactory()` to return - the custom `SslHandlerFactory` you wrote in step 2. This will cause the driver to completely - ignore the `ssl-engine-factory` options in the configuration; -4. write a subclass of `SessionBuilder` that overrides `buildContext` to return the custom context - that you wrote in step 3. -5. build your session with your custom builder. +Netty supports native integration with OpenSSL / boringssl. The driver does not provide this out of +the box, but with a bit of custom development it is fairly easy to add. See +[SslHandlerFactory](../../developer/netty_pipeline/#ssl-handler-factory) in the developer docs. -Note that this approach relies on the driver's [internal API](../../api_conventions). 
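+
+If you don't already have an `SSLContext` at hand for the `withSslContext` shortcut shown earlier,
+here is a minimal sketch that builds one from a truststore using only standard JDK APIs (the path
+and password are placeholders):
+
+```java
+KeyStore truststore = KeyStore.getInstance("JKS");
+try (InputStream in = Files.newInputStream(Paths.get("/path/to/client.truststore"))) {
+  truststore.load(in, "password123".toCharArray());
+}
+TrustManagerFactory tmf =
+    TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+tmf.init(truststore);
+SSLContext sslContext = SSLContext.getInstance("TLS");
+// Null key managers: no client certificate, which is fine unless the server requires one.
+sslContext.init(null, tmf.getTrustManagers(), null);
+```
+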
[dsClientToNode]: https://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/secureSSLClientToNode.html
 [pickle]: http://thelastpickle.com/blog/2015/09/30/hardening-cassandra-step-by-step-part-1-server-to-server.html
-[JSSE system properties]: http://docs.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#Customization
\ No newline at end of file
+[JSSE system properties]: http://docs.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#Customization
+[SessionBuilder.withSslEngineFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withSslEngineFactory-com.datastax.oss.driver.api.core.ssl.SslEngineFactory-
+[SessionBuilder.withSslContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withSslContext-javax.net.ssl.SSLContext-
+[ProgrammaticSslEngineFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/ssl/ProgrammaticSslEngineFactory.html
diff --git a/manual/core/statements/README.md b/manual/core/statements/README.md
index 2ea78442155..394e81ae00e 100644
--- a/manual/core/statements/README.md
+++ b/manual/core/statements/README.md
@@ -1,5 +1,34 @@
+
+
 ## Statements
 
+### Quick overview
+
+What you pass to `session.execute()`.
+
+* three types: simple (textual query), bound (prepared) and batch.
+* built-in implementations are **immutable**. Setters always return a new object, don't ignore the
+  result.
+
+-----
+
 To execute a CQL query, you create a [Statement] instance and pass it to [Session#execute][execute]
 or [Session#executeAsync][executeAsync]. The driver provides various implementations:
 
@@ -49,7 +78,7 @@ the [configuration](../configuration/). Namely, these are: idempotent flag, quer
 consistency levels and page size. We recommend the configuration approach whenever possible (you
 can create execution profiles to capture common combinations of those options).
 
-[Statement]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/Statement.html
-[StatementBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/StatementBuilder.html
-[execute]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html#execute-com.datastax.oss.driver.api.core.cql.Statement-
-[executeAsync]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Session.html#executeAsync-com.datastax.oss.driver.api.core.cql.Statement-
+[Statement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html
+[StatementBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/StatementBuilder.html
+[execute]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#execute-com.datastax.oss.driver.api.core.cql.Statement-
+[executeAsync]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Session.html#executeAsync-com.datastax.oss.driver.api.core.cql.Statement-
diff --git a/manual/core/statements/batch/README.md b/manual/core/statements/batch/README.md
index 2d66fb0a92e..f080fe16ab0 100644
--- a/manual/core/statements/batch/README.md
+++ b/manual/core/statements/batch/README.md
@@ -1,5 +1,34 @@
+
+
 ## Batch statements
 
+### Quick overview
+
+Group a set of statements into an atomic operation.
+ +* create with [BatchStatement.newInstance()] or [BatchStatement.builder()]. +* built-in implementation is **immutable**. Setters always return a new object, don't ignore the + result. + +----- + Use [BatchStatement] to execute a set of queries as an atomic operation (refer to [Batching inserts, updates and deletes][batch_dse] to understand how to use batching effectively): @@ -51,6 +80,8 @@ In addition, simple statements with named parameters are currently not supported due to a [protocol limitation][CASSANDRA-10246] that will be fixed in a future version). If you try to execute such a batch, an `IllegalArgumentException` is thrown. -[BatchStatement]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/BatchStatement.html +[BatchStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BatchStatement.html +[BatchStatement.newInstance()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BatchStatement.html#newInstance-com.datastax.oss.driver.api.core.cql.BatchType- +[BatchStatement.builder()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BatchStatement.html#builder-com.datastax.oss.driver.api.core.cql.BatchType- [batch_dse]: http://docs.datastax.com/en/dse/6.7/cql/cql/cql_using/useBatch.html -[CASSANDRA-10246]: https://issues.apache.org/jira/browse/CASSANDRA-10246 \ No newline at end of file +[CASSANDRA-10246]: https://issues.apache.org/jira/browse/CASSANDRA-10246 diff --git a/manual/core/statements/per_query_keyspace/README.md b/manual/core/statements/per_query_keyspace/README.md index 01ace009512..9a7ffa338c9 100644 --- a/manual/core/statements/per_query_keyspace/README.md +++ b/manual/core/statements/per_query_keyspace/README.md @@ -1,5 +1,33 @@ + + ## Per-query keyspace +### Quick overview + +Specify the keyspace separately instead of hardcoding it in the query string. + +* Cassandra 4+ / DSE 6+. +* only works with simple statements. + +----- + Sometimes it is convenient to send the keyspace separately from the query string, and without switching the whole session to that keyspace either. For example, you might have a multi-tenant setup where identical requests are executed against different keyspaces. @@ -115,6 +143,6 @@ SimpleStatement statement = At some point in the future, when Cassandra 4 becomes prevalent and using a per-query keyspace is the norm, we'll probably deprecate `setRoutingKeyspace()`. -[token-aware routing]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKey-- +[token-aware routing]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/Request.html#getRoutingKey-- -[CASSANDRA-10145]: https://issues.apache.org/jira/browse/CASSANDRA-10145 \ No newline at end of file +[CASSANDRA-10145]: https://issues.apache.org/jira/browse/CASSANDRA-10145 diff --git a/manual/core/statements/prepared/README.md b/manual/core/statements/prepared/README.md index 6157cb8358e..5a87b238cbc 100644 --- a/manual/core/statements/prepared/README.md +++ b/manual/core/statements/prepared/README.md @@ -1,5 +1,40 @@ + + ## Prepared statements +### Quick overview + +Prepare a query string once, reuse with different values. More efficient than simple statements for +queries that are used often. 
+ +* create the prepared statement with `session.prepare()`, call [bind()][PreparedStatement.bind] or + [boundStatementBuilder()][PreparedStatement.boundStatementBuilder] on it to create executable + statements. +* the session has a built-in cache, it's OK to prepare the same string twice. +* values: `?` or `:name`, fill with `setXxx(int, ...)` or `setXxx(String, ...)` respectively. +* some values can be left unset with Cassandra 2.2+ / DSE 5+. +* built-in implementation is **immutable**. Setters always return a new object, don't ignore the + result. + +----- + Use prepared statements for queries that are executed multiple times in your application: ```java @@ -55,6 +90,8 @@ client driver Cassandra |<--------------------------------| | ``` +### Advantages of prepared statements + Beyond saving a bit of parsing overhead on the server, prepared statements have other advantages; the `PREPARED` response also contains useful metadata about the CQL query: @@ -181,19 +218,19 @@ parameters. #### Unset values With [native protocol](../../native_protocol/) V3, all variables must be bound. With native protocol -V4 or above, variables can be left unset, in which case they will be ignored (no tombstones will be -generated). If you're reusing a bound statement, you can use the `unset` method to unset variables -that were previously set: +V4 (Cassandra 2.2 / DSE 5) or above, variables can be left unset, in which case they will be ignored +(no tombstones will be generated). If you're reusing a bound statement, you can use the `unset` +method to unset variables that were previously set: ```java BoundStatement bound = ps1.bind() .setString("sku", "324378") .setString("description", "LCD screen"); -// Positional: +// Named: bound = bound.unset("description"); -// Named: +// Positional: bound = bound.unset(1); ``` @@ -312,8 +349,10 @@ With Cassandra 4 and [native protocol](../../native_protocol/) v5, this issue is new version with the response; the driver updates its local cache transparently, and the client can observe the new columns in the result set. 
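+
+For completeness, here is a minimal sketch contrasting the two ways to create an executable
+statement from a prepared statement (the query string and profile name are illustrative):
+
+```java
+PreparedStatement ps = session.prepare("INSERT INTO product (sku, description) VALUES (?, ?)");
+
+// Direct binding:
+BoundStatement bs1 = ps.bind("324378", "LCD screen");
+
+// Builder variant, convenient when several options must be set before building:
+BoundStatement bs2 =
+    ps.boundStatementBuilder("324378", "LCD screen")
+        .setExecutionProfileName("oltp")
+        .build();
+```
+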
-[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/BoundStatement.html
-[Session.prepare]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlSession.html#prepare-com.datastax.oss.driver.api.core.cql.SimpleStatement-
+[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html
+[Session.prepare]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlSession.html#prepare-com.datastax.oss.driver.api.core.cql.SimpleStatement-
 [CASSANDRA-10786]: https://issues.apache.org/jira/browse/CASSANDRA-10786
 [CASSANDRA-10813]: https://issues.apache.org/jira/browse/CASSANDRA-10813
-[guava eviction]: https://github.com/google/guava/wiki/CachesExplained#reference-based-eviction
\ No newline at end of file
+[guava eviction]: https://github.com/google/guava/wiki/CachesExplained#reference-based-eviction
+[PreparedStatement.bind]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/PreparedStatement.html#bind-java.lang.Object...-
+[PreparedStatement.boundStatementBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/PreparedStatement.html#boundStatementBuilder-java.lang.Object...-
diff --git a/manual/core/statements/simple/README.md b/manual/core/statements/simple/README.md
index e8f066d89de..13ddbb7a389 100644
--- a/manual/core/statements/simple/README.md
+++ b/manual/core/statements/simple/README.md
@@ -1,5 +1,36 @@
+
+
 ## Simple statements
 
+### Quick overview
+
+For one-off executions of a raw query string.
+
+* create with [SimpleStatement.newInstance()] or [SimpleStatement.builder()].
+* values: `?` or `:name`, fill with `setPositionalValues()` or `setNamedValues()` respectively.
+  The driver has to guess the target CQL types, which can lead to ambiguities.
+* built-in implementation is **immutable**. Setters always return a new object, don't ignore the
+  result.
+
+-----
+
 Use [SimpleStatement] for queries that will be executed only once (or just a few times):
 
@@ -170,4 +201,6 @@ session.execute(
 Or you could also use [prepared statements](../prepared/), which don't have this limitation since
 parameter types are known in advance.
 
-[SimpleStatement]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/SimpleStatement.html
+[SimpleStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SimpleStatement.html
+[SimpleStatement.newInstance()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SimpleStatement.html#newInstance-java.lang.String-
+[SimpleStatement.builder()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SimpleStatement.html#builder-java.lang.String-
diff --git a/manual/core/temporal_types/README.md b/manual/core/temporal_types/README.md
index 9d690ad9fab..6542d5b8dac 100644
--- a/manual/core/temporal_types/README.md
+++ b/manual/core/temporal_types/README.md
@@ -1,8 +1,38 @@
+
+
 ## Temporal types
 
+### Quick overview
+
 This page provides more details about the various CQL time types, and the Java types they are
 mapped to in the driver.
 
+| CQL | Java | Notes |
+|---|---|---|
+|`date` | `java.time.LocalDate` ||
+|`time` | `java.time.LocalTime` ||
+|`timestamp` | `java.time.Instant` | No time zone.
Use `Instant.atZone` or register [TypeCodecs.ZONED_TIMESTAMP_SYSTEM], [TypeCodecs.ZONED_TIMESTAMP_UTC] or [TypeCodecs.zonedTimestampAt()] | +|`duration` | [CqlDuration] | Custom driver type; can't be accurately represented by any of the `java.time` types. | + +----- + ### Date and time CQL types `date` and `time` map directly to `java.time.LocalDate` and `java.time.LocalTime`. @@ -135,7 +165,7 @@ System.out.println(dateTime.minus(CqlDuration.from("1h15s15ns"))); // prints "2018-10-03T22:59:44.999999985-07:00[America/Los_Angeles]" ``` -[CqlDuration]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/CqlDuration.html -[TypeCodecs.ZONED_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#ZONED_TIMESTAMP_SYSTEM -[TypeCodecs.ZONED_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#ZONED_TIMESTAMP_UTC -[TypeCodecs.zonedTimestampAt()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#zonedTimestampAt-java.time.ZoneId- \ No newline at end of file +[CqlDuration]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/CqlDuration.html +[TypeCodecs.ZONED_TIMESTAMP_SYSTEM]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#ZONED_TIMESTAMP_SYSTEM +[TypeCodecs.ZONED_TIMESTAMP_UTC]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#ZONED_TIMESTAMP_UTC +[TypeCodecs.zonedTimestampAt()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/TypeCodecs.html#zonedTimestampAt-java.time.ZoneId- diff --git a/manual/core/throttling/README.md b/manual/core/throttling/README.md index 81628e9c2ee..275c0cb5b40 100644 --- a/manual/core/throttling/README.md +++ b/manual/core/throttling/README.md @@ -1,5 +1,35 @@ + + ## Request throttling +### Quick overview + +Limit session throughput. + +* `advanced.throttler` in the configuration; defaults to pass-through (no throttling), also + available: concurrency-based (max simultaneous requests), rate-based (max requests per time unit), + or write your own. +* metrics: `throttling.delay`, `throttling.queue-size`, `throttling.errors`. + +----- + Throttling allows you to limit how many requests a session can execute concurrently. This is useful if you have multiple applications connecting to the same Cassandra cluster, and want to enforce some kind of SLA to ensure fair resource allocation. @@ -47,9 +77,7 @@ This is a no-op implementation: requests are simply allowed to proceed all the t Note that you will still hit a limit if all your connections run out of stream ids. In that case, requests will fail with an [AllNodesFailedException], with the `getErrors()` method returning a -[BusyConnectionException] for each node. - - +[BusyConnectionException] for each node. See the [connection pooling](../pooling/) page. #### Concurrency-based @@ -76,9 +104,7 @@ function of the number of connected nodes and the `connection.pool.*.size` and `connection.max-requests-per-connection` configuration options. Keep in mind that aggressive speculative executions and timeout options can inflate stream id consumption, so keep a safety margin. One good way to get this right is to track the `pool.available-streams` [metric](../metrics) -on every node, and make sure it never reaches 0. 
-
-
+on every node, and make sure it never reaches 0. See the [connection pooling](../pooling/) page.
 
 #### Rate-based
 
@@ -138,6 +164,6 @@ datastax-java-driver {
 
 If you enable `throttling.delay`, make sure to also check the associated extra options to correctly
 size the underlying histograms (`metrics.session.throttling.delay.*`).
 
-[RequestThrottlingException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/RequestThrottlingException.html
-[AllNodesFailedException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/AllNodesFailedException.html
-[BusyConnectionException]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/connection/BusyConnectionException.html
\ No newline at end of file
+[RequestThrottlingException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/RequestThrottlingException.html
+[AllNodesFailedException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/AllNodesFailedException.html
+[BusyConnectionException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/connection/BusyConnectionException.html
diff --git a/manual/core/tracing/README.md b/manual/core/tracing/README.md
index 512860a156a..f9beca8e49b 100644
--- a/manual/core/tracing/README.md
+++ b/manual/core/tracing/README.md
@@ -1,5 +1,37 @@
+
+
 ## Query tracing
 
+### Quick overview
+
+Detailed information about the server-side internals for a given query.
+
+* disabled by default, must enable per statement with [Statement.setTracing()] or
+  [StatementBuilder.setTracing()].
+* retrieve with [ResultSet.getExecutionInfo().getTracingId()][ExecutionInfo.getTracingId()] and
+  [getQueryTrace()][ExecutionInfo.getQueryTrace()].
+* `advanced.request.trace` in the configuration: fine-grained control over how the driver fetches
+  the trace data.
+
+-----
+
 To help troubleshoot performance, Cassandra offers the ability to *trace* a query, in other
 words capture detailed information about the internal operations performed by all nodes in the
 cluster in order to build the response.
 
@@ -100,5 +132,9 @@ for (TraceEvent event : trace.getEvents()) {
 
 If you call `getQueryTrace()` for a statement that didn't have tracing enabled, an exception is
 thrown.
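+
+If the same code path handles both traced and non-traced statements, a simple guard on the tracing
+id avoids that exception (a minimal sketch):
+
+```java
+ExecutionInfo info = rs.getExecutionInfo();
+if (info.getTracingId() != null) { // null when tracing was not enabled
+  QueryTrace trace = info.getQueryTrace();
+  System.out.printf(
+      "Trace %s took %d microseconds%n", trace.getTracingId(), trace.getDurationMicros());
+}
+```
+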
-[ExecutionInfo]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html
-[QueryTrace]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/cql/QueryTrace.html
+[ExecutionInfo]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html
+[QueryTrace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/QueryTrace.html
+[Statement.setTracing()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Statement.html#setTracing-boolean-
+[StatementBuilder.setTracing()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/StatementBuilder.html#setTracing--
+[ExecutionInfo.getTracingId()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getTracingId--
+[ExecutionInfo.getQueryTrace()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getQueryTrace--
diff --git a/manual/core/tuples/README.md b/manual/core/tuples/README.md
index fd90c13b8bf..d0684b77569 100644
--- a/manual/core/tuples/README.md
+++ b/manual/core/tuples/README.md
@@ -1,5 +1,36 @@
+
+
 ## Tuples
 
+### Quick overview
+
+Ordered set of anonymous, typed fields, e.g. `tuple<int, text, float>`, `(1, 'a', 1.0)`.
+
+* `row.getTupleValue()` / `boundStatement.setTupleValue()`.
+* positional getters and setters: `tupleValue.getInt(0)`, `tupleValue.setString(1, "a")`...
+* getting hold of the [TupleType]: statement or session metadata, `tupleValue.getType()`, or
+  `DataTypes.tupleOf()`.
+* creating a value from a type: `tupleType.newValue()`.
+
+-----
+
 [CQL tuples][cql_doc] are ordered sets of anonymous, typed fields. They can be used as a column
 type in tables, or a field type in [user-defined types](../udts/):
 
@@ -127,5 +158,5 @@ BoundStatement bs =
 
 [cql_doc]: https://docs.datastax.com/en/cql/3.3/cql/cql_reference/tupleType.html
-[TupleType]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/TupleType.html
-[TupleValue]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/TupleValue.html
+[TupleType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/TupleType.html
+[TupleValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/TupleValue.html
diff --git a/manual/core/udts/README.md b/manual/core/udts/README.md
index 63767f321a6..a22057030ae 100644
--- a/manual/core/udts/README.md
+++ b/manual/core/udts/README.md
@@ -1,5 +1,40 @@
+
+
 ## User-defined types
 
+### Quick overview
+
+Ordered set of named, typed fields, e.g. `{ street: '1 Main St', zip: 12345 }`.
+
+* `row.getUdtValue()` / `boundStatement.setUdtValue()`.
+* positional or named getters and setters: `udtValue.getString("street")`,
+  `udtValue.setInt(1, 12345)`...
+* getting hold of the [UserDefinedType]:
+  * statement or session metadata, or `udtValue.getType()`.
+  * `UserDefinedTypeBuilder` (not recommended, dangerous if you build a type that doesn't match the
+    database schema).
+* creating a value from a type: `userDefinedType.newValue()`.
+
+-----
+
+
 [CQL user-defined types][cql_doc] are ordered sets of named, typed fields.
They must be defined in a keyspace: @@ -119,5 +154,5 @@ session.execute(bs); [cql_doc]: https://docs.datastax.com/en/cql/3.3/cql/cql_reference/cqlRefUDType.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/data/UdtValue.html -[UserDefinedType]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/UserDefinedType.html \ No newline at end of file +[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html +[UserDefinedType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/UserDefinedType.html diff --git a/manual/developer/.nav b/manual/developer/.nav new file mode 100644 index 00000000000..0bb954b1293 --- /dev/null +++ b/manual/developer/.nav @@ -0,0 +1,5 @@ +common +native_protocol +netty_pipeline +request_execution +admin diff --git a/manual/developer/README.md b/manual/developer/README.md new file mode 100644 index 00000000000..b6e0bda16ed --- /dev/null +++ b/manual/developer/README.md @@ -0,0 +1,38 @@ + + +## Developer docs + +This section explains how driver internals work. The intended audience is: + +* driver developers and contributors; +* framework authors, or architects who want to write advanced customizations and integrations. + +Most of this material will involve "internal" packages; see [API conventions](../api_conventions/) +for more explanations. + +We recommend reading about the [common infrastructure](common/) first. Then the documentation goes +from lowest to highest level: + +* [Native protocol layer](native_protocol/): binary encoding of the TCP payloads; +* [Netty pipeline](netty_pipeline/): networking and low-level stream management; +* [Request execution](request_execution/): higher-level handling of user requests and responses; +* [Administrative tasks](admin/): everything else (cluster state and metadata). + +If you're reading this on GitHub, the `.nav` file in each directory contains a suggested order. diff --git a/manual/developer/admin/README.md b/manual/developer/admin/README.md new file mode 100644 index 00000000000..0ebd9e2d746 --- /dev/null +++ b/manual/developer/admin/README.md @@ -0,0 +1,342 @@ + + +## Administrative tasks + +Aside from the main task of [executing user requests](../request_execution), the driver also needs +to track cluster state and metadata. This is done with a number of administrative components: + +```ditaa + +---------------+ + | DriverChannel | + +-------+-------+ + |1 + | topology ++-----------------+ query +---------+---------+ events +| TopologyMonitor +------+---->| ControlConnection +-----------------+ ++-----------------+ | +---------+---------+ | + ^ | | | + | | | topology+channel V + get | +---------+ refresh| events +----------+ +node info| | schema | +------------+ EventBus | + | | | | +-+--------+ ++--------+-----+--+ | | ^ ^ +| MetadataManager |<-------+-------------+ | node| | ++--------+-------++ | | state| | + | | | add/remove v events| | + |1 | | node +------------------+ | | + +-----+----+ | +------------+ NodeStateManager +------+ | + | Metadata | | +------------------+ | + +----------+ | | + +-------------------------------------------------------+ + metadata changed events +``` + +Note: the event bus is covered in the [common infrastructure](../common/event_bus) section. 
+
+### Control connection
+
+The goal of the control connection is to maintain a dedicated `DriverChannel` instance, used to:
+
+* listen for server-side protocol events:
+  * topology events (`NEW_NODE`, `REMOVED_NODE`) and status events (`UP`, `DOWN`) are published on
+    the event bus, to be processed by other components;
+  * schema events are propagated directly to the metadata manager, to trigger a refresh;
+* provide a way to query system tables. In practice, this is used by:
+  * the topology monitor, to read node information from `system.local` and `system.peers`;
+  * the metadata manager, to read schema metadata from `system_schema.*`.
+
+It has its own reconnection mechanism (if the channel goes down, a new one will be opened to another
+node in the cluster) and some logic for initialization and shutdown.
+
+Note that the control connection is really just an implementation detail of the metadata manager and
+topology monitor: if those components are overridden with custom versions that use other means to
+get their data, the driver will detect it and not initialize the control connection (at the time of
+writing, the session also references the control connection directly, but that's a bug:
+[JAVA-2473](https://datastax-oss.atlassian.net/browse/JAVA-2473)).
+
+### Metadata manager
+
+This component is responsible for maintaining the contents of
+[session.getMetadata()](../../core/metadata/).
+
+One big improvement in driver 4 is that the `Metadata` object is immutable and updated atomically;
+this guarantees a consistent view of the cluster at a given point in time. For example, if a
+keyspace name is referenced in the token map, there will always be a corresponding
+`KeyspaceMetadata` in the schema metadata.
+
+`MetadataManager` keeps the current `Metadata` instance in a volatile field. Each transition is
+managed by a `MetadataRefresh` object that computes the new metadata, along with an optional list of
+events to publish on the bus (e.g. table created, keyspace removed, etc.). The new metadata is then
+written back to the volatile field. `MetadataManager` follows the [confined inner
+class](../common/concurrency/#cold-path) pattern to ensure that all refreshes are applied serially,
+from a single admin thread. This guarantees that two refreshes can't start from the same initial
+state and overwrite each other.
+
+There are various types of refreshes targeting nodes, the schema or the token map.
+
+Note that, unlike driver 3, we only do full schema refreshes. This simplifies the code considerably,
+and, thanks to debouncing, it should not affect performance. The schema refresh process uses a few
+auxiliary components that may have different implementations depending on the Cassandra version:
+
+* `SchemaQueries`: launches the schema queries asynchronously, and assembles the result in a
+  `SchemaRows`;
+* `SchemaParser`: turns the `SchemaRows` into the `SchemaRefresh`.
+
+When the metadata manager needs node-related data, it queries the topology monitor. When it needs
+schema-related data, it uses the control connection directly to issue its queries.
+
+### Topology monitor
+
+`TopologyMonitor` abstracts how we get information about nodes in the cluster:
+
+* refresh the list of nodes;
+* refresh an individual node, or load the information of a newly added node;
+* check schema agreement;
+* emit `TopologyEvent` instances on the bus when we get external signals suggesting topology changes
+  (node added or removed), or status changes (node down or up).
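+
+As a rough sketch, the interface has this general shape (simplified from memory; refer to the
+actual `TopologyMonitor` javadocs for the authoritative signatures):
+
+```java
+// Approximate outline, not the literal interface:
+public interface TopologyMonitor extends AsyncAutoCloseable {
+  CompletionStage<Void> init();
+  CompletionStage<Iterable<NodeInfo>> refreshNodeList();
+  CompletionStage<Optional<NodeInfo>> refreshNode(Node node);
+  CompletionStage<Boolean> checkSchemaAgreement();
+}
+```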
+
+The built-in implementation uses the control connection to query `system.local` and `system.peers`,
+and to listen to gossip events.
+
+### Node state manager
+
+`NodeStateManager` tracks the state of the nodes in the cluster.
+
+We can't simply trust gossip events because they are not always reliable (the coordinator can become
+isolated and think other nodes are down). Instead, the driver uses more elaborate rules that combine
+external signals with observed internal state:
+
+* as long as we have an active connection to a node, it is considered up, whatever gossip events
+  say;
+* if all connections to a node are lost, and its pool has started reconnecting, it gets marked down
+  (we check the reconnection because the pool could have shut down for legitimate reasons, like the
+  node distance changing to IGNORED);
+* a node is marked back up when the driver has successfully reopened at least one connection;
+* if the driver is not actively trying to connect to a node (for example if it is at distance
+  IGNORED), then gossip events are applied directly.
+
+See the javadocs of `NodeState` and `TopologyEvent`, as well as the `NodeStateManager`
+implementation itself, for more details.
+
+#### Topology events vs. node state events
+
+These two event types are related, but they're used at different stages:
+
+* `TopologyEvent` is an external signal about the state of a node (by default, a `TOPOLOGY_CHANGE`
+  or `STATUS_CHANGE` gossip event received on the control connection). This is considered a mere
+  suggestion that the driver may or may not decide to follow;
+* `NodeStateEvent` is an actual decision made by the driver to change a node to a given state.
+
+`NodeStateManager` essentially transforms topology events, as well as other internal signals, into
+node state events.
+
+In general, other driver components only react to node state events, but there are a few exceptions:
+for example, if a connection pool is reconnecting and the next attempt is scheduled in 5 minutes,
+but a `SUGGEST_UP` topology event is emitted, the pool tries to reconnect immediately.
+
+The best way to find where each event is used is to do a usage search of the event type.
+
+### How admin components work together
+
+Most changes to the cluster state will involve the coordinated effort of multiple admin components.
+Here are a few examples: + +#### A new node gets added + +```ditaa ++-----------------+ +--------+ +----------------+ +---------------+ +---------------+ +|ControlConnection| |EventBus| |NodeStateManager| |MetadataManager| |TopologyMonitor| ++--------+--------+ +---+----+ +--------+-------+ +-------+-------+ +-------+-------+ + | | | | | ++--------+-------+ | | | | +|Receive NEW_NODE| | | | | +|gossip event | | | | | +| {d}| | | | | ++--------+-------+ | | | | + | | | | | + |TopologyEvent( | | | | + | SUGGEST_ADDED)| | | | + +--------------->| | | | + | |onTopologyEvent| | | + | +-------------->| | | + | | +------+-------+ | | + | | |check node not| | | + | | |known already | | | + | | | {d}| | | + | | +------+-------+ | | + | | | | | + | | | addNode | | + | | +---------------->| | + | | | | getNewNodeInfo | + | | | +---------------->| + | | | | | + | query(SELECT FROM system.peers) | + |<-------------------------------------------------------------------+ + +------------------------------------------------------------------->| + | | | |<----------------+ + | | | +-------+--------+ | + | | | |create and apply| | + | | | |AddNodeRefresh | | + | | | | {d}| | + | | | +-------+--------+ | + | | | | | + | | NodeChangeEvent(ADDED) | | + | |<--------------------------------+ | + | | | | | +``` + +At this point, other driver components listening on the event bus will get notified of the addition. +For example, `DefaultSession` will initialize a connection pool to the new node. + +#### A new table gets created + +```ditaa + +-----------------+ +---------------+ +---------------+ +--------+ + |ControlConnection| |MetadataManager| |TopologyMonitor| |EventBus| + +--------+--------+ +-------+-------+ +-------+-------+ +---+----+ + | | | | ++----------+----------+ | | | +|Receive SCHEMA_CHANGE| | | | +|gossip event | | | | +| {d} | | | | ++----------+----------+ | | | + | | | | + | refreshSchema | | | + +------------------------------->| | | + | |checkSchemaAgreement | | + | +-------------------->| | + | | | | + | query(SELECT FROM system.local/peers) | | + |<-----------------------------------------------------+ | + +----------------------------------------------------->| | + | | | | + | |<--------------------+ | + |query(SELECT FROM system_schema)| | | + |<-------------------------------+ | | + +------------------------------->| | | + | +-------+--------+ | | + | |Parse results | | | + | |Create and apply| | | + | |SchemaRefresh | | | + | | {d}| | | + | +-------+--------+ | | + | | | | + | | TableChangeEvent(CREATED) | + | +---------------------------------->| + | | | | +``` + +#### The last connection to an active node drops + +```ditaa + +-----------+ +--------+ +----------------+ +----+ +---------------+ + |ChannelPool| |EventBus| |NodeStateManager| |Node| |MetadataManager| + +-----+-----+ +---+----+ +-------+--------+ +-+--+ +-------+-------+ + | | | | | + |ChannelEvent(CLOSED) | | | | + +----------------------->| | | | + | |onChannelEvent | | | + +------+-----+ +--------------->| | | + | start | | |decrement | | + |reconnecting| | |openConnections | | + | {d}| | +--------------->| | + +------+-----+ | | | | + |ChannelEvent( | | | | + | RECONNECTION_STARTED) | | | | + +----------------------->| | | | + | |onChannelEvent | | | + | +--------------->| | | + | | |increment | | + | | |reconnections | | + | | +--------------->| | + | | | | | + | | +--------+--------+ | | + | | |detect node has | | | + | | |0 connections and| | | + | | |is reconnecting | | | + | | | {d} | | | + | | +--------+--------+ | 
|
+       |                  |                 |set state DOWN  |                |
+       |                  |                 +--------------->|                |
+       |                  |NodeStateEvent(  |                |                |
+       |                  |  DOWN)          |                |                |
+ +------+-----+           |<----------------+                |                |
+ |reconnection|           |                 |                |                |
+ |  succeeds  |           |                 |                |                |
+ |         {d}|           |                 |                |                |
+ +------+-----+           |                 |                |                |
+       |ChannelEvent(OPENED)                |                |                |
+       +----------------------->|           |                |                |
+       |                  |onChannelEvent   |                |                |
+       |                  +---------------->|                |                |
+       |                  |                 |increment       |                |
+       |                  |                 |openConnections |                |
+       |                  |                 +--------------->|                |
+       |                  |                 |                |                |
+       |                  |        +--------+--------+       |                |
+       |                  |        |detect node has  |       |                |
+       |                  |        |1 connection     |       |                |
+       |                  |        |              {d}|       |                |
+       |                  |        +--------+--------+       |                |
+       |                  |                 |  refreshNode   |                |
+       |                  |                 +--------------------------------->
+       |                  |                 |                |                |
+       |                  |                 |set state UP    |                |
+       |                  |                 +--------------->|                |
+       |                  |NodeStateEvent(  |                |                |
+       |                  |  UP)            |                |                |
+       |                  |<----------------+                |                |
+       |ChannelEvent(     |                 |                |                |
+       |  RECONNECTION_STOPPED)             |                |                |
+       +----------------------->|           |                |                |
+       |                  |onChannelEvent   |                |                |
+       |                  +---------------->|                |                |
+       |                  |                 |decrement       |                |
+       |                  |                 |reconnections   |                |
+       |                  |                 +--------------->|                |
+       |                  |                 |                |                |
+```
+
+### Extension points
+
+#### TopologyMonitor
+
+This is a standalone component because some users have asked for a way to use their own discovery
+service instead of relying on system tables and gossip (see
+[JAVA-1082](https://datastax-oss.atlassian.net/browse/JAVA-1082)).
+
+A custom implementation can be plugged by [extending the
+context](../common/context/#overriding-a-context-component) and overriding `buildTopologyMonitor`.
+It should:
+
+* implement the methods of `TopologyMonitor` by querying the discovery service;
+* use some notification mechanism (or poll the service periodically) to detect when nodes go up or
+  down, or get added or removed, and emit the corresponding `TopologyEvent` instances on the bus.
+
+Read the javadocs for more details; in particular, `NodeInfo` explains how the driver uses the
+information returned by the topology monitor.
+
+#### MetadataManager
+
+It's less likely that this will be overridden directly. But the schema querying and parsing logic is
+abstracted behind two factories that handle the differences between Cassandra versions:
+`SchemaQueriesFactory` and `SchemaParserFactory`. These are pluggable by [extending the
+context](../common/context/#overriding-a-context-component) and overriding the corresponding
+`buildXxx` methods.
diff --git a/manual/developer/common/.nav b/manual/developer/common/.nav
new file mode 100644
index 00000000000..a841aca40ca
--- /dev/null
+++ b/manual/developer/common/.nav
@@ -0,0 +1,3 @@
+context
+concurrency
+event_bus
diff --git a/manual/developer/common/README.md b/manual/developer/common/README.md
new file mode 100644
index 00000000000..13ad8639e62
--- /dev/null
+++ b/manual/developer/common/README.md
@@ -0,0 +1,28 @@
+
+
+## Common infrastructure
+
+This covers utilities and concepts that are shared throughout the codebase:
+
+* the [context](context/) is what glues everything together, and your primary entry point to extend
+  the driver.
+* we explain the two major approaches to deal with [concurrency](concurrency/) in the driver.
+* the [event bus](event_bus/) is used to decouple some of the internal components through
+  asynchronous messaging.
diff --git a/manual/developer/common/concurrency/README.md b/manual/developer/common/concurrency/README.md
new file mode 100644
index 00000000000..fb493930d6e
--- /dev/null
+++ b/manual/developer/common/concurrency/README.md
@@ -0,0 +1,145 @@
+
+
+## Concurrency
+
+The driver is a highly concurrent environment.
We try to use thread confinement to simplify the
+code when that does not impact performance.
+
+### Hot path
+
+The hot path is everything that happens for a `session.execute` call. In a typical client
+application, this is where the driver will likely spend the majority of its time, so it must be
+fast.
+
+Write path:
+
+1. convert the statement into a protocol-level `Message` (`CqlRequestHandler` constructor);
+2. find a node and a connection, and write the message to it (`CqlRequestHandler.sendRequest`);
+3. assign a stream id and wrap the message into a frame (`InflightHandler.write`);
+4. encode the frame into a binary payload (`FrameEncoder`).
+
+Read path:
+
+1. decode the binary payload into a frame (`FrameDecoder`);
+2. find the handler that corresponds to the stream id (`InFlightHandler.channelRead`);
+3. complete the client's future (`CqlRequestHandler.NodeResponseCallback.onResponse`).
+
+Various policies are also invoked along the way (load balancing, retry, speculative execution,
+timestamp generator...), they are considered on the hot path too.
+
+Steps 1 and 2 of the write path happen on the client thread, and 3 and 4 on the Netty I/O thread
+(which is one of the threads in `NettyOptions.ioEventLoopGroup()`).
+On the read path, everything happens on the Netty I/O thread. Beyond that, we want to avoid context
+switches for performance reasons: in early prototypes, we tried confining `CqlRequestHandler` to a
+particular thread, but that did not work well; so you will find that the code is fairly similar to
+driver 3 in terms of concurrency control (reliance on atomic structures, volatile fields, etc.).
+
+Note: code on the hot path should prefer the `TRACE` log level.
+
+### Cold path
+
+The cold path is everything else: initialization and shutdown, metadata refreshes, tracking node
+states, etc. They will typically be way less frequent than user requests, so we can tolerate a small
+performance hit in order to make concurrency easier to handle.
+
+One pattern we use a lot is a confined inner class:
+
+```java
+public class ControlConnection {
+  // some content omitted for brevity
+
+  private final EventExecutor adminExecutor;
+  private final SingleThreaded singleThreaded;
+
+  // Called from other components, from any thread
+  public void reconnectNow() {
+    RunOrSchedule.on(adminExecutor, singleThreaded::reconnectNow);
+  }
+
+  private class SingleThreaded {
+    private void reconnectNow() {
+      assert adminExecutor.inEventLoop();
+      // this method is only ever called from one thread, much easier to handle concurrency
+    }
+  }
+}
+```
+
+Public outer methods such as `reconnectNow()` are called concurrently. But they delegate to a method
+of the inner class, which always runs on the same `adminExecutor` thread. `RunOrSchedule.on` calls
+the method directly if we're already on the target thread, otherwise it schedules a task. If we need
+to propagate a result, the outer method injects a future that the inner method completes.
+
+`adminExecutor` is picked randomly from `NettyOptions.adminEventExecutorGroup()` at construction
+time.
+
+Confining `SingleThreaded` simplifies the code tremendously: we can use regular, non-volatile
+fields, and methods are guaranteed to always run in isolation, eliminating subtle race conditions
+(this idea was borrowed from actor systems).
+
+### Non-blocking
+
+Whether on the hot or cold path, internal code is almost 100% lock-free. The driver's lock-freedom
+guarantees are [detailed](../../../core/non_blocking) in the core manual.
+
+If an internal component needs to execute a query, it does so asynchronously, and registers
+callbacks to process the results. Examples of this can be found in `ReprepareOnUp` and
+`DefaultTopologyMonitor` (among others).
+
+The only place where the driver blocks is when using the synchronous API (methods declared in
+[`SyncCqlSession`]), and when calling other synchronous wrapper methods in the public API, for
+example, [`ExecutionInfo.getQueryTrace()`]:
+
+```java
+public interface ExecutionInfo {
+  // some content omitted for brevity
+
+  default QueryTrace getQueryTrace() {
+    BlockingOperation.checkNotDriverThread();
+    return CompletableFutures.getUninterruptibly(getQueryTraceAsync());
+  }
+}
+```
+
+When a public API method is blocking, this is generally clearly stated in its javadocs.
+
+[`ExecutionInfo.getQueryTrace()`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ExecutionInfo.html#getQueryTrace--
+[`SyncCqlSession`]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/SyncCqlSession.html
+
+`BlockingOperation` is a utility to check that those methods aren't called on I/O threads, which
+could introduce deadlocks.
+
+Keeping the internals fully asynchronous is another major improvement over driver 3, where internal
+requests were synchronous, and required multiple internal executors to avoid deadlocks.
+
+In driver 4, there are only two executors: `NettyOptions.ioEventLoopGroup()` and
+`NettyOptions.adminEventLoopGroup()`, that are guaranteed to never run blocking tasks. They can be
+shared with application code, or across multiple sessions, or can even be one and the same (in
+theory, it's possible to use a single 1-thread executor, although there's probably no practical
+reason to do that).
+
+To be exhaustive, `NettyOptions.getTimer()` also uses its own thread; we tried scheduling request
+timeouts and speculative executions on I/O threads in early alphas, but that didn't perform as well
+as Netty's `HashedWheelTimer`.
+
+So the total number of threads created by a session is
+```
+advanced.netty.io-group.size + advanced.netty.admin-group.size + 1
+```
diff --git a/manual/developer/common/context/README.md b/manual/developer/common/context/README.md
new file mode 100644
index 00000000000..e20d5ad0ddb
--- /dev/null
+++ b/manual/developer/common/context/README.md
@@ -0,0 +1,141 @@
+
+
+## Driver context
+
+The context holds the driver's internal components. It is exposed in the public API as
+`DriverContext`, accessible via `session.getContext()`. Internally, the child interface
+`InternalDriverContext` adds access to more components; finally, `DefaultDriverContext` is the
+implementing class.
+
+### The dependency graph
+
+Most components initialize lazily (see `LazyReference`). They also reference each other, typically
+by taking the context as a constructor argument, and extracting the dependencies they need:
+
+```java
+public DefaultTopologyMonitor(InternalDriverContext context) {
+  ...
+  this.controlConnection = context.getControlConnection();
+}
+```
+
+This avoids having to handle the initialization order ourselves. It is also convenient for unit
+tests: you can run a component in isolation by mocking all of its dependencies.
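+
+For instance, a test could look something like this (a minimal sketch assuming Mockito; in
+practice you would also stub whatever other dependencies the component extracts from the context):
+
+```java
+InternalDriverContext context = mock(InternalDriverContext.class);
+when(context.getControlConnection()).thenReturn(mock(ControlConnection.class));
+
+DefaultTopologyMonitor monitor = new DefaultTopologyMonitor(context);
+// exercise and verify the monitor in isolation...
+```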
+
+Obviously, things won't go well if there are cyclic dependencies; if you make changes to the
+context, you can set a system property to check the dependency graph; it will throw if a cycle is
+detected (see `CycleDetector`):
+
+```
+-Dcom.datastax.oss.driver.DETECT_CYCLES=true
+```
+
+This is disabled by default, because we don't expect it to be very useful outside of testing cycles.
+
+### Why not use a DI framework?
+
+As should be clear by now, the context is a poor man's Dependency Injection framework. We
+deliberately avoided third-party solutions:
+
+* to keep things as simple as possible,
+* to avoid an additional library dependency,
+* to allow end users to access components and add their own (which wouldn't work well with
+  compile-time approaches like Dagger).
+
+### Overriding a context component
+
+The basic approach to plug in a custom internal component is to subclass the context.
+
+For example, let's say you wrote a custom `NettyOptions` implementation (maybe you have multiple
+sessions, and want to reuse the event loop groups instead of recreating them every time):
+
+```java
+public class CustomNettyOptions implements NettyOptions {
+  ...
+}
+```
+
+In the default context, here's how the component is managed:
+
+```java
+public class DefaultDriverContext {
+
+  // some content omitted for brevity
+
+  private final LazyReference<NettyOptions> nettyOptionsRef =
+      new LazyReference<>("nettyOptions", this::buildNettyOptions, cycleDetector);
+
+  protected NettyOptions buildNettyOptions() {
+    return new DefaultNettyOptions(this);
+  }
+
+  @NonNull
+  @Override
+  public NettyOptions getNettyOptions() {
+    return nettyOptionsRef.get();
+  }
+}
+```
+
+To swap in your implementation, you only need to override the build method:
+
+```java
+public class CustomContext extends DefaultDriverContext {
+
+  public CustomContext(DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) {
+    super(configLoader, programmaticArguments);
+  }
+
+  @Override
+  protected NettyOptions buildNettyOptions() {
+    return new CustomNettyOptions(this);
+  }
+}
+```
+
+Then you need a way to create a session that uses your custom context. The session builder is
+extensible as well:
+
+```java
+public class CustomBuilder extends SessionBuilder<CustomBuilder, CqlSession> {
+
+  @Override
+  protected DriverContext buildContext(
+      DriverConfigLoader configLoader, ProgrammaticArguments programmaticArguments) {
+    return new CustomContext(configLoader, programmaticArguments);
+  }
+
+  @Override
+  protected CqlSession wrap(@NonNull CqlSession defaultSession) {
+    // Nothing to do here, nothing changes on the session type
+    return defaultSession;
+  }
+}
+```
+
+Finally, you can use your custom builder like the regular `CqlSession.builder()`; it inherits all
+the methods:
+
+```java
+CqlSession session = new CustomBuilder()
+    .addContactPoint(new InetSocketAddress("1.2.3.4", 9042))
+    .withLocalDatacenter("datacenter1")
+    .build();
+```
diff --git a/manual/developer/common/event_bus/README.md b/manual/developer/common/event_bus/README.md
new file mode 100644
index 00000000000..74729ac6656
--- /dev/null
+++ b/manual/developer/common/event_bus/README.md
@@ -0,0 +1,62 @@
+
+
+## Event bus
+
+`EventBus` is a bare-bones messaging mechanism used to decouple components from each other, and to
+broadcast messages to more than one component at a time.
+
+Producers fire events on the bus; consumers register to be notified for a particular event class.
+For example, `DefaultDriverConfigLoader` reloads the config periodically, and fires an event if it
+detects a change:
+
+```java
+boolean changed = driverConfig.reload(configSupplier.get());
+if (changed) {
+  LOG.info("[{}] Detected a configuration change", logPrefix);
+  eventBus.fire(ConfigChangeEvent.INSTANCE);
+}
+```
+
+This allows other components, such as `ChannelPool`, to react to config changes dynamically:
+
+```java
+eventBus.register(
+    ConfigChangeEvent.class, RunOrSchedule.on(adminExecutor, this::onConfigChanged));
+
+private void onConfigChanged(ConfigChangeEvent event) {
+  assert adminExecutor.inEventLoop();
+  // resize re-reads the pool size from the configuration and does nothing if it hasn't changed,
+  // which is exactly what we want.
+  resize(distance);
+}
+```
+
+For simplicity, the implementation makes the following assumptions:
+
+* events are propagated synchronously: if their processing needs to be delayed or rescheduled to
+  another thread, it's the consumer's responsibility (see how the pool uses `RunOrSchedule` in the
+  example above);
+* callbacks are not polymorphic: you must register for the exact event class. For example, if you
+  have `eventBus.register(B.class, callback)` and fire an `A extends B`, the callback won't catch
+  it (internally, this allows direct lookups instead of traversing all registered callbacks with an
+  `instanceof` check).
+
+Those choices have been good enough for the needs of the driver. That's why we use a custom
+implementation rather than something more sophisticated like Guava's event bus.
diff --git a/manual/developer/native_protocol/README.md b/manual/developer/native_protocol/README.md
new file mode 100644
index 00000000000..b96553fc51b
--- /dev/null
+++ b/manual/developer/native_protocol/README.md
@@ -0,0 +1,197 @@
+
+
+## Native protocol layer
+
+The native protocol layer encodes protocol messages into binary before they are sent over the
+network.
+
+This part of the code lives in its own project:
+[native-protocol](https://github.com/datastax/native-protocol). We extracted it to make it reusable
+([Simulacron](https://github.com/datastax/simulacron) also uses it).
+
+The protocol specifications are available in
+[native-protocol/src/main/resources](https://github.com/datastax/native-protocol/tree/1.x/src/main/resources).
+These files originally come from Cassandra; we copy them over for easy access. Authoritative
+specifications can always be found in
+[cassandra/doc](https://github.com/apache/cassandra/tree/trunk/doc).
+
+For a broad overview of how protocol types are used in the driver, let's step through an example:
+
+* the user calls `session.execute()` with a `SimpleStatement`. The protocol message for a
+  non-prepared request is `QUERY`;
+* `CqlRequestHandler` uses `Conversions.toMessage` to convert the statement into a
+  `c.d.o.protocol.internal.request.Query`;
+* `InflightHandler.write` assigns a stream id to that message, and wraps it into a
+  `c.d.o.protocol.internal.Frame`;
+* `FrameEncoder` uses `c.d.o.protocol.internal.FrameCodec` to convert the frame to binary.
+
+(All types prefixed with `c.d.o.protocol.internal` belong to the native-protocol project.)
+
+A similar process happens on the response path: decode the incoming binary payload into a protocol
+message, then convert the message into higher-level driver objects: `ResultSet`, `ExecutionInfo`,
+etc.
+
+### Native protocol types
+
+#### Messages
+
+Every protocol message is identified by an opcode, and has a corresponding `Message` subclass.
+
+A `Frame` wraps a message to add metadata, such as the protocol version and stream id.
+
+```ditaa
++-------+ contains +------------+
+| Frame +--------->+ Message    +
++-------+          +------------+
+                   | int opcode |
+                   +--+---------+
+                      |
+                      |    +---------+
+                      +----+ Query   |
+                      |    +---------+
+                      |
+                      |    +---------+
+                      +----+ Execute |
+                      |    +---------+
+                      |
+                      |    +---------+
+                      +----+ Rows    |
+                           +---------+
+
+                           etc.
+```
+
+All value classes are immutable, but for efficiency they don't make defensive copies of their
+fields. If these fields are mutable (for example collections), they shouldn't be modified after
+creating a message instance.
+
+The code makes very few assumptions about how the messages will be used. Data is often represented
+in the simplest way. For example, `ProtocolConstants` uses simple integer constants to represent
+protocol codes (enums wouldn't work at that level, because we need to add new codes in the DSE
+driver); the driver generally rewraps them in more type-safe structures before exposing them to
+higher-level layers.
+
+#### Encoding/decoding
+
+For every message, there is a corresponding `Message.Codec` for encoding and decoding. A
+`FrameCodec` relies on a set of message codecs, for one or more protocol versions. Given an incoming
+frame, it looks up the right message codec to use, based on the protocol version and opcode.
+Optionally, it compresses frame bodies with a `Compressor`.
+
+```ditaa
++-----------------+                +-------------------+
+| FrameCodec[B]   +----------------+ PrimitiveCodec[B] |
++-----------------+                +-------------------+
+| B encode(Frame) |
+| Frame decode(B) +-------+        +---------------+
++------+----------+       +--------+ Compressor[B] |
+       |                           +---------------+
+       |
+       |                           +-------------------+
+       +---------------------------+ Message.Codec     |
+         1 codec per opcode        +-------------------+
+         and protocol version      | B encode(Message) |
+                                   | Message decode(B) |
+                                   +-------------------+
+```
+
+Most of the time, you'll want to use the full set of message codecs for a given protocol version.
+`CodecGroup` provides a convenient way to register multiple codecs at once. The project provides
+default implementations for all supported protocol versions, both for clients like the driver (e.g.
+encode `QUERY`, decode `RESULT`) and for servers like Simulacron (decode `QUERY`, encode `RESULT`).
+
+```ditaa
++-------------+
+| CodecGroup  |
++------+------+
+       |
+       |    +------------------------+
+       +----+ ProtocolV3ClientCodecs |
+       |    +------------------------+
+       |
+       |    +------------------------+
+       +----+ ProtocolV3ServerCodecs |
+       |    +------------------------+
+       |
+       |    +------------------------+
+       +----+ ProtocolV4ClientCodecs |
+       |    +------------------------+
+       |
+       |    +------------------------+
+       +----+ ProtocolV4ServerCodecs |
+       |    +------------------------+
+       |
+       |    +------------------------+
+       +----+ ProtocolV5ClientCodecs |
+       |    +------------------------+
+       |
+       |    +------------------------+
+       +----+ ProtocolV5ServerCodecs |
+            +------------------------+
+```
+
+The native protocol layer is agnostic to the actual binary representation. In the driver, this
+happens to be a Netty `ByteBuf`, but the encoding logic doesn't need to be aware of that. This is
+expressed by the type parameter `B` in `FrameCodec`. `PrimitiveCodec` abstracts the basic
+primitives to work with a `B`: how to create an instance, read and write data to it, etc.
+
+```java
+public interface PrimitiveCodec<B> {
+  B allocate(int size);
+  int readInt(B source);
+  void writeInt(int i, B dest);
+  ...
+}
+```
+
+Everything else builds upon those primitives. By just switching the `PrimitiveCodec` implementation,
+the whole protocol layer could be reused with a different type, such as `byte[]`.
+
+In summary, to initialize a `FrameCodec`, you need:
+
+* a `PrimitiveCodec`;
+* a `Compressor` (optional);
+* one or more `CodecGroup`s.
+
+### Integration in the driver
+
+The driver initializes its `FrameCodec` in `DefaultDriverContext.buildFrameCodec()`.
+
+* the primitive codec is `ByteBufPrimitiveCodec`, which implements the basic primitives for Netty's
+  `ByteBuf`;
+* the compressor comes from `DefaultDriverContext.buildCompressor()`, which determines the
+  implementation from the configuration;
+* it is built with `FrameCodec.defaultClient`, which is a shortcut to use the default client groups:
+  `ProtocolV3ClientCodecs`, `ProtocolV4ClientCodecs` and `ProtocolV5ClientCodecs`.
+
+### Extension points
+
+The default frame codec can be replaced by [extending the
+context](../common/context/#overriding-a-context-component) to override `buildFrameCodec`. This
+can be used to add or remove a protocol version, or replace a particular codec.
+
+If protocol versions change, `ProtocolVersionRegistry` will likely be affected as well.
+
+Also, depending on the nature of the protocol changes, the driver's [request
+processors](../request_execution/#request-processors) might require some adjustments: either replace
+them, or introduce separate ones (possibly with new `executeXxx()` methods on a custom session
+interface).
diff --git a/manual/developer/netty_pipeline/README.md b/manual/developer/netty_pipeline/README.md
new file mode 100644
index 00000000000..b596832e202
--- /dev/null
+++ b/manual/developer/netty_pipeline/README.md
@@ -0,0 +1,180 @@
+
+
+## Netty pipeline
+
+With the [protocol layer](../native_protocol) in place, the next step is to build the logic for a
+single server connection.
+
+We use [Netty](https://netty.io/) for network I/O (to learn more about Netty, [this
+book](https://www.manning.com/books/netty-in-action) is an excellent resource).
+
+```ditaa
+ +----------------+
+ | ChannelFactory |
+ +----------------+
+ |   connect()    |
+ +-------+--------+
+         |                      Application
+         |creates   +----------------------------------------------+
+         V          | Outgoing                                     |
+ +-------+--------+ |    |    +---------------------+     ^        |
+ | DriverChannel  | |    |    | ProtocolInitHandler |     |        |
+ +-------+--------+ |    |    +---------------------+     |        |
+         |          |    |                                |        |
+ +-------+--------+ |    |    +---------------------+     |        |
+ | Channel        | |    |    | InFlightHandler     |     |        |
+ | (Netty)        | |    |    +---------------------+     |        |
+ +-------+--------+ |    |                                |        |
+         |          |    |    +---------------------+     |        |
+ +-------+--------+ |    |    | HeartbeatHandler    |     |        |
+ |ChannelPipeline +---+  |    +---------------------+     |        |
+ | (Netty)        |   |  |                                |        |
+ +----------------+   |  |  +--------------+ +--------------+      |
+                      |  |  | FrameEncoder | | FrameDecoder |      |
+                      |  |  +--------------+ +--------------+      |
+                      |  |                                |        |
+                      |  |    +---------------------+     |        |
+                      |  |    | SslHandler          |     |        |
+                      |  |    | (Netty)             |     |        |
+                      |  V    +---------------------+     |        |
+                      |                       Incoming             |
+                      +----------------------------------------------+
+                                        Network
+```
+
+Each Cassandra connection is based on a Netty `Channel`. We wrap it into our own `DriverChannel`,
+which exposes higher-level operations. `ChannelFactory` is the entry point for other driver
+components; it handles protocol negotiation for the first channel.
+
+A Netty channel has a *pipeline* that contains a sequence of *handlers*.
As a request is sent, it
+goes through the pipeline top to bottom; each successive handler processes the input, and passes the
+result to the next handler. Incoming responses go the other way.
+
+Our pipeline is configured with the following handlers:
+
+### SslHandler
+
+The implementation is provided by Netty (all the other handlers are custom implementations).
+
+Internally, handler instances are provided by `SslHandlerFactory`. At the user-facing level, this is
+abstracted behind `SslEngineFactory`, based on Java's default SSL implementation.
+
+See also the [Extension points](#extension-points) section below.
+
+### FrameEncoder and FrameDecoder
+
+This is where we integrate the protocol layer, as explained
+[here](../native_protocol/#integration-in-the-driver).
+
+Unlike the other pipeline stages, we use separate handlers for incoming and outgoing messages.
+
+### HeartbeatHandler
+
+The heartbeat is a background request sent on inactive connections (no reads for X seconds), to
+make sure that they are still alive, and prevent them from being dropped by a firewall. This is
+similar to TCP keep-alive, but we provide an application-side alternative because users don't always
+have full control over their network configuration.
+
+`HeartbeatHandler` is based on Netty's built-in `IdleStateHandler`, so there's not much in there
+apart from the details of the control request.
+
+### InFlightHandler
+
+This handler is where most of the connection logic resides. It is responsible for:
+
+* writing regular requests:
+  * find an available stream id;
+  * store the `ResponseCallback` provided by the client under that id;
+  * when the response comes in, retrieve the callback and complete it;
+* cancelling a request;
+* switching the connection to a new keyspace (if a `USE` statement was executed through the session);
+* handling shutdown: gracefully (allow all requests to complete), or forcefully (error out all
+  requests).
+
+The two most important methods are:
+
+* `write(ChannelHandlerContext, Object, ChannelPromise)`: processes outgoing messages. We accept
+  different types of messages, because cancellation and shutdown also use that path. See
+  `DriverChannel`, which abstracts those details.
+* `channelRead`: processes incoming responses.
+
+Netty handlers are confined to the channel's event loop (a.k.a. I/O thread). Therefore the code
+doesn't have to be concurrent, fields can be non-volatile and methods are guaranteed not to race
+with each other.
+
+In particular, a big difference from driver 3 is that stream ids are assigned within the event loop,
+instead of from client code before writing to the channel (see also [connection
+pooling](../request_execution/#connection_pooling)). `StreamIdGenerator` is not thread-safe.
+
+All communication between the handler and the outside world must be done through messages or channel
+events. There are three exceptions to this rule: `getAvailableIds`, `getInflight` and `getOrphanIds`,
+which are based on volatile fields. They are all used for metrics, and `getAvailableIds` is also
+used to balance the load over connections to the same node (see `ChannelSet`).
+
+### ProtocolInitHandler
+
+This handler manages the protocol initialization sequence on a newly established connection (see the
+`STARTUP` message in the protocol specification).
+
+Most of the logic resides in `InitRequest.onResponse`, which acts as a simple state machine based on
+the last request sent.
+
+There is also a bit of custom code to ensure that the channel is not made available to clients
+before the protocol is ready. This is abstracted in the parent class `ConnectInitHandler`.
+
+Once the initialization is complete, `ProtocolInitHandler` removes itself from the pipeline.
+
+### Extension points
+
+#### NettyOptions
+
+The `advanced.netty` section in the [configuration](../../core/configuration/reference/) exposes a
+few high-level options.
+
+For more elaborate customizations, you can [extend the
+context](../common/context/#overriding-a-context-component) to plug in a custom `NettyOptions`
+implementation. This allows you to do things such as:
+
+* reusing existing event loops;
+* using Netty's [native Epoll transport](https://netty.io/wiki/native-transports.html);
+* adding custom handlers to the pipeline.
+
+#### SslHandlerFactory
+
+The [user-facing API](../../core/ssl/) (`advanced.ssl-engine-factory` in the configuration, or
+`SessionBuilder.withSslContext` / `SessionBuilder.withSslEngineFactory`) only supports Java's
+default SSL implementation.
+
+The driver can also work with Netty's [native
+integration](https://netty.io/wiki/requirements-for-4.x.html#tls-with-openssl) with OpenSSL or
+BoringSSL. This requires a bit of custom development against the internal API:
+
+* add a dependency on one of the `netty-tcnative` artifacts, following [these
+  instructions](http://netty.io/wiki/forked-tomcat-native.html);
+* implement `SslHandlerFactory`. Typically:
+  * the constructor will create a Netty [SslContext] with [SslContextBuilder.forClient], and store
+    it in a field;
+  * `newSslHandler` will delegate to one of the [SslContext.newHandler] methods;
+* [extend the context](../common/context/#overriding-a-context-component) and override
+  `buildSslHandlerFactory` to plug your custom implementation.
+
+[SslContext]: https://netty.io/4.1/api/io/netty/handler/ssl/SslContext.html
+[SslContext.newHandler]: https://netty.io/4.1/api/io/netty/handler/ssl/SslContext.html#newHandler-io.netty.buffer.ByteBufAllocator-
+[SslContextBuilder.forClient]: https://netty.io/4.1/api/io/netty/handler/ssl/SslContextBuilder.html#forClient--
diff --git a/manual/developer/request_execution/README.md b/manual/developer/request_execution/README.md
new file mode 100644
index 00000000000..38a0a55fbd7
--- /dev/null
+++ b/manual/developer/request_execution/README.md
@@ -0,0 +1,342 @@
+
+
+## Request execution
+
+The [Netty pipeline](../netty_pipeline/) gives us the ability to send low-level protocol messages on
+a single connection.
+
+The request execution layer builds upon that to:
+
+* manage multiple connections (many nodes, possibly many connections per node);
+* abstract the protocol layer behind higher-level, user-facing types.
+
+The session is the main entry point. `CqlSession` is the type that users will most likely reference
+in their applications. It extends a more generic `Session` type, for the sake of extensibility; this
+will be explained in [Request processors](#request-processors).
+
+```ditaa
++----------------------------------+
+| Session                          |
++----------------------------------+
+| ResultT execute(                 |
+|   RequestT, GenericType[ResultT])|
++----------------------------------+
+                 ^
+                 |
++----------------+-----------------+
+| CqlSession                       |
++----------------------------------+
+| ResultSet execute(Statement)     |
++----------------+-----------------+
+                 ^
+                 |
++----------------+-----------------+
+| DefaultSession                   |
++----------------+-----------------+
+                 |
+                 |
+                 | 1 per node  +-------------+
+                 +-------------+ ChannelPool |
+                 |             +----+--------+
+                 |                  |
+                 |                  | n  +---------------+
+                 |                  +----+ DriverChannel |
+                 |                       +---------------+
+                 |
+                 | 1  +--------------------------+
+                 +----+ RequestProcessorRegistry |
+                      +----+---------------------+
+                           |
+                           | n  +---------------------------+
+                           +----+ RequestProcessor          |
+                                +---------------------------+
+                                | ResultT process(RequestT) |
+                                +---------------------------+
+```
+
+`DefaultSession` contains the session implementation. It follows the [confined inner
+class](../common/concurrency/#cold-path) pattern to simplify concurrency.
+
+### Connection pooling
+
+```ditaa
++----------------------+ 1  +------------+
+| ChannelPool          +----+ ChannelSet |
++----------------------+    +-----+------+
+| DriverChannel next() |          |
++----------+-----------+         n|
+           |              +-------+-------+
+          1|              | DriverChannel |
+    +------+-------+      +---------------+
+    | Reconnection |
+    +--------------+
+```
+
+`ChannelPool` handles the connections to a given node, for a given session. It follows the [confined
+inner class](../common/concurrency/#cold-path) pattern to simplify concurrency. There are a few
+differences compared to the 3.x implementation:
+
+#### Fixed size
+
+The pool has a fixed number of connections; it doesn't grow or shrink dynamically based on current
+usage. In other words, there is no more "max" size, only a "core" size.
+
+However, this size is specified in the configuration. If the value is changed at runtime, the driver
+will detect it and trigger a resize of all active pools.
+
+The rationale for removing the dynamic behavior is that it introduced a ton of complexity in the
+implementation and configuration, for unclear benefits: if the load fluctuates very rapidly, then
+you need to provision for the max size anyway, so you might as well run with all the connections all
+the time. If on the other hand the fluctuations are rare and predictable (e.g. peak for holiday
+sales), then a manual configuration change is good enough.
+
+#### No queuing
+
+To get a connection to a node, client code calls `ChannelPool.next()`. This returns the least busy
+connection, based on the `getAvailableIds()` counter exposed by
+[InFlightHandler](netty_pipeline/#in-flight-handler).
+
+If all connections are busy, there is no queuing; the driver moves to the next node immediately. The
+rationale is that it's better to try another node that might be ready to reply, instead of
+introducing an additional wait for each node. If the user wants queuing when all nodes are busy,
+it's better to do it at the session level with a [throttler](../../core/throttling/), which provides
+more intuitive configuration.
+
+Before 4.5.0, there was also no preemptive acquisition of the stream id outside of the event loop:
+`getAvailableIds()` had volatile semantics, and a client could get a pooled connection that seemed
+not busy, but fail to acquire a stream id when it later tried the actual write.
This turned out to +not work well under high load, see [JAVA-2644](https://datastax-oss.atlassian.net/browse/JAVA-2644). + +Starting with 4.5.0, we've reintroduced a stronger guarantee (reminiscent of how things worked in +3.x): clients **must call `DriverChannel.preAcquireId()` exactly once before each write**. If the +call succeeds, `getAvailableIds()` is incremented immediately, and the client is guaranteed that +there will be a stream id available for the write. `preAcquireId()` and `getAvailableIds()` have +atomic semantics, so we can distribute the load more accurately. + +This comes at the cost of additional complexity: **we must ensure that every write is pre-acquired +first**, so that `getAvailableIds()` doesn't get out of sync with the actual stream id usage inside +`InFlightHandler`. This is explained in detail in the javadocs of `DriverChannel.preAcquireId()`, +read them carefully. + +The pool manages its channels with `ChannelSet`, a simple copy-on-write data structure. + +#### Built-in reconnection + +The pool has its own independent reconnection mechanism (based on the `Reconnection` utility class). +The goal is to keep the pool at its expected capacity: whenever a connection is lost, the task +starts and will try to reopen the missing connections at regular intervals. + +### Request processors + +```ditaa ++----------------------------------+ +| Session | ++----------------------------------+ +| ResultT execute( | +| RequestT, GenericType[ResultT])| ++----------------------------------+ + ^ + | ++----------------+-----------------+ +| CqlSession | ++----------------------------------+ +| ResultSet execute(Statement) | ++----------------+-----------------+ +``` + +The driver can execute different types of requests, in different ways. This is abstracted by the +top-level `Session` interface, with a very generic execution method: + +```java + ResultT execute( + RequestT request, GenericType resultType); +``` + +It takes a request, and a type token that serves as a hint at the expected result. Each `(RequestT, +ResultT)` combination defines an execution model, for example: + +| `RequestT` | `ResultT` | Execution | +| --- | --- | ---| +| `Statement` | `ResultSet` | CQL, synchronous | +| `Statement` | `CompletionStage` | CQL, asynchronous | +| `Statement` | `ReactiveResultSet` | CQL, reactive | +| `GraphStatement` | `GraphResultSet` | DSE Graph, synchronous | +| `GraphStatement` | `CompletionStage` | DSE Graph, asynchronous | + +In general, regular client code doesn't use `Session.execute` directly. Instead, child interfaces +expose more user-friendly shortcuts for a given result type: + +```java +public interface CqlSession extends Session { + default ResultSet execute(Statement statement) { + return execute(statement, Statement.SYNC); + } +} +``` + +The logic for each execution model is encapsulated in a `RequestProcessor`. +Processors are stored in a `RequestProcessorRegistry`. For each request, the session invokes the +registry to find the processor that matches the request and result types. 
+
+```ditaa
++----------------+ 1 +-----------------------------------+
+| DefaultSession +---+ RequestProcessorRegistry          |
++----------------+   +-----------------------------------+
+                     | processorFor(                     |
+                     |   RequestT, GenericType[ResultT]) |
+                     +-----------------+-----------------+
+                                       |
+                                       |n
+                 +---------------------+-----------------------+
+                 | RequestProcessor[RequestT, ResultT]         |
+                 +---------------------------------------------+
+                 | boolean canProcess(Request, GenericType[?]) |
+                 | ResultT process(RequestT)                   |
+                 +---------------------------------------------+
+                     ^
+                     |    +--------------------------+
+                     +----+ CqlRequestSyncProcessor  |
+                     |    +--------------------------+
+                     |
+                     |    +--------------------------+
+                     +----+ CqlRequestAsyncProcessor |
+                     |    +--------------------------+
+                     |
+                     |    +--------------------------+
+                     +----+ CqlPrepareSyncProcessor  |
+                     |    +--------------------------+
+                     |
+                     |    +--------------------------+
+                     +----+ CqlPrepareAsyncProcessor |
+                          +--------------------------+
+```
+
+A processor is responsible for:
+
+* converting the user request into [protocol-level messages](../native_protocol/);
+* selecting a coordinator node, and obtaining a channel from its connection pool;
+* writing the request to the channel;
+* handling timeouts, retries and speculative executions;
+* translating the response into user-level types.
+
+The `RequestProcessor` interface makes very few assumptions about the actual processing, but in
+general, implementations create a handler for the lifecycle of every request. For example,
+`CqlRequestHandler` is the central component for basic CQL execution.
+
+Processors can be implemented in terms of other processors. In particular, this is the case for
+synchronous execution models, which are just a blocking wrapper around their asynchronous
+counterpart. You can observe this in `CqlRequestSyncProcessor`.
+
+Note that preparing a statement is treated as just another execution model. It has its own
+processors, which operate on a special `PrepareRequest` type:
+
+```java
+public interface CqlSession extends Session {
+  default PreparedStatement prepare(SimpleStatement statement) {
+    return execute(new DefaultPrepareRequest(statement), PrepareRequest.SYNC);
+  }
+}
+```
+
+### Extension points
+
+#### RequestProcessorRegistry
+
+You can customize the set of request processors by [extending the
+context](../common/context/#overriding-a-context-component) and overriding
+`buildRequestProcessorRegistry`.
+
+This can be used to:
+
+* add your own execution models (new request types and/or return types);
+* remove existing ones;
+* or a combination of both.
+
+The driver codebase contains an integration test that provides a complete example:
+[RequestProcessorIT]. It shows how you can build a session that returns Guava's `ListenableFuture`
+instead of Java's `CompletionStage` (existing request type, different return type).
+
+[GuavaDriverContext] is the custom context subclass. It plugs a custom registry that wraps the
+default async processors with [GuavaRequestAsyncProcessor], to transform the returned futures.
+
+Note that the default async processors are not present in the registry anymore; if you try to call
+a method that returns a `CompletionStage`, it fails. See the next section for how to hide those
+methods.
+
+#### Exposing a custom session interface
+
+If you add or remove execution models, you probably want to expose a session interface that matches
+the underlying capabilities of the implementation.
+
+For example, in the [RequestProcessorIT] example mentioned in the previous section, we remove the
+ability to return `CompletionStage`, but add the ability to return `ListenableFuture`. Therefore we
+expose a custom [GuavaSession] with a different return type for async methods:
+
+```java
+public interface GuavaSession extends Session {
+  default ListenableFuture<AsyncResultSet> executeAsync(Statement<?> statement) { ... }
+  default ListenableFuture<PreparedStatement> prepareAsync(SimpleStatement statement) { ... }
+}
+```
+
+We need an implementation of this interface. Our new methods all have default implementations in
+terms of the abstract `Session.execute()`, so the only thing we need is to delegate to an existing
+`Session`. The driver provides `SessionWrapper` to that effect. See [DefaultGuavaSession]:
+
+```java
+public class DefaultGuavaSession extends SessionWrapper implements GuavaSession {
+  public DefaultGuavaSession(Session delegate) {
+    super(delegate);
+  }
+}
+```
+
+Finally, we want to create an instance of this wrapper. Since we extended the context (see previous
+section), we already wrote a custom builder subclass; there is another protected method we can
+override to plug our wrapper. See [GuavaSessionBuilder]:
+
+```java
+public class GuavaSessionBuilder extends SessionBuilder<GuavaSessionBuilder, GuavaSession> {
+
+  @Override
+  protected DriverContext buildContext( ... ) { ... }
+
+  @Override
+  protected GuavaSession wrap(CqlSession defaultSession) {
+    return new DefaultGuavaSession(defaultSession);
+  }
+}
+```
+
+Client code can now use the familiar pattern to create a session:
+
+```java
+GuavaSession session = new GuavaSessionBuilder()
+    .addContactEndPoints(...)
+    .withKeyspace("test")
+    .build();
+```
+
+[RequestProcessorIT]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/core/session/RequestProcessorIT.java
+[GuavaDriverContext]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaDriverContext.java
+[GuavaRequestAsyncProcessor]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/GuavaRequestAsyncProcessor.java
+[GuavaSession]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSession.java
+[DefaultGuavaSession]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/internal/DefaultGuavaSession.java
+[GuavaSessionBuilder]: https://github.com/datastax/java-driver/blob/4.x/integration-tests/src/test/java/com/datastax/oss/driver/example/guava/api/GuavaSessionBuilder.java
diff --git a/manual/mapper/.nav b/manual/mapper/.nav
index 09b843995be..7bfdb6c0c8e 100644
--- a/manual/mapper/.nav
+++ b/manual/mapper/.nav
@@ -1,4 +1,5 @@
 entities
 daos
 mapper
-config
\ No newline at end of file
+config
+custom_types
\ No newline at end of file
diff --git a/manual/mapper/README.md b/manual/mapper/README.md
index f83f08d475c..27005b671ad 100644
--- a/manual/mapper/README.md
+++ b/manual/mapper/README.md
@@ -1,33 +1,30 @@
+
+
 ## Mapper
 
 The mapper generates the boilerplate to execute queries and convert the results into
 application-level objects.
 
-It is published as two artifacts:
-
-* the `java-driver-mapper-processor` module is **only needed in the compile classpath**, your
-  application doesn't need to depend on it at runtime.
-
-  ```xml
-  <dependency>
-    <groupId>com.datastax.oss</groupId>
-    <artifactId>java-driver-mapper-processor</artifactId>
-    <version>4.1.0</version>
-  </dependency>
-  ```
-
-  See [Configuring the annotation processor](config/) for detailed instructions for different
-  build tools.
-
-* the `java-driver-mapper-runtime` module is a regular runtime dependency:
-
-  ```xml
-  <dependency>
-    <groupId>com.datastax.oss</groupId>
-    <artifactId>java-driver-mapper-runtime</artifactId>
-    <version>4.1.0</version>
-  </dependency>
-  ```
+It is published as two artifacts: `org.apache.cassandra:java-driver-mapper-processor` and
+`org.apache.cassandra:java-driver-mapper-runtime`. See [Integration](config/) for detailed
+instructions for different build tools.
 
 ### Quick start
diff --git a/manual/mapper/config/README.md b/manual/mapper/config/README.md
index 2b8f6dd702d..1e4f9981306 100644
--- a/manual/mapper/config/README.md
+++ b/manual/mapper/config/README.md
@@ -1,13 +1,32 @@
-## Configuring the annotation processor
+
+
+## Integration
 
-The mapper's annotation processor hooks into the Java compiler, and generates additional source
-files from your annotated classes before the main compilation happens. It is contained in the
-`java-driver-mapper-processor` artifact.
+### Build tools
 
-As a reminder, there is also a `java-driver-mapper-runtime` artifact, which contains the annotations
-and a few utility classes. This one is a regular dependency, and it is required at runtime.
+The `java-driver-mapper-processor` artifact contains the annotation processor. It hooks into the
+Java compiler, and generates additional source files from your annotated classes before the main
+compilation happens. It is only required in the compile classpath.
 
-### Builds tools
+The `java-driver-mapper-runtime` artifact contains the annotations and a few utility classes. It is
+a regular dependency, required at runtime.
 
 #### Maven
 
@@ -21,7 +40,7 @@ configuration (make sure you use version 3.5 or higher):
 
     <dependency>
-      <groupId>com.datastax.oss</groupId>
+      <groupId>org.apache.cassandra</groupId>
       <artifactId>java-driver-mapper-runtime</artifactId>
       <version>${java-driver.version}</version>
     </dependency>
@@ -37,10 +56,17 @@ configuration (make sure you use version 3.5 or higher):
           <target>1.8</target>
           <annotationProcessorPaths>
             <path>
-              <groupId>com.datastax.oss</groupId>
+              <groupId>org.apache.cassandra</groupId>
               <artifactId>java-driver-mapper-processor</artifactId>
               <version>${java-driver.version}</version>
             </path>
+            <path>
+              <groupId>org.slf4j</groupId>
+              <artifactId>slf4j-nop</artifactId>
+              <version>1.7.26</version>
+            </path>
           </annotationProcessorPaths>
@@ -48,6 +74,25 @@
 
 ```
 
+Alternatively (e.g. if you are using the [BOM](../../core/bom/)), you may also declare the processor
+as a regular dependency in the "provided" scope:
+
+```xml
+<dependencies>
+  <dependency>
+    <groupId>org.apache.cassandra</groupId>
+    <artifactId>java-driver-mapper-processor</artifactId>
+    <version>${java-driver.version}</version>
+    <scope>provided</scope>
+  </dependency>
+  <dependency>
+    <groupId>org.apache.cassandra</groupId>
+    <artifactId>java-driver-mapper-runtime</artifactId>
+    <version>${java-driver.version}</version>
+  </dependency>
+</dependencies>
+```
+
 The processor runs every time you execute the `mvn compile` phase. It normally supports incremental
 builds, but if something looks off you can try a full rebuild with `mvn clean compile`.
 
@@ -83,183 +128,7 @@ You will find the generated files in `build/generated/sources/annotationProcessor`
 
 ### Integration with other languages and libraries
 
-#### Lombok
-
-[Lombok](https://projectlombok.org/) is a popular library that automates boilerplate code, such as
-getters and setters.
This can be convenient for mapped entities:
-
-```java
-import com.datastax.oss.driver.api.mapper.annotations.Entity;
-import com.datastax.oss.driver.api.mapper.annotations.PartitionKey;
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.Setter;
-import lombok.ToString;
-
-@Entity
-@EqualsAndHashCode
-@ToString
-public class Product {
-  @PartitionKey @Getter @Setter private int id;
-  @Getter @Setter private String description;
-}
-```
-
-The mapper can process Lombok-annotated classes just like regular code. The only requirement is that
-Lombok's annotation processor must run *before* the mapper's.
-
-With Maven, declaring Lombok as a provided dependency is not enough; you must also redeclare it in
-the `<annotationProcessorPaths>` section, before the mapper:
-
-```xml
-<project>
-  ...
-  <dependencies>
-    ...
-    <dependency>
-      <groupId>com.datastax.oss</groupId>
-      <artifactId>java-driver-mapper-runtime</artifactId>
-      <version>${java-driver.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.projectlombok</groupId>
-      <artifactId>lombok</artifactId>
-      <version>${lombok.version}</version>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.8.1</version>
-        <configuration>
-          <source>1.8</source>
-          <target>1.8</target>
-          <annotationProcessorPaths>
-            <path>
-              <groupId>org.projectlombok</groupId>
-              <artifactId>lombok</artifactId>
-              <version>${lombok.version}</version>
-            </path>
-            <path>
-              <groupId>com.datastax.oss</groupId>
-              <artifactId>java-driver-mapper-processor</artifactId>
-              <version>${java-driver.version}</version>
-            </path>
-          </annotationProcessorPaths>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
-```
-
-With Gradle, a similar result can be achieved with:
-
-```groovy
-apply plugin: 'java'
-
-def javaDriverVersion = '...'
-def lombokVersion = '...'
-
-dependencies {
-    annotationProcessor group: 'org.projectlombok', name: 'lombok', version: lombokVersion
-    annotationProcessor group: 'com.datastax.oss', name: 'java-driver-mapper-processor', version: javaDriverVersion
-    compile group: 'com.datastax.oss', name: 'java-driver-mapper-runtime', version: javaDriverVersion
-    compileOnly group: 'org.projectlombok', name: 'lombok', version: lombokVersion
-}
-```
-
-You'll also need to install a Lombok plugin in your IDE (for IntelliJ IDEA, [this
-one](https://plugins.jetbrains.com/plugin/6317-lombok) is available in the marketplace).
-
-#### Kotlin
-
-[Kotlin](https://kotlinlang.org/) is an alternative language for the JVM. Its compact syntax and
-native support for annotation processing make it a good fit for the mapper.
-
-To set up your project, refer to the Kotlin website:
-
-* Maven: configure [dual compilation][maven_kotlin_java] of Kotlin and Java sources. In addition,
-  you'll need an additional execution of the [kotlin-maven-plugin:kapt][maven_kapt] goal with the
-  mapper processor before compilation:
-
-  ```xml
-  <plugin>
-    <groupId>org.jetbrains.kotlin</groupId>
-    <artifactId>kotlin-maven-plugin</artifactId>
-    <version>${kotlin.version}</version>
-    <executions>
-      <execution>
-        <id>kapt</id>
-        <goals>
-          <goal>kapt</goal>
-        </goals>
-        <configuration>
-          <sourceDirs>
-            <sourceDir>src/main/kotlin</sourceDir>
-            <sourceDir>src/main/java</sourceDir>
-          </sourceDirs>
-          <annotationProcessorPaths>
-            <annotationProcessorPath>
-              <groupId>com.datastax.oss</groupId>
-              <artifactId>java-driver-mapper-processor</artifactId>
-              <version>${java-driver.version}</version>
-            </annotationProcessorPath>
-          </annotationProcessorPaths>
-        </configuration>
-      </execution>
-      <execution>
-        <id>compile</id>
-        <goals>
-          <goal>compile</goal>
-        </goals>
-        <configuration>
-          ...
-        </configuration>
-      </execution>
-    </executions>
-  </plugin>
-  ```
-
-* Gradle: configure the [kotlin][gradle_kotlin] and [kotlin_kapt][gradle_kapt] plugins in your build
-  script. In addition, declare the dependency to the mapper processor with `kapt` instead of
-  `annotationProcessor`:
-
-  ```groovy
-  apply plugin: 'kotlin'
-  apply plugin: 'kotlin-kapt'
-
-  dependencies {
-    kapt group: 'com.datastax.oss', name: 'java-driver-mapper-processor', version: javaDriverVersion
-    ...
-  }
-  ```
-
-You can use Kotlin [data classes] for your entities. Just keep in mind that the mapper expects a
-no-arg constructor, which means that you must define default values; and setters, which means that
-properties must be declared with `var`, not `val`.
-
-```kotlin
-@Entity
-data class Product(@PartitionKey var id: Int? = null, var description: String? = null)
-```
-
-All of the [property annotations](../entities/#property-annotations) can be declared directly on the
- -If you want to take advantage of [null saving strategies](../daos/null_saving/), your properties -should be nullable. - -The other mapper interfaces are pretty similar to the Java versions: - -```kotlin -@Dao -interface ProductDao { - @Insert - fun insert(product: Product) -} -``` - -[maven_kotlin_java]: https://kotlinlang.org/docs/reference/using-maven.html#compiling-kotlin-and-java-sources -[maven_kapt]: https://kotlinlang.org/docs/reference/kapt.html#using-in-maven -[gradle_kotlin]: https://kotlinlang.org/docs/reference/using-gradle.html -[gradle_kapt]: https://kotlinlang.org/docs/reference/kapt.html#using-in-gradle -[data classes]: https://kotlinlang.org/docs/reference/data-classes.html +* [Kotlin](kotlin/) +* [Lombok](lombok/) +* [Java 14 records](record/) +* [Scala](scala/) diff --git a/manual/mapper/config/kotlin/README.md b/manual/mapper/config/kotlin/README.md new file mode 100644 index 00000000000..a78bf04fb79 --- /dev/null +++ b/manual/mapper/config/kotlin/README.md @@ -0,0 +1,128 @@ + + +## Kotlin + +[Kotlin](https://kotlinlang.org/) is an alternative language for the JVM. Its compact syntax and +native support for annotation processing make it a good fit for the mapper. + +We have a full example at [DataStax-Examples/object-mapper-jvm/kotlin]. + +### Writing the model + +You can use Kotlin [data classes] for your entities. Data classes are usually +[immutable](../../entities/#mutability), but you don't need to declare that explicitly with +[@PropertyStrategy]: the mapper detects that it's processing Kotlin code, and will assume `mutable = +false` by default: + +```kotlin +@Entity +data class Product(@PartitionKey val id: Int?, val description: String?) +``` + +Data classes may also be made mutable (by declaring the components with `var` instead of `val`). If +you choose that approach, you'll have to annotate your entities with [@PropertyStrategy], and also +declare a default value for every component in order to generate a no-arg constructor: + +```kotlin +@Entity +@PropertyStrategy(mutable = true) +data class Product(@PartitionKey var id: Int? = null, var description: String? = null) +``` + +All of the [property annotations](../../entities/#property-annotations) can be declared directly on +the components. + +If you want to take advantage of [null saving strategies](../../daos/null_saving/), your components +should be nullable. + +The other mapper interfaces are direct translations of the Java versions: + +```kotlin +@Dao +interface ProductDao { + @Insert + fun insert(product: Product) +} +``` + +Known limitation: because of a Kotlin bug ([KT-4779]), you can't use default interface methods. They +will appear as abstract methods to the mapper processor, which will generate an error since they are +not properly annotated. As a workaround, you can use a companion object method that takes the DAO as +an argument (as shown in [UserDao.kt]), or query provider methods. + +### Building + +#### Gradle + +See the example's [build.gradle]. + +You enable Kotlin support with [kotlin][gradle_kotlin] and [kotlin_kapt][gradle_kapt], and declare +the mapper processor with the `kapt` directive. + +#### Maven + +Configure [dual compilation][maven_kotlin_java] of Kotlin and Java sources. 
In addition, you'll need
+an additional execution of the [kotlin-maven-plugin:kapt][maven_kapt] goal with the mapper processor
+before compilation:
+
+```xml
+<plugin>
+  <groupId>org.jetbrains.kotlin</groupId>
+  <artifactId>kotlin-maven-plugin</artifactId>
+  <version>${kotlin.version}</version>
+  <executions>
+    <execution>
+      <id>kapt</id>
+      <goals>
+        <goal>kapt</goal>
+      </goals>
+      <configuration>
+        <sourceDirs>
+          <sourceDir>src/main/kotlin</sourceDir>
+          <sourceDir>src/main/java</sourceDir>
+        </sourceDirs>
+        <annotationProcessorPaths>
+          <annotationProcessorPath>
+            <groupId>org.apache.cassandra</groupId>
+            <artifactId>java-driver-mapper-processor</artifactId>
+            <version>${java-driver.version}</version>
+          </annotationProcessorPath>
+        </annotationProcessorPaths>
+      </configuration>
+    </execution>
+    <execution>
+      <id>compile</id>
+      <goals>
+        <goal>compile</goal>
+      </goals>
+      <configuration>
+        ...
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+```
+
+[maven_kotlin_java]: https://kotlinlang.org/docs/reference/using-maven.html#compiling-kotlin-and-java-sources
+[maven_kapt]: https://kotlinlang.org/docs/reference/kapt.html#using-in-maven
+[gradle_kotlin]: https://kotlinlang.org/docs/reference/using-gradle.html
+[gradle_kapt]: https://kotlinlang.org/docs/reference/kapt.html#using-in-gradle
+[data classes]: https://kotlinlang.org/docs/reference/data-classes.html
+[KT-4779]: https://youtrack.jetbrains.com/issue/KT-4779
+
+[DataStax-Examples/object-mapper-jvm/kotlin]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/kotlin
+[build.gradle]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/kotlin/build.gradle
+[UserDao.kt]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/kotlin/src/main/kotlin/com/datastax/examples/mapper/killrvideo/user/UserDao.kt
+
+[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html
diff --git a/manual/mapper/config/lombok/README.md b/manual/mapper/config/lombok/README.md
new file mode 100644
index 00000000000..b87f8f79ea4
--- /dev/null
+++ b/manual/mapper/config/lombok/README.md
@@ -0,0 +1,109 @@
+
+
+## Lombok
+
+[Lombok](https://projectlombok.org/) is a popular library that automates repetitive code, such as
+getters and setters. You can use it in conjunction with the mapper to eliminate even more
+boilerplate.
+
+We have a full example at [DataStax-Examples/object-mapper-jvm/lombok].
+
+### Writing the model
+
+You can either map mutable "data" classes:
+
+```java
+import lombok.Data;
+import com.datastax.oss.driver.api.mapper.annotations.*;
+
+@Data
+@Entity
+public class Product {
+  @PartitionKey private int id;
+  private String description;
+}
+```
+
+Or immutable "value" classes:
+
+```java
+import lombok.Value;
+import com.datastax.oss.driver.api.mapper.annotations.*;
+
+@Value
+@Entity
+@PropertyStrategy(mutable = false)
+public class Product {
+  @PartitionKey private int id;
+  private String description;
+}
+```
+
+You can also use Lombok's fluent accessors if you configure the mapper accordingly:
+
+```java
+import lombok.Data;
+import lombok.experimental.Accessors;
+import com.datastax.oss.driver.api.mapper.annotations.*;
+import com.datastax.oss.driver.api.mapper.entity.naming.*;
+
+@Data
+@Accessors(fluent = true)
+@Entity
+@PropertyStrategy(getterStyle = GetterStyle.FLUENT, setterStyle = SetterStyle.FLUENT)
+public class Product {
+  @PartitionKey private int id;
+  private String description;
+}
+```
+
+### Building
+
+You'll need to configure the Lombok annotation processor in your build. The only requirement is that
+it must run *before* the mapper's.
+
+#### Maven
+
+See the compiler plugin's configuration in the example's [pom.xml].
+
+#### Gradle
+
+A similar result can be achieved with:
+
+```groovy
+apply plugin: 'java'
+
+def javaDriverVersion = '...'
+def lombokVersion = '...'
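+// Note: declaration order matters below. Lombok's annotation processor is
+// declared before the mapper's so that it runs first.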
+ +dependencies { + annotationProcessor group: 'org.projectlombok', name: 'lombok', version: lombokVersion + annotationProcessor group: 'com.datastax.oss', name: 'java-driver-mapper-processor', version: javaDriverVersion + compile group: 'com.datastax.oss', name: 'java-driver-mapper-runtime', version: javaDriverVersion + compileOnly group: 'org.projectlombok', name: 'lombok', version: lombokVersion +} +``` + +You'll also need to install a Lombok plugin in your IDE (for IntelliJ IDEA, [this +one](https://plugins.jetbrains.com/plugin/6317-lombok) is available in the marketplace). + + +[DataStax-Examples/object-mapper-jvm/lombok]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/lombok +[pom.xml]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/lombok/pom.xml diff --git a/manual/mapper/config/record/README.md b/manual/mapper/config/record/README.md new file mode 100644 index 00000000000..95530d52742 --- /dev/null +++ b/manual/mapper/config/record/README.md @@ -0,0 +1,54 @@ + + +## Java 14 Records + +Java 14 introduced [Record] as a lightweight, immutable alternative to POJOs. You can map annotated +records as entities. + +We have a full example at [DataStax-Examples/object-mapper-jvm/record]. + +Note: records are a **preview feature** of Java 14. As such the mapper's support for them is also +provided as a preview. + +### Writing the model + +Annotate your records like regular classes: + +```java +@Entity +record Product(@PartitionKey int id, String description) {} +``` + +Records are immutable and use the [fluent getter style](../../entities#getter-style), but you don't +need to declare that explicitly with [@PropertyStrategy]: the mapper detects when it's processing a +record, and will assume `mutable = false, getterStyle = FLUENT` by default. + +### Building + +You need to build with Java 14, and pass the `--enable-preview` flag to both the compiler and the +runtime JVM. See [pom.xml] in the example. + + +[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html + +[DataStax-Examples/object-mapper-jvm/record]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/record +[pom.xml]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/record/pom.xml + +[Record]: https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/lang/Record.html diff --git a/manual/mapper/config/scala/README.md b/manual/mapper/config/scala/README.md new file mode 100644 index 00000000000..2cb75273d0b --- /dev/null +++ b/manual/mapper/config/scala/README.md @@ -0,0 +1,76 @@ + + +## Scala + +[Scala](https://www.scala-lang.org/) is an alternative language for the JVM. It doesn't support +annotation processing natively, so using it with the mapper is a bit more complicated, but it can be +done. + +We have a full example at [DataStax-Examples/object-mapper-jvm/scala]. + +### Writing the model + +You can use Scala case classes for your entities. 
Notice the peculiar syntax for field annotations:
+
+```scala
+@Entity
+case class UserVideo(@(PartitionKey@field) userid: UUID,
+                     @(ClusteringColumn@field)(0) addedDate: Instant,
+                     @(ClusteringColumn@field)(1) videoid: UUID,
+                     name: String,
+                     previewImageLocation: String)
+```
+
+Case classes are immutable and use the [fluent getter style](../../entities#getter-style), but you
+don't need to declare that explicitly with [@PropertyStrategy]: the mapper detects when it's
+processing a case class, and will assume `mutable = false, getterStyle = FLUENT` by default.
+
+The DAOs and main mapper can be defined as Scala traits that are direct translations of their Java
+equivalents:
+
+```scala
+@Dao
+trait UserDao {
+  @Select
+  def get(userid: UUID): User
+}
+```
+
+### Building
+
+Since Scala does not support annotation processing, the mapper processor cannot operate on Scala
+sources directly. But it can process the compiled class files output by the Scala compiler. So the
+compilation happens in 3 phases:
+
+1. Compile the Scala sources with the regular sbt task.
+2. Execute a custom task that runs the annotation processor (`javac -proc:only ...`) on the compiled
+   class files.
+3. Execute another custom task that compiles the Java sources generated by the mapper.
+
+See the example's [build.sbt] for the full details.
+
+Because of that process, the sources fed to the processor cannot reference any generated code. So
+the application code needs to be placed in a separate subproject, in order to have access to the
+mapper builder.
+
+[DataStax-Examples/object-mapper-jvm/scala]: https://github.com/DataStax-Examples/object-mapper-jvm/tree/master/scala
+[build.sbt]: https://github.com/DataStax-Examples/object-mapper-jvm/blob/master/scala/build.sbt
+
+[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html
diff --git a/manual/mapper/daos/.nav b/manual/mapper/daos/.nav
index 1337eb64101..be60381834f 100644
--- a/manual/mapper/daos/.nav
+++ b/manual/mapper/daos/.nav
@@ -6,5 +6,6 @@ queryprovider
 select
 setentity
 update
+increment
 null_saving
 statement_attributes
\ No newline at end of file
diff --git a/manual/mapper/daos/README.md b/manual/mapper/daos/README.md
index c502d5ebb86..d12172bf056 100644
--- a/manual/mapper/daos/README.md
+++ b/manual/mapper/daos/README.md
@@ -1,5 +1,36 @@
+
+
 ## DAOs
 
+### Quick overview
+
+Interface annotated with [@Dao].
+
+* interface-level annotations:
+  * [@DefaultNullSavingStrategy]
+  * [@HierarchyScanStrategy]
+* method-level annotations: query methods (see child pages).
+* instantiated from a [@DaoFactory] method on the mapper.
+
+-----
+
 A DAO is an interface that defines a set of query methods. In general, those queries will relate to
 the same [entity](../entities/) (although that is not a requirement).
 
@@ -32,6 +63,7 @@ annotations:
 * [@Select](select/)
 * [@SetEntity](setentity/)
 * [@Update](update/)
+* [@Increment](increment/)
 
 The methods can have any name. The allowed parameters and return type are specific to each
 annotation.
 
@@ -135,7 +167,8 @@ In this case, any annotations declared in `Dao1` would be chosen over `Dao2`.
 
 To control how the hierarchy is scanned, annotate interfaces with [@HierarchyScanStrategy].
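+
+As a hedged illustration (the `BaseDao` interface is hypothetical; `highestAncestor` and
+`includeHighestAncestor` are attributes of [@HierarchyScanStrategy]):
+
+```java
+interface BaseDao {
+  @Insert
+  void insert(Product product);
+}
+
+// Scan BaseDao itself, but ignore anything declared higher in the hierarchy:
+@Dao
+@HierarchyScanStrategy(highestAncestor = BaseDao.class, includeHighestAncestor = true)
+interface ProductDao extends BaseDao {}
+```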
-[@Dao]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Dao.html
-[@DefaultNullSavingStrategy]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.html
-[@HierarchyScanStrategy]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.html
+[@Dao]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Dao.html
+[@DaoFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.html
+[@DefaultNullSavingStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.html
+[@HierarchyScanStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.html
 [Entity Inheritance]: ../entities/#inheritance
diff --git a/manual/mapper/daos/custom_types/README.md b/manual/mapper/daos/custom_types/README.md
new file mode 100644
index 00000000000..19f689655a7
--- /dev/null
+++ b/manual/mapper/daos/custom_types/README.md
@@ -0,0 +1,262 @@
+
+
+## Custom result types
+
+The mapper supports a pre-defined set of built-in types for DAO method results. For example, a
+[Select](../select/#return-type) method can return a single entity, an asynchronous
+`CompletionStage`, a `ReactiveResultSet`, etc.
+
+Sometimes it's convenient to use your own types. For example, if you use a specific Reactive Streams
+implementation (RxJava, Reactor, Mutiny...), you probably want your DAOs to return those types
+directly, instead of having to wrap every call manually.
+
+To achieve this, the mapper allows you to plug in custom logic that will get invoked when an unknown
+type is encountered.
+
+In the rest of this page, we'll show a simple example that replaces Java's `CompletableFuture` with
+Guava's `ListenableFuture`. Our goal is to have the mapper implement this interface:
+
+```java
+import com.google.common.util.concurrent.ListenableFuture;
+
+@Dao
+public interface ProductDao {
+  @Select
+  ListenableFuture<Product> select(UUID id);
+
+  @Update
+  ListenableFuture<Void> update(Product entity);
+
+  @Insert
+  ListenableFuture<Void> insert(Product entity);
+
+  @Delete
+  ListenableFuture<Void> delete(Product entity);
+}
+```
+
+### Writing the producers
+
+The basic component that encapsulates conversion logic is [MapperResultProducer]. Our DAO has two
+different return types: `ListenableFuture<Void>` and `ListenableFuture<Product>`.
So we're going to
+write two producers:
+
+#### Future of void
+
+```java
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+
+public class FutureOfVoidProducer implements MapperResultProducer {
+
+  private static final GenericType<ListenableFuture<Void>> PRODUCED_TYPE =
+      new GenericType<ListenableFuture<Void>>() {};
+
+  @Override
+  public boolean canProduce(GenericType<?> resultType) {
+    return resultType.equals(PRODUCED_TYPE); // (1)
+  }
+
+  @Override
+  public ListenableFuture<Void> execute(
+      Statement<?> statement, MapperContext context, EntityHelper<?> entityHelper) {
+    CqlSession session = context.getSession(); // (2)
+    SettableFuture<Void> result = SettableFuture.create(); // (3)
+    session
+        .executeAsync(statement)
+        .whenComplete(
+            (resultSet, error) -> {
+              if (error != null) {
+                result.setException(error);
+              } else {
+                result.set(null);
+              }
+            });
+    return result;
+  }
+
+  @Override
+  public ListenableFuture<Void> wrapError(Exception error) {
+    return Futures.immediateFailedFuture(error); // (4)
+  }
+}
+```
+
+All the producer methods will be invoked at runtime, by the mapper-generated DAO implementation:
+
+1. `canProduce()` is used to select a producer. All registered producers are tried in the order that
+   they were added; the first one that returns `true` is used. The [GenericType] argument is a
+   runtime representation of the static type. Here we know exactly the type we're looking for:
+   `ListenableFuture<Void>`. So we can use simple equality.
+2. `execute()` is invoked once the statement is ready to be sent. Note that the producer is not only
+   responsible for converting the result, but also for invoking the appropriate execution method: to
+   this effect, it receives the [MapperContext], which provides access to the session. The
+   `entityHelper` argument is not used in this implementation (and in fact it happens to be `null`);
+   see the next producer for more explanations.
+3. We execute the statement asynchronously to obtain a `CompletionStage`, and then convert it into a
+   `ListenableFuture`.
+4. `wrapError()` handles any error thrown throughout the process (either while building the
+   statement, or while invoking `execute()` in this class). Clients of asynchronous APIs generally
+   expect to deal with exceptions in future callbacks rather than having to catch them directly, so
+   we create a failed future.
+
+Note that we specialized the return types of `execute()` and `wrapError()`, instead of using
+`Object` as declared by the parent interface. This is not strictly necessary (the calling code only
+knows the parent interface, so there *will* be an unchecked cast), but it makes the code a bit nicer
+to read.
+
+#### Future of entity
+
+```java
+public class FutureOfEntityProducer implements MapperResultProducer {
+
+  @Override
+  public boolean canProduce(GenericType<?> resultType) {
+    return resultType.getRawType().equals(ListenableFuture.class); // (1)
+  }
+
+  @Override
+  public ListenableFuture<?> execute(
+      Statement<?> statement, MapperContext context, EntityHelper<?> entityHelper) {
+    assert entityHelper != null;
+    SettableFuture<Object> result = SettableFuture.create();
+    CqlSession session = context.getSession();
+    session
+        .executeAsync(statement)
+        .whenComplete(
+            (resultSet, error) -> {
+              if (error != null) {
+                result.setException(error);
+              } else {
+                Row row = resultSet.one();
+                result.set((row == null) ?
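+                    // no matching row: complete with null instead of failing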
+                    null : entityHelper.get(row)); // (2)
+              }
+            });
+    return result;
+  }
+
+  @Override
+  public ListenableFuture<?> wrapError(Exception error) {
+    return Futures.immediateFailedFuture(error); // same as other producer
+  }
+}
+```
+
+1. We could use an exact match with `ListenableFuture<Product>` like the previous example, but
+   that's not very scalable: in a real application we'll probably have more than one entity, and we
+   don't want to write a separate producer every time. So instead we match any `ListenableFuture`.
+   Note that this would also match `ListenableFuture<Void>`, so we'll have to be careful of the
+   order of the producers (more on that in the "packaging" section below).
+2. Whenever a return type references a mapped entity, the mapper processor will detect it and inject
+   the corresponding [EntityHelper] in the `execute()` method. This is a general-purpose utility
+   class used throughout the mapper; in this case, the method we're most interested in is
+   `get()`: it allows us to convert CQL rows into entity instances.
+
+At most one entity class is allowed in the return type.
+
+#### Matching more complex types
+
+The two examples above (exact match and matching the raw type) should cover the vast majority of
+needs. Occasionally you may encounter cases with a deeper level of parameterization, such as
+`ListenableFuture<Optional<Product>>`. To match this you'll have to call `getType()` and switch to
+the `java.lang.reflect` world:
+
+```java
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+
+// Matches ListenableFuture<Optional<...>>
+public boolean canProduce(GenericType<?> genericType) {
+  if (genericType.getRawType().equals(ListenableFuture.class)) {
+    Type type = genericType.getType();
+    if (type instanceof ParameterizedType) {
+      Type[] arguments = ((ParameterizedType) type).getActualTypeArguments();
+      if (arguments.length == 1) {
+        Type argument = arguments[0];
+        return argument instanceof ParameterizedType
+            && ((ParameterizedType) argument).getRawType().equals(Optional.class);
+      }
+    }
+  }
+  return false;
+}
+```
+
+### Packaging the producers in a service
+
+Once all the producers are ready, we package them in a class that implements
+[MapperResultProducerService]:
+
+```java
+public class GuavaFutureProducerService implements MapperResultProducerService {
+  @Override
+  public Iterable<MapperResultProducer> getProducers() {
+    return Arrays.asList(
+        // Order matters, the most specific must come first.
+        new FutureOfVoidProducer(), new FutureOfEntityProducer());
+  }
+}
+```
+
+As hinted previously, the order of the producers matters: they will be tried from left to right.
+Since our "future of entity" producer would also match `ListenableFuture<Void>`, it must come last.
+
+The mapper uses the Java Service Provider mechanism to register producers: create a new file
+`META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService`,
+containing the name of the implementation:
+
+```
+some.package.name.GuavaFutureProducerService
+```
+
+You can put the producers, service and service descriptor directly in your application, or
+distribute them as a standalone JAR if you intend to reuse them.
+
+### Disabling custom types
+
+Custom types are handled at runtime. This goes a bit against the philosophy of the rest of the
+object mapper, where most of the work is done at compile time thanks to annotation processing. There
+are ways to extend the mapper processor, but we feel that this would be too complicated for this use
+case.
+
+One downside is that validation can now only be done at runtime: if you use a return type that isn't
+supported by any producer, you'll only find out when you call the method.
+
+**If you don't use custom types at all**, you can disable the feature with an annotation processor
+flag:
+
+```xml
+<plugin>
+  <artifactId>maven-compiler-plugin</artifactId>
+  <configuration>
+    <compilerArgs>
+      <arg>-Acom.datastax.oss.driver.mapper.customResults.enabled=false</arg>
+    </compilerArgs>
+  </configuration>
+</plugin>
+```
+
+With this configuration, if a DAO method declares a non-built-in return type, it will be surfaced as
+a compiler error.
+
+[EntityHelper]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/EntityHelper.html
+[GenericType]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/reflect/GenericType.html
+[MapperContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/MapperContext.html
+[MapperResultProducer]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.html
+[MapperResultProducerService]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.html
diff --git a/manual/mapper/daos/delete/README.md b/manual/mapper/daos/delete/README.md
index 70c5e57d607..e67ecdc8a6e 100644
--- a/manual/mapper/daos/delete/README.md
+++ b/manual/mapper/daos/delete/README.md
@@ -1,3 +1,22 @@
+
+
 ## Delete methods
 
 Annotate a DAO method with [@Delete] to generate a query that deletes an [Entity](../../entities):
@@ -77,9 +96,10 @@ placeholders:
   void deleteIfDescriptionMatches(UUID productId, String expectedDescription);
 ```
 
-A [StatementAttributes](../statement_attributes/) can be added as the **last** parameter. This
-allows you to customize customize certain aspects of the request (page size, timeout, etc.) at
-runtime.
+A `Function<BoundStatementBuilder, BoundStatementBuilder>` or `UnaryOperator<BoundStatementBuilder>`
+can be added as the **last** parameter. It will be applied to the statement before execution. This
+allows you to customize certain aspects of the request (page size, timeout, etc.) at runtime. See
+[statement attributes](../statement_attributes/).
 
 ### Return type
 
@@ -104,6 +124,14 @@ The method can return:
   ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription);
   // if the condition fails, the result set will contain columns '[applied]' and 'description'
   ```
+
+* a [BoundStatement]. This is intended for queries where you will execute this statement later or in
+  a batch (see the sketch after this list).
+
+  ```java
+  @Delete
+  BoundStatement delete(Product product);
+  ```
 
 * a [CompletionStage] or [CompletableFuture] of any of the above. The method will execute the
   query asynchronously. Note that for result sets, you need to switch to [AsyncResultSet].
@@ -118,6 +146,15 @@ The method can return:
   @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription")
   CompletionStage<Boolean> deleteIfDescriptionMatchesAsync(UUID productId, String expectedDescription);
   ```
+
+* a [ReactiveResultSet].
+
+  ```java
+  @Delete
+  ReactiveResultSet deleteReactive(Product product);
+  ```
+
+* a [custom type](../custom_types).
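+
+For instance, here is a hedged sketch of how the [BoundStatement] variant above could be combined
+into a batch (`session`, `dao`, `product1` and `product2` are assumed to exist):
+
+```java
+BatchStatement batch =
+    BatchStatement.newInstance(
+        DefaultBatchType.LOGGED,
+        dao.delete(product1), // bound statements returned by the mapper, not yet executed
+        dao.delete(product2));
+session.execute(batch);
+```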
Note that you can also return a boolean or result set for non-conditional queries, but there's no practical purpose for that since those queries always return `wasApplied = true` and an empty result @@ -133,13 +170,16 @@ If a table was specified when creating the DAO, then the generated query targets Otherwise, it uses the default table name for the entity (which is determined by the name of the entity class and the [naming strategy](../../entities/#naming-strategy)). -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html -[@Delete]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Delete.html -[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html -[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- +[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- +[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html +[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html +[@Delete]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Delete.html +[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html + [CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html \ No newline at end of file +[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html diff --git a/manual/mapper/daos/getentity/README.md b/manual/mapper/daos/getentity/README.md index 90aef52a455..de9a530b558 100644 --- a/manual/mapper/daos/getentity/README.md +++ b/manual/mapper/daos/getentity/README.md @@ -1,3 +1,22 @@ + + ## GetEntity methods Annotate a DAO method with [@GetEntity] to convert a core driver data structure into one or more @@ -23,6 +42,54 @@ product.setDescription(row.get("description", String.class)); It does not perform a query. 
Instead, those methods are intended for cases where you already
 have a query result, and just need the conversion logic.
 
+### Lenient mode
+
+By default, the mapper operates in "strict" mode: the source row must contain a matching column for
+every property in the entity definition, *including computed ones*. If such a column is not found,
+an error will be thrown.
+
+Starting with driver 4.12.0, the `@GetEntity` annotation has a new `lenient` attribute. If this
+attribute is explicitly set to `true`, the mapper will operate in "lenient" mode: all entity
+properties that have a matching column in the source row will be set. However, *unmatched properties
+will be left untouched*.
+
+As an example to illustrate how lenient mode works, assume that we have the following entity and
+DAO:
+
+```java
+@Entity
+class Product {
+
+  @PartitionKey int id;
+  String description;
+  float price;
+  // other members omitted
+}
+
+interface ProductDao {
+
+  @GetEntity(lenient = true)
+  Product getLenient(Row row);
+}
+```
+
+Then the following code would be possible:
+
+```java
+// row does not contain the price column
+Row row = session.execute("SELECT id, description FROM product").one();
+Product product = productDao.getLenient(row);
+assert product.price == 0.0;
+```
+
+Since no `price` column was found in the source row, `product.price` wasn't set and was left to its
+default value (0.0). Without lenient mode, the code above would throw an error instead.
+
+Lenient mode allows you to achieve the equivalent of the driver 3.x [manual mapping
+feature](https://docs.datastax.com/en/developer/java-driver/3.10/manual/object_mapper/using/#manual-mapping).
+
+**Beware that lenient mode may result in incomplete entities being produced.**
+
 ### Parameters
 
 The method must have a single parameter. The following types are allowed:
 
@@ -41,7 +108,7 @@ The method can return:
 * a single entity instance. If the argument is a result set type, the generated code will extract
   the first row and convert it, or return `null` if the result set is empty.
 
-  ````java
+  ```java
   @GetEntity
   Product asProduct(Row row);
 
@@ -56,7 +123,20 @@ The method can return:
   @GetEntity
   PagingIterable<Product> asProducts(ResultSet resultSet);
   ```
-
+
+* a [Stream] of an entity class. In that case, the type of the parameter **must** be [ResultSet].
+  Each row in the result set will be converted into an entity instance.
+
+  Note: even if streams are lazily evaluated, results are fetched synchronously; therefore, as the
+  returned stream is traversed, blocking calls may occur, as more results are fetched from the
+  server in the background. For details about the stream's characteristics, see
+  [PagingIterable.spliterator].
+
+  ```java
+  @GetEntity
+  Stream<Product> asProducts(ResultSet resultSet);
+  ```
+
 * a [MappedAsyncPagingIterable] of an entity class. In that case, the type of the parameter **must**
   be [AsyncResultSet]. Each row in the result set will be converted into an entity instance.
 
@@ -69,15 +149,17 @@ If the return type doesn't match the parameter type (for example [PagingIterable]
 [AsyncResultSet]), the mapper processor will issue a compile-time error.
-[@GetEntity]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/GetEntity.html -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[GettableByName]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/data/GettableByName.html -[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/PagingIterable.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html -[Row]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/Row.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/data/UdtValue.html +[@GetEntity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/GetEntity.html +[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html +[GettableByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/GettableByName.html +[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html +[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html +[PagingIterable.spliterator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html#spliterator-- +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html +[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html +[Stream]: https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html diff --git a/manual/mapper/daos/increment/README.md b/manual/mapper/daos/increment/README.md new file mode 100644 index 00000000000..44b017be2e1 --- /dev/null +++ b/manual/mapper/daos/increment/README.md @@ -0,0 +1,105 @@ + + +## Increment methods + +Annotate a DAO method with [@Increment] to generate a query that updates a counter table that is +mapped to an entity: + +```java +// CREATE TABLE votes(article_id int PRIMARY KEY, up_votes counter, down_votes counter); + +@Entity +public class Votes { + @PartitionKey private int articleId; + private long upVotes; + private long downVotes; + ... // constructor(s), getters and setters, etc. +} + +@Dao +public interface VotesDao { + @Increment(entityClass = Votes.class) + void incrementUpVotes(int articleId, long upVotes); + + @Increment(entityClass = Votes.class) + void incrementDownVotes(int articleId, long downVotes); + + @Select + Votes findById(int articleId); +} +``` + +### Parameters + +The entity class must be specified with `entityClass` in the annotation. + +The method's parameters must start with the [full primary key](../../entities/#primary-key-columns), +in the exact order (as defined by the [@PartitionKey] and [@ClusteringColumn] annotations in the +entity class). The parameter names don't necessarily need to match the names of the columns, but the +types must match. 
Unlike other methods like [@Select](../select/) or [@Delete](../delete/), counter
+updates cannot operate on a whole partition; they need to target exactly one row, so all the
+partition key and clustering columns must be specified.
+
+They must be followed by one or more parameters representing counter increments. Their type must be
+`long` or `java.lang.Long`. The name of each parameter must match the name of the entity
+property that maps to the counter (that is, the name of the getter without "get" and
+decapitalized). Alternatively, you may annotate a parameter with [@CqlName] to specify the
+raw column name directly; in that case, the name of the parameter does not matter:
+
+```java
+@Increment(entityClass = Votes.class)
+void incrementUpVotes(int articleId, @CqlName("up_votes") long foobar);
+```
+
+When you invoke the method, each parameter value is interpreted as a **delta** that will be applied
+to the counter. In other words, if you pass 1, the counter will be incremented by 1. Negative values
+are allowed. If you are using Cassandra 2.2 or above, you can use `Long` and pass `null` for some of
+the parameters, and they will be ignored (following [NullSavingStrategy#DO_NOT_SET](../null_saving/)
+semantics). If you are using Cassandra 2.1, `null` values will trigger a runtime error.
+
+A `Function<BoundStatementBuilder, BoundStatementBuilder>` or `UnaryOperator<BoundStatementBuilder>`
+can be added as the **last** parameter. It will be applied to the statement before execution. This
+allows you to customize certain aspects of the request (page size, timeout, etc.) at runtime. See
+[statement attributes](../statement_attributes/).
+
+### Return type
+
+The method can return `void`, a void [CompletionStage] or [CompletableFuture], or a
+[ReactiveResultSet].
+
+### Target keyspace and table
+
+If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the
+generated query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work
+if the mapper was built from a session that has a [default keyspace] set.
+
+If a table was specified when creating the DAO, then the generated query targets that table.
+Otherwise, it uses the default table name for the entity (which is determined by the name of the
+entity class and the naming convention).
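+
+As a hedged usage sketch (assuming the `VotesDao` shown earlier and a `@DaoFactory` method named
+`votesDao()` on your mapper), each argument is a delta applied to the corresponding counter:
+
+```java
+VotesDao dao = mapper.votesDao();
+dao.incrementUpVotes(42, 1L);    // up_votes   += 1
+dao.incrementDownVotes(42, -2L); // down_votes -= 2 (negative deltas are allowed)
+Votes votes = dao.findById(42);
+```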
+
+[@Increment]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Increment.html
+[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html
+[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier-
+[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html
+[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html
+[@CqlName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/CqlName.html
+
+[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html
+[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html
diff --git a/manual/mapper/daos/insert/README.md b/manual/mapper/daos/insert/README.md
index 7c5b6ae015c..b90ffa33a32 100644
--- a/manual/mapper/daos/insert/README.md
+++ b/manual/mapper/daos/insert/README.md
@@ -1,3 +1,22 @@
+
+
 ## Insert methods
 
 Annotate a DAO method with [@Insert] to generate a query that inserts an [Entity](../../entities):
@@ -25,9 +44,10 @@ void insertWithTtl(Product product, int ttl);
 
 The annotation can define a [null saving strategy](../null_saving/) that applies to the properties
 of the entity to insert.
 
-A [StatementAttributes](../statement_attributes/) can be added as the **last** parameter. This
-allows you to customize customize certain aspects of the request (page size, timeout, etc.) at
-runtime.
+A `Function<BoundStatementBuilder, BoundStatementBuilder>` or `UnaryOperator<BoundStatementBuilder>`
+can be added as the **last** parameter. It will be applied to the statement before execution. This
+allows you to customize certain aspects of the request (page size, timeout, etc.) at runtime. See
+[statement attributes](../statement_attributes/).
 
 ### Return type
 
@@ -67,6 +87,12 @@ The method can return:
   @Insert
   ResultSet save(Product product);
   ```
+
+* a [BoundStatement]. This is intended for cases where you want to execute this statement later or
+  in a batch:
+
+  ```java
+  @Insert
+  BoundStatement save(Product product);
+  ```
 
 * a [CompletionStage] or [CompletableFuture] of any of the above. The mapper will execute the query
   asynchronously.
@@ -82,6 +108,15 @@ The method can return:
   CompletableFuture<Optional<Product>> insertIfNotExists(Product product);
   ```
 
+* a [ReactiveResultSet].
+
+  ```java
+  @Insert
+  ReactiveResultSet insertReactive(Product product);
+  ```
+
+* a [custom type](../custom_types).
+
 ### Target keyspace and table
 
 If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the
@@ -92,13 +127,13 @@ If a table was specified when creating the DAO, then the generated query targets
 Otherwise, it uses the default table name for the entity (which is determined by the name of the
 entity class and the [naming strategy](../../entities/#naming-strategy)).
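+
+For example, the conditional insert shown above could be consumed like this (a hedged sketch,
+assuming a mapper-built `dao`):
+
+```java
+dao.insertIfNotExists(product)
+    .thenAccept(
+        existing -> {
+          // empty if the insert was applied; otherwise contains the existing row
+          existing.ifPresent(p -> System.out.println("Product already exists: " + p));
+        });
+```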
-[default keyspace]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@Insert]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Insert.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html -[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- -[ResultSet#getExecutionInfo()]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html#getExecutionInfo-- - - +[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- +[@Insert]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Insert.html +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- +[ResultSet#getExecutionInfo()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#getExecutionInfo-- +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html [CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html [CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html diff --git a/manual/mapper/daos/null_saving/README.md b/manual/mapper/daos/null_saving/README.md index e54b90e6e9f..eed98934356 100644 --- a/manual/mapper/daos/null_saving/README.md +++ b/manual/mapper/daos/null_saving/README.md @@ -1,3 +1,22 @@ + + ## Null saving strategy The null saving strategy controls how null entity properties are handled when writing to the @@ -93,10 +112,10 @@ public interface UserDao extends InventoryDao { } ``` -[@DefaultNullSavingStrategy]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[MapperException]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/MapperException.html -[DO_NOT_SET]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.html#DO_NOT_SET -[SET_TO_NULL]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.html#SET_TO_NULL +[@DefaultNullSavingStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[MapperException]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/MapperException.html +[DO_NOT_SET]: 
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.html#DO_NOT_SET
+[SET_TO_NULL]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.html#SET_TO_NULL
 
 [CASSANDRA-7304]: https://issues.apache.org/jira/browse/CASSANDRA-7304
diff --git a/manual/mapper/daos/query/README.md b/manual/mapper/daos/query/README.md
index 4261516c5ee..a11753da880 100644
--- a/manual/mapper/daos/query/README.md
+++ b/manual/mapper/daos/query/README.md
@@ -1,3 +1,22 @@
+
+
 ## Query methods
 
 Annotate a DAO method with [@Query] to provide your own query string:
@@ -25,9 +44,10 @@ long countByIdAndYear(int id, int year);
 
 The annotation can define a [null saving strategy](../null_saving/) that applies to the method
 parameters.
 
-A [StatementAttributes](../statement_attributes/) can be added as the **last** parameter. This
-allows you to customize customize certain aspects of the request (page size, timeout, etc.) at
-runtime.
+A `Function<BoundStatementBuilder, BoundStatementBuilder>` or `UnaryOperator<BoundStatementBuilder>`
+can be added as the **last** parameter. It will be applied to the statement before execution. This
+allows you to customize certain aspects of the request (page size, timeout, etc.) at runtime. See
+[statement attributes](../statement_attributes/).
 
 ### Return type
 
@@ -53,11 +73,21 @@ The method can return:
 
 * a [ResultSet]. The method will return the raw query result, without any conversion.
 
+* a [BoundStatement]. This is intended for queries where you will execute this statement later
+  or in a batch.
+
 * a [PagingIterable]. The method will convert each row into an entity instance.
 
+* a [Stream]. The method will convert each row into an entity instance. For details about the
+  stream's characteristics, see [PagingIterable.spliterator].
+
 * a [CompletionStage] or [CompletableFuture] of any of the above. The method will execute the
   query asynchronously. Note that for result sets and iterables, you need to switch to the
   asynchronous equivalent [AsyncResultSet] and [MappedAsyncPagingIterable] respectively.
+
+* a [ReactiveResultSet], or a [MappedReactiveResultSet] of the entity class.
+
+* a [custom type](../custom_types).
 
 ### Target keyspace and table
 
@@ -102,15 +132,20 @@ Then:
   query succeeds or not depends on whether the session that the mapper was built with has a
   [default keyspace].
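+
+As a hedged sketch of the trailing function parameter described above (the table and values are
+illustrative):
+
+```java
+@Query("SELECT count(*) FROM sensor_readings WHERE id = :id AND year = :year")
+long countByIdAndYear(int id, int year, UnaryOperator<BoundStatementBuilder> customizer);
+
+// At the call site, tweak the statement right before execution:
+long count = dao.countByIdAndYear(1, 2024,
+    builder -> builder.setPageSize(500).setTimeout(Duration.ofSeconds(2)));
+```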
-[default keyspace]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@Query]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Query.html -[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html -[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- -[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/PagingIterable.html -[Row]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/Row.html +[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- +[@Query]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Query.html +[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[ResultSet#wasApplied()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html#wasApplied-- +[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html +[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html +[PagingIterable.spliterator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html#spliterator-- +[Row]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/Row.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html +[MappedReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.html [CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html [CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html [Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html +[Stream]: https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html diff --git a/manual/mapper/daos/queryprovider/README.md b/manual/mapper/daos/queryprovider/README.md index 5a7b3443264..593a3a6b1a4 100644 --- a/manual/mapper/daos/queryprovider/README.md +++ b/manual/mapper/daos/queryprovider/README.md @@ -1,3 +1,22 @@ + + ## Query provider methods Annotate a DAO method with [@QueryProvider] to delegate the execution of the query to one of your @@ -137,11 +156,11 @@ Here is the full implementation: the desired [PagingIterable][PagingIterable]. 
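+
+For reference, wiring a provider looks roughly like this (a hedged sketch with illustrative names;
+see the full implementation referenced above):
+
+```java
+@Dao
+public interface ProductDao {
+  @QueryProvider(
+      providerClass = FindByDescriptionProvider.class,
+      entityHelpers = Product.class)
+  PagingIterable<Product> findByDescription(String searchString);
+}
+```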
-[@QueryProvider]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html -[providerClass]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#providerClass-- -[entityHelpers]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#entityHelpers-- -[providerMethod]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#providerMethod-- -[MapperContext]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/MapperContext.html -[EntityHelper]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/EntityHelper.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/PagingIterable.html +[@QueryProvider]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html +[providerClass]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#providerClass-- +[entityHelpers]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#entityHelpers-- +[providerMethod]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.html#providerMethod-- +[MapperContext]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/MapperContext.html +[EntityHelper]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/EntityHelper.html +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html diff --git a/manual/mapper/daos/select/README.md b/manual/mapper/daos/select/README.md index 6a2f6ce956b..fb6c4ca2077 100644 --- a/manual/mapper/daos/select/README.md +++ b/manual/mapper/daos/select/README.md @@ -1,3 +1,22 @@ + + ## Select methods Annotate a DAO method with [@Select] to generate a query that selects one or more rows, and maps @@ -13,11 +32,11 @@ public interface ProductDao { ### Parameters -If the annotation doesn't have a `customWhereClause`, the mapper defaults to a selection by primary -key (partition key + clustering columns). The method's parameters must match the types of the -[primary key columns](../../entities/#primary-key-columns), in the exact order (as defined by the -[@PartitionKey] and [@ClusteringColumn] annotations). The parameter names don't necessarily need to -match the names of the columns. +If the annotation doesn't have a [customWhereClause()], the mapper defaults to a selection by +primary key (partition key + clustering columns). The method's parameters must match the types of +the [primary key columns](../../entities/#primary-key-columns), in the exact order (as defined by +the [@PartitionKey] and [@ClusteringColumn] annotations). The parameter names don't necessarily need +to match the names of the columns. To select more than one entity within a partition, a subset of primary key components may be specified as long as enough parameters are provided to account for the partition key. 
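For example, the following sketch assumes an entity `ProductSale` whose primary key is `((id), year, month)`; names and types are illustrative:

```java
@Dao
public interface ProductSaleDao {

  // Full primary key: returns at most one row.
  @Select
  ProductSale findByIdYearMonth(int id, int year, int month);

  // Partition key only: returns the whole partition.
  @Select
  PagingIterable<ProductSale> findById(int id);

  // Partition key + first clustering column: a slice of the partition.
  @Select
  PagingIterable<ProductSale> findByIdAndYear(int id, int year);
}
```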
@@ -51,7 +70,7 @@ public interface ProductDao { } ``` -If the annotation has a `customWhereClause`, it completely replaces the WHERE clause. The provided +If the annotation has a [customWhereClause()], it completely replaces the WHERE clause. The provided string can contain named placeholders. In that case, the method must have a corresponding parameter for each, with the same name and a compatible Java type. @@ -60,9 +79,25 @@ for each, with the same name and a compatible Java type. PagingIterable<Product> findByDescription(String searchString); -A [StatementAttributes](../statement_attributes/) can be added as the **last** parameter. This -allows you to customize customize certain aspects of the request (page size, timeout, etc.) at -runtime. +The generated SELECT query can be further customized with [limit()], [perPartitionLimit()], +[orderBy()], [groupBy()] and [allowFiltering()]. Some of these clauses can also contain placeholders +whose values will be provided through additional method parameters. Note that it is sometimes not +possible to determine if a parameter is a primary key component or a placeholder value; therefore +the rule is that **if your method takes a partial primary key, the first parameter that is not a +primary key component must be explicitly annotated with +[@CqlName](../../entities/#user-provided-names)**. For example if the primary key is `((day int, +hour int, minute int), ts timestamp)`: + +```java +// Annotate 'l' so that it's not mistaken for the second PK component +@Select(limit = ":l") +PagingIterable<Sale> findDailySales(int day, @CqlName("l") int l); +``` + +A `Function<BoundStatementBuilder, BoundStatementBuilder>` or `UnaryOperator<BoundStatementBuilder>` +can be added as the **last** parameter. It will be applied to the statement before execution. This +allows you to customize certain aspects of the request (page size, timeout, etc.) at runtime. See +[statement attributes](../statement_attributes/). ### Return type @@ -92,6 +127,19 @@ In all cases, the method can return: PagingIterable<Product> findByDescription(String searchString); ``` +* a [Stream] of the entity class. It behaves like a result set, except that each element is a mapped + entity instead of a row. + + Note: even if streams are lazily evaluated, the query will be executed synchronously; also, as + the returned stream is traversed, more blocking calls may occur, as more results are fetched + from the server in the background. For details about the stream's characteristics, see + [PagingIterable.spliterator]. + + ```java + @Select(customWhereClause = "description LIKE :searchString") + Stream<Product> findByDescription(String searchString); + ``` + * a [CompletionStage] or [CompletableFuture] of any of the above. The method will execute the query asynchronously. Note that for iterables, you need to switch to the asynchronous equivalent [MappedAsyncPagingIterable]. @@ -107,6 +155,20 @@ In all cases, the method can return: CompletionStage<MappedAsyncPagingIterable<Product>> findByDescriptionAsync(String searchString); ``` + For streams, even if the initial query is executed asynchronously, traversing the returned + stream may block the traversing thread. Blocking calls can indeed be required as more results + are fetched from the server in the background. For this reason, _the usage of + `CompletionStage<Stream<Product>>` cannot be considered as a fully asynchronous execution method_. + +* a [MappedReactiveResultSet] of the entity class. + + ```java + @Select(customWhereClause = "description LIKE :searchString") + MappedReactiveResultSet<Product> findByDescriptionReactive(String searchString); + ``` + +* a [custom type](../custom_types).
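As a usage sketch of the statement customizer described above (the entity and the option values are illustrative assumptions):

```java
@Select
PagingIterable<Product> findById(int id,
    Function<BoundStatementBuilder, BoundStatementBuilder> customizer);
```

At the call site, this lets you tune an individual request without declaring another DAO method:

```java
PagingIterable<Product> products =
    dao.findById(1, builder -> builder.setPageSize(500).setTimeout(Duration.ofSeconds(2)));
```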
+ ### Target keyspace and table If a keyspace was specified [when creating the DAO](../../mapper/#dao-factory-methods), then the @@ -117,13 +179,22 @@ If a table was specified when creating the DAO, then the generated query targets that table. Otherwise, it uses the default table name for the entity (which is determined by the name of the entity class and the [naming strategy](../../entities/#naming-strategy)). -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html -[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html -[@Select]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Select.html -[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html -[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/PagingIterable.html +[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- +[@ClusteringColumn]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html +[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html +[@Select]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html +[allowFiltering()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#allowFiltering-- +[customWhereClause()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#customWhereClause-- +[groupBy()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#groupBy-- +[limit()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#limit-- +[orderBy()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#orderBy-- +[perPartitionLimit()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html#perPartitionLimit-- +[MappedAsyncPagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/MappedAsyncPagingIterable.html +[PagingIterable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html +[PagingIterable.spliterator]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/PagingIterable.html#spliterator-- +[MappedReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.html [CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html [CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html [Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html +[Stream]:
https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html diff --git a/manual/mapper/daos/setentity/README.md b/manual/mapper/daos/setentity/README.md index 79a1fc0715f..eeb7957f62e 100644 --- a/manual/mapper/daos/setentity/README.md +++ b/manual/mapper/daos/setentity/README.md @@ -1,3 +1,22 @@ + + ## SetEntity methods Annotate a DAO method with [@SetEntity] to fill a core driver data structure from an @@ -21,6 +40,55 @@ boundStatement = boundStatement.set("description", product.getDescription(), String.class); It does not perform a query. Instead, those methods are intended for cases where you will execute the query yourself, and just need the conversion logic. +### Lenient mode + +By default, the mapper operates in "strict" mode: the target statement must contain a matching +column for every property in the entity definition, *except computed ones*. If such a column is not +found, an error will be thrown. + +Starting with driver 4.12.0, the `@SetEntity` annotation has a new `lenient` attribute. If this +attribute is explicitly set to `true`, the mapper will operate in "lenient" mode: all entity +properties that have a matching column in the target statement will be set. However, *unmatched +properties will be left untouched*. + +As an example to illustrate how lenient mode works, assume that we have the following entity and +DAO: + +```java +@Entity class Product { + + @PartitionKey int id; + String description; + float price; + // other members omitted +} + +interface ProductDao { + + @SetEntity(lenient = true) + BoundStatement setLenient(Product product, BoundStatement stmt); + +} +``` + +Then the following code would be possible: + +```java +Product product = new Product(1, "scented candle", 12.99f); +// stmt does not contain the price column +BoundStatement stmt = session.prepare("INSERT INTO product (id, description) VALUES (?, ?)").bind(); +stmt = productDao.setLenient(product, stmt); +``` + +Since no `price` column was found in the target statement, `product.price` wasn't read (if the +statement is executed, the resulting row in the database will have a price of zero). Without lenient +mode, the code above would throw an error instead. + +Lenient mode allows you to achieve the equivalent of the driver 3.x [manual mapping +feature](https://docs.datastax.com/en/developer/java-driver/3.10/manual/object_mapper/using/#manual-mapping). + +**Beware that lenient mode may result in incomplete rows being inserted in the database.** ### Parameters The method must have two parameters: one is the entity instance, the other must be a subtype of @@ -63,8 +131,8 @@ BoundStatement bind(Product product, BoundStatement statement); If you use a void method with [BoundStatement], the mapper processor will issue a compile-time warning.
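The warning exists because `BoundStatement` is immutable: a void method would silently discard the values that were just set. Mutable targets don't have that problem. A sketch contrasting the two (signatures follow the patterns shown above):

```java
// BoundStatement is immutable: capture and use the returned instance.
@SetEntity
BoundStatement bind(Product product, BoundStatement statement);

// BoundStatementBuilder is mutable, so a void method is safe here.
@SetEntity
void bind(Product product, BoundStatementBuilder builder);
```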
-[@SetEntity]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/SetEntity.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/BoundStatement.html -[BoundStatementBuilder]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.html -[SettableByName]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/data/SettableByName.html -[UdtValue]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/data/UdtValue.html +[@SetEntity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/SetEntity.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[BoundStatementBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatementBuilder.html +[SettableByName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/SettableByName.html +[UdtValue]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/data/UdtValue.html diff --git a/manual/mapper/daos/statement_attributes/README.md b/manual/mapper/daos/statement_attributes/README.md index 27ca395145a..f772df36775 100644 --- a/manual/mapper/daos/statement_attributes/README.md +++ b/manual/mapper/daos/statement_attributes/README.md @@ -1,3 +1,22 @@ + + ## Statement attributes The [@Delete](../delete/), [@Insert](../insert/), [@Query](../query/), [@Select](../select/) and @@ -60,4 +79,4 @@ Product product = dao.findById(1, builder -> builder.setConsistencyLevel(DefaultConsistencyLevel.QUORUM)); ``` -[@StatementAttributes]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.html \ No newline at end of file +[@StatementAttributes]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.html diff --git a/manual/mapper/daos/update/README.md b/manual/mapper/daos/update/README.md index 119d0f26569..87e9286c800 100644 --- a/manual/mapper/daos/update/README.md +++ b/manual/mapper/daos/update/README.md @@ -1,3 +1,22 @@ + + ## Update methods Annotate a DAO method with [@Update] to generate a query that updates one or more @@ -73,9 +92,10 @@ template.setDescription("Coming soon"); // all other properties remain null dao.updateWhereIdIn(template, 42, 43); // Will only update 'description' on the selected rows ``` -A [StatementAttributes](../statement_attributes/) can be added as the **last** parameter. This -allows you to customize customize certain aspects of the request (page size, timeout, etc.) at -runtime. +A `Function<BoundStatementBuilder, BoundStatementBuilder>` or `UnaryOperator<BoundStatementBuilder>` +can be added as the **last** parameter. It will be applied to the statement before execution. This +allows you to customize certain aspects of the request (page size, timeout, etc.) at runtime. See +[statement attributes](../statement_attributes/). ### Return type @@ -100,6 +120,13 @@ The method can return: ResultSet updateIfExists(Product product); // if the condition fails, the result set will contain columns '[applied]' and 'description' ``` + +* a [BoundStatement]. This is intended for queries where you will execute this statement later or in a batch: + + ```java + @Update + BoundStatement update(Product product); + ``` * a [CompletionStage] or [CompletableFuture] of any of the above.
The mapper will execute the query asynchronously. @@ -115,6 +142,15 @@ The method can return: @Update(customIfClause = "description = :expectedDescription") CompletableFuture<Boolean> updateIfDescriptionMatches(Product product, String expectedDescription); ``` + +* a [ReactiveResultSet]. + + ```java + @Update + ReactiveResultSet updateReactive(Product product); + ``` + +* a [custom type](../custom_types). ### Target keyspace and table @@ -126,11 +162,13 @@ If a table was specified when creating the DAO, then the generated query targets that table. Otherwise, it uses the default table name for the entity (which is determined by the name of the entity class and the naming convention). -[default keyspace]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- -[@Update]: https://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Update.html +[default keyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withKeyspace-com.datastax.oss.driver.api.core.CqlIdentifier- +[@Update]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Update.html -[AsyncResultSet]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html -[Boolean]: https://docs.oracle.com/javase/8/docs/api/index.html?java/lang/Boolean.html -[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html -[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html -[ResultSet]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/cql/ResultSet.html +[AsyncResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/AsyncResultSet.html +[Boolean]: https://docs.oracle.com/javase/8/docs/api/index.html?java/lang/Boolean.html +[CompletionStage]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html +[CompletableFuture]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html +[ResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/ResultSet.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/cql/BoundStatement.html +[ReactiveResultSet]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/core/cql/reactive/ReactiveResultSet.html diff --git a/manual/mapper/entities/README.md b/manual/mapper/entities/README.md index 135292ea136..978c781245f 100644 --- a/manual/mapper/entities/README.md +++ b/manual/mapper/entities/README.md @@ -1,5 +1,43 @@ + + ## Entities +### Quick overview + +POJO annotated with [@Entity], must expose a no-arg constructor. + +* class-level annotations: + * [@NamingStrategy] + * [@CqlName] + * [@HierarchyScanStrategy] + * [@PropertyStrategy] +* field/method-level annotations: + * [@PartitionKey], [@ClusteringColumn] + * [@Computed] + * [@Transient] + * [@CqlName] +* can inherit annotated fields/methods and [@NamingStrategy]. Only use [@Entity] on concrete + classes. + +----- + An entity is a Java class that will be mapped to a Cassandra table or [UDT](../../core/udts). Entities are used as arguments or return types of [DAO](../daos/) methods; they can also be nested inside other entities (to map UDT columns).
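As a sketch of that nesting (the `Address`/`Customer` names and the `address` UDT are assumptions for illustration):

```java
@Entity
public class Address {
  private String street;
  private String city;
  // no-arg constructor, getters and setters omitted for brevity
}

@Entity
public class Customer {
  @PartitionKey private UUID id;
  private Address address; // maps to a column whose CQL type is the 'address' UDT
  // no-arg constructor, getters and setters omitted for brevity
}
```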
@@ -19,19 +57,105 @@ public class Product { } ``` -Each entity property will be mapped to a CQL column. In order to detect a property: +Each entity property will be mapped to a CQL column. The way properties are detected is +configurable, as explained below: + +### Property detection -* there **must** be a getter method that follows the usual naming convention (e.g. `getDescription`) - and has no parameters. The name of the property is obtained by removing the "get" prefix and - decapitalizing (`description`), and the type of the property is the return type of the getter. -* there **must** be a matching setter method (`setDescription`), with a single parameter that has - the same type as the property (the return type does not matter). +#### Mutability -There *may* also be a matching field (`description`) that has the same type as the property, but -this is not mandatory: a property can have only a getter and a setter (for example if the value is -computed, or the field has a different name, or is nested into another field, etc.) +By default, the mapper expects mutable entity classes: + +```java +@Entity +public class Product { + @PartitionKey private UUID productId; + + public Product() {} + + public UUID getProductId() { return productId; } + public void setProductId(UUID productId) { this.productId = productId; } +} +``` + +With mutable entities: + +* each entity property: + * **must** have a non-void, no-argument getter method. + * **must** have a corresponding setter method: matching name, and exactly one argument matching + the getter's return type. Note that the return type of the setter does not matter. + * *may* have a corresponding field: matching name and type. +* the type **must** expose a non-private, no-argument constructor. + +When the mapper reads a mutable entity from the database, it will invoke the no-argument +constructor to materialize the instance, and then read and set the properties one by one. + +You can switch to an immutable style with the [@PropertyStrategy] annotation: + +```java +@Entity +@PropertyStrategy(mutable = false) +public class ImmutableProduct { + @PartitionKey private final UUID productId; + + public ImmutableProduct(UUID productId) { this.productId = productId; } + + public UUID getProductId() { return productId; } +} +``` + +With immutable entities: + +* each entity property: + * **must** have a non-void, no-argument getter method. The mapper will not look for a setter. + * *may* have a corresponding field: matching name and type. You'll probably want to make that + field final (although that has no impact on the mapper-generated code). +* the type **must** expose a non-private constructor that takes every + non-[transient](#transient-properties) property, in the declaration order. -The class must expose a no-arg constructor that is at least package-private. +When the mapper reads an immutable entity from the database, it will first read all properties, then +invoke the "all columns" constructor to materialize the instance. + +Note: the "all columns" constructor must take the properties in the order that they are declared in +the entity. If the entity inherits properties from parent types, those must come last in the +constructor signature, ordered from the closest parent to the farthest. If things get too +complicated, a good trick is to deliberately omit the constructor to let the mapper processor fail: +the error message describes the expected signature. 
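A sketch of that ordering rule with inheritance (class and property names are illustrative):

```java
public abstract class SaleBase {
  private final int year; // property inherited by the entity
  protected SaleBase(int year) { this.year = year; }
  public int getYear() { return year; }
}

@Entity
@PropertyStrategy(mutable = false)
public class Sale extends SaleBase {
  @PartitionKey private final UUID id;
  private final double amount;

  // Own properties first, in declaration order; inherited ones last:
  public Sale(UUID id, double amount, int year) {
    super(year);
    this.id = id;
    this.amount = amount;
  }

  public UUID getId() { return id; }
  public double getAmount() { return amount; }
}
```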
+ +#### Accessor styles + +By default, the mapper looks for JavaBeans-style accessors: getter prefixed with "get" (or "is" for +boolean properties) and, if the entity is mutable, setter prefixed with "set": + +```java +@Entity +public class Product { + @PartitionKey private UUID productId; + + public UUID getProductId() { return productId; } + public void setProductId(UUID productId) { this.productId = productId; } +} +``` + +You can switch to a "fluent" style (no prefixes) with the [@PropertyStrategy] annotation: + +```java +import com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle; +import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; + +@Entity +@PropertyStrategy(getterStyle = GetterStyle.FLUENT, setterStyle = SetterStyle.FLUENT) +public class Product { + @PartitionKey private UUID productId; + + public UUID productId() { return productId; } + public void productId(UUID productId) { this.productId = productId; } +} +``` + +Note that if you use the fluent style with immutable entities, Java's built-in `hashCode()` and +`toString()` methods would qualify as properties. The mapper skips them automatically. If you have +other false positives that you'd like to ignore, mark them as [transient](#transient-properties). ### Naming strategy @@ -251,6 +375,11 @@ i.e.: private transient int notAColumn; ``` +#### Custom column name + +Override the CQL name manually with [@CqlName], see [User-provided names](#user-provided-names) +above. + ### Default keyspace You can specify a default keyspace to use when doing operations on a given entity: @@ -445,21 +574,22 @@ the same level. To control how the class hierarchy is scanned, annotate classes with [@HierarchyScanStrategy]. -[@ClusteringColumn]:
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.html +[@CqlName]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/CqlName.html +[@Dao]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Dao.html +[@Entity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Entity.html +[NameConverter]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.html +[NamingConvention]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.html +[@NamingStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.html +[@PartitionKey]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.html +[@Computed]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Computed.html +[@Select]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Select.html +[@Insert]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Insert.html +[@Update]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Update.html +[@GetEntity]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/GetEntity.html +[@Query]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Query.html [aliases]: http://cassandra.apache.org/doc/latest/cql/dml.html?#aliases -[@Transient]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Transient.html -[@TransientProperties]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.html -[@HierarchyScanStrategy]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.html +[@Transient]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Transient.html +[@TransientProperties]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.html +[@HierarchyScanStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.html +[@PropertyStrategy]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.html diff --git a/manual/mapper/mapper/README.md b/manual/mapper/mapper/README.md index 99b1d4fa762..752424c9a3b 100644 --- a/manual/mapper/mapper/README.md +++ b/manual/mapper/mapper/README.md @@ -1,5 +1,34 @@ + + ## Mapper interface +### Quick overview + +Interface annotated with [@Mapper], entry point to mapper features. + +* a corresponding builder gets generated (default: `[YourInterfacesName]Builder`). +* defines [@DaoFactory] methods that provide DAO instances. They can be parameterized by keyspace + and/or table. + +----- + The mapper interface is the top-level entry point to mapping features. It wraps a core driver session, and acts as a factory of [DAO](../daos/) objects that will be used to execute requests. 
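Since the full example is elided in the hunk below, here is a minimal sketch of the pattern (interface and keyspace names are illustrative):

```java
@Mapper
public interface InventoryMapper {
  @DaoFactory
  ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace);
}

// The annotation processor generates InventoryMapperBuilder:
CqlSession session = CqlSession.builder().build();
InventoryMapper mapper = new InventoryMapperBuilder(session).build();
ProductDao dao = mapper.productDao(CqlIdentifier.fromCql("inventory"));
```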
@@ -29,7 +58,7 @@ public interface InventoryMapper { ``` The builder allows you to create a mapper instance, by wrapping a core `CqlSession` (if you need -more details on how to create a session, refer to the [core driver documentation](../core/)). +more details on how to create a session, refer to the [core driver documentation](../../core/)). ```java CqlSession session = CqlSession.builder().build(); @@ -81,6 +110,15 @@ ProductDao productDao(@DaoKeyspace String keyspace); ProductDao productDao(@DaoTable CqlIdentifier table); ``` +You can also specify a default keyspace when building the mapper; it will be used for all methods +that don't have a `@DaoKeyspace` parameter: + +```java +InventoryMapper inventoryMapper = new InventoryMapperBuilder(session) + .withDefaultKeyspace("keyspace1") + .build(); +``` + The mapper maintains an interface cache. Calling a factory method with the same arguments will yield the same DAO instance: @@ -92,6 +130,8 @@ assert dao1 == dao2; ### DAO parameterization +#### Keyspace and table + The mapper allows you to reuse the same DAO interface for different tables. For example, given the following definitions: @@ -140,8 +180,77 @@ ProductDao dao3 = inventoryMapper.productDao("keyspace3", "table3"); The DAO's keyspace and table can also be injected into custom query strings; see [Query methods](../daos/query/). -[CqlIdentifier]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/core/CqlIdentifier.html -[@DaoFactory]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.html -[@DaoKeyspace]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.html -[@DaoTable]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/DaoTable.html -[@Mapper]: http://docs.datastax.com/en/drivers/java/4.0/com/datastax/oss/driver/api/mapper/annotations/Mapper.html +#### Execution profile + +Similarly, a DAO can be parameterized to use a particular [configuration +profile](../../core/configuration/#execution-profiles): + +```java +@Mapper +public interface InventoryMapper { + @DaoFactory + ProductDao productDao(@DaoProfile String profileName); + + @DaoFactory + ProductDao productDao(@DaoProfile DriverExecutionProfile profile); +} +``` + +The mapper will call `setExecutionProfileName` / `setExecutionProfile` on every generated statement. + +### Schema validation + +The mapper validates entity mappings against the database schema at runtime. This check is performed +every time you initialize a new DAO: + +```java +// Checks that entity 'Product' can be mapped to table or UDT 'keyspace1.product' +ProductDao dao1 = inventoryMapper.productDao("keyspace1", "product"); + +// Checks that entity 'Product' can be mapped to table or UDT 'keyspace2.product' +ProductDao dao2 = inventoryMapper.productDao("keyspace2", "product"); +``` + +For each entity referenced in the DAO, the mapper tries to find a schema element with the +corresponding name (according to the [naming strategy](../entities/#naming-strategy)). It tries +tables first, then falls back to UDTs if there is no match. You can speed up this process by +providing a hint: + +```java +import static com.datastax.oss.driver.api.mapper.annotations.SchemaHint.TargetElement.UDT; +import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; + +@Entity +@SchemaHint(targetElement = UDT) +public class Address { ...
} +``` + +The following checks are then performed: + +* for each entity field, the database table or UDT must contain a column with the corresponding name + (according to the [naming strategy](../entities/#naming-strategy)). +* the types must be compatible, either according to the [default type + mappings](../../core/#cql-to-java-type-mapping), or via a [custom + codec](../../core/custom_codecs/) registered with the session. +* additionally, if the target element is a table, the primary key must be [properly + annotated](../entities/#primary-key-columns) in the entity. + +If any of those steps fails, an `IllegalArgumentException` is thrown. + +Schema validation adds a small startup overhead, so once your application is stable you may want to +disable it: + +```java +InventoryMapper inventoryMapper = new InventoryMapperBuilder(session) + .withSchemaValidationEnabled(false) + .build(); +``` + +You can also permanently disable validation of an individual entity by annotating it with +`@SchemaHint(targetElement = NONE)`. + +[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html +[@DaoFactory]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.html +[@DaoKeyspace]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.html +[@DaoTable]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/DaoTable.html +[@Mapper]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/mapper/annotations/Mapper.html diff --git a/manual/osgi/README.md b/manual/osgi/README.md index 7ee77bfec24..92cd4625b68 100644 --- a/manual/osgi/README.md +++ b/manual/osgi/README.md @@ -1,3 +1,22 @@ + + # OSGi The driver is available as an [OSGi] bundle. More specifically, the following maven artifacts are @@ -5,8 +24,17 @@ valid OSGi bundles: - `java-driver-core` - `java-driver-query-builder` +- `java-driver-mapper-runtime` - `java-driver-core-shaded` +Note: some of the driver dependencies are not valid OSGi bundles. Most of them are optional, and the +driver can work properly without them (see the +[Integration>Driver dependencies](../core/integration/#driver-dependencies) section for more +details); in such cases, the corresponding packages are declared with optional resolution in +`Import-Package` directives. However, if you need to access such packages in an OSGi container you +MUST wrap the corresponding jar in a valid OSGi bundle and make it available for provisioning to the +OSGi runtime. + ## Using the shaded jar `java-driver-core-shaded` shares the same bundle name as `java-driver-core` @@ -18,35 +46,93 @@ an explicit version of dependency in your project different than that of the driver. In several places of the [driver configuration] it is possible to specify the class name of something to be instantiated by the driver such as the reconnection policy. This is accomplished -using reflection, which uses a `ClassLoader`. By default, the driver uses `Thread.currentThread -.getContextClassLoader()` if available, otherwise it uses its own `ClassLoader`. This is typically -adequate except in environments like application containers or OSGi frameworks where class loading -logic is much more deliberate and libraries are isolated from each other. +using reflection, which uses a `ClassLoader`. By default, the driver uses its own bundle's +`ClassLoader` to instantiate classes by reflection.
This is typically adequate as long as the driver +bundle has access to the bundle where the implementing class resides. + +However if the default `ClassLoader` cannot load the implementing class, you may encounter an error +like this: -If the chosen `ClassLoader` is not able to ascertain whether a loaded class is the same instance -as its expected parent type, you may encounter an error such as: + java.lang.ClassNotFoundException: com.datastax.oss.MyCustomReconnectionPolicy + +Similarly, it also happens that the default `ClassLoader` is able to load the implementing class but +is not able to ascertain whether that class implements the expected parent type. In these cases you +may encounter an error such as: java.lang.IllegalArgumentException: Expected class ExponentialReconnectionPolicy (specified by advanced.reconnection-policy.class) to be a subtype of com.datastax.oss.driver.api.core.connection.ReconnectionPolicy This is occurring because there is a disparity in the `ClassLoader`s used between the driver code -and the `ClassLoader` used to reflectively load the class (in this case, +and the `ClassLoader` used to reflectively load the class (in this case, `ExponentialReconnectionPolicy`). -You may also encounter `ClassNotFoundException` if the `ClassLoader` does not have access to the -class being loaded. - To overcome these issues, you may specify a `ClassLoader` instance when constructing a `Session` -by using [withClassLoader()]. In a lot of cases, it may be adequate to pass in the `ClassLoader` -from a `Class` that is part of the core driver, i.e.: +by using [withClassLoader()]. + +Alternatively, if you have access to the `BundleContext` (for example, if you are creating the +session in an `Activator` class) you can also obtain the bundle's `ClassLoader` the following way: ```java +BundleContext bundleContext = ...; +Bundle bundle = bundleContext.getBundle(); +BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); +ClassLoader classLoader = bundleWiring.getClassLoader(); CqlSession session = CqlSession.builder() - .withClassLoader(CqlSession.class.getClassLoader()) + .withClassLoader(classLoader) .build(); ``` +### Using a custom `ClassLoader` for application-bundled configuration resources + +In addition to specifying a `ClassLoader` when constructing a `Session`, you can also specify +a `ClassLoader` instance on certain `DriverConfigLoader` methods for cases when your OSGi +application bundle provides overrides to driver configuration defaults. This is typically done by +including an `application.conf` file in your application bundle. + +For example, you can use [DriverConfigLoader.fromDefaults(ClassLoader)] to use the driver's default +configuration mechanism while specifying a different class loader: + +```java +BundleContext bundleContext = ...; +Bundle bundle = bundleContext.getBundle(); +BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); +ClassLoader classLoader = bundleWiring.getClassLoader(); + +CqlSession session = CqlSession.builder() + .withClassLoader(classLoader) + .withConfigLoader(DriverConfigLoader.fromDefaults(classLoader)) + .build(); +``` + +The above configuration will look for resources named `application.conf` inside the application +bundle, using the right class loader for that. 
Similarly, if you want to use programmatic configuration in your application bundle, but still want to be able to provide some configuration in an `application.conf` file, you can use [DriverConfigLoader.programmaticBuilder(ClassLoader)]: + +```java +BundleContext bundleContext = ...; +Bundle bundle = bundleContext.getBundle(); +BundleWiring bundleWiring = bundle.adapt(BundleWiring.class); +ClassLoader classLoader = bundleWiring.getClassLoader(); +DriverConfigLoader loader = + DriverConfigLoader.programmaticBuilder(classLoader) + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(5)) + .startProfile("slow") + .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30)) + .endProfile() + .build(); +CqlSession session = CqlSession.builder() + .withClassLoader(classLoader) + .withConfigLoader(loader) + .build(); +``` + +The above configuration will honor all programmatic settings, but will look for resources named +`application.conf` inside the application bundle, using the right class loader for that. + ## What does the "Error loading libc" DEBUG message mean? The driver is able to perform native system calls through [JNR] in some cases, for example to @@ -70,6 +156,8 @@ starting the driver: [driver configuration]: ../core/configuration [OSGi]:https://www.osgi.org -[JNR]: https://github.com/jnr/jnr-ffi -[withClassLoader()]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withClassLoader-java.lang.ClassLoader- -[JAVA-1127]:https://datastax-oss.atlassian.net/browse/JAVA-1127 \ No newline at end of file +[JNR]: https://github.com/jnr/jnr-posix +[withClassLoader()]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withClassLoader-java.lang.ClassLoader- +[JAVA-1127]:https://datastax-oss.atlassian.net/browse/JAVA-1127 +[DriverConfigLoader.fromDefaults(ClassLoader)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#fromDefaults-java.lang.ClassLoader- +[DriverConfigLoader.programmaticBuilder(ClassLoader)]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/config/DriverConfigLoader.html#programmaticBuilder-java.lang.ClassLoader- diff --git a/manual/query_builder/README.md b/manual/query_builder/README.md index 2ee2e7fdd3f..d1932b329e7 100644 --- a/manual/query_builder/README.md +++ b/manual/query_builder/README.md @@ -1,3 +1,22 @@ + + ## Query builder The query builder is a utility to **generate CQL queries programmatically**. For example, it could @@ -12,9 +31,9 @@ To use it in your application, add the following dependency: ```xml <dependency> - <groupId>com.datastax.oss</groupId> + <groupId>org.apache.cassandra</groupId> <artifactId>java-driver-query-builder</artifactId> - <version>4.1.0</version> + <version>${driver.version}</version> </dependency> ``` @@ -38,11 +57,15 @@ try (CqlSession session = CqlSession.builder().build()) { #### Fluent API -All the starting methods are centralized in the [QueryBuilder] class. To get started, add the -following import: +All the starting methods are centralized in the [QueryBuilder] and [SchemaBuilder] classes. To get +started, add one of the following imports: ```java +// For DML queries, such as SELECT import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.*; + +// For DDL queries, such as CREATE TABLE +import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.*; ``` Choose the method matching your desired statement, for example `selectFrom`.
Then use your IDE's @@ -66,6 +89,30 @@ SimpleStatement statement = select.build(); SimpleStatementBuilder builder = select.builder(); ``` +#### DataStax Enterprise + +The driver provides two additional entry points for DSE-specific queries: [DseQueryBuilder] and +[DseSchemaBuilder]. They extend their respective non-DSE counterparts, so anything that is available +on the default query builder can also be done with the DSE query builder. + +We recommend that you use those classes if you are targeting DataStax Enterprise; they will be +enriched in the future if DSE adds custom CQL syntax. + +Currently, the only difference is the support for the `DETERMINISTIC` and `MONOTONIC` keywords when +generating `CREATE FUNCTION` or `CREATE AGGREGATE` statements: + +```java +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseFunction; + +createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .deterministic() + .monotonic(); +// CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC +``` + #### Immutability All types in the fluent API are immutable. This means that every step creates a new object: @@ -184,6 +231,8 @@ For a complete tour of the API, browse the child pages in this manual: * [Terms](term/) * [Idempotence](idempotence/) -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html -[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/CqlIdentifier.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[CqlIdentifier]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/CqlIdentifier.html +[DseQueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.html +[DseSchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.html diff --git a/manual/query_builder/condition/README.md b/manual/query_builder/condition/README.md index 1c5af6551c6..1a6a37eb2ef 100644 --- a/manual/query_builder/condition/README.md +++ b/manual/query_builder/condition/README.md @@ -1,3 +1,22 @@ + + ## Conditions A condition is a clause that appears after the IF keyword in a conditional [UPDATE](../update/) or @@ -132,4 +151,4 @@ It is mutually exclusive with column conditions: if you previously specified column conditions on the statement, they will be ignored; conversely, adding a column condition cancels a previous IF EXISTS clause. -[Condition]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/condition/Condition.html +[Condition]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/condition/Condition.html diff --git a/manual/query_builder/delete/README.md b/manual/query_builder/delete/README.md index 1beb1f91fb4..8e97920ae9f 100644 --- a/manual/query_builder/delete/README.md +++ b/manual/query_builder/delete/README.md @@ -1,3 +1,22 @@ + + ## DELETE To start a DELETE query, use one of the `deleteFrom` methods in [QueryBuilder].
There are several @@ -141,5 +160,5 @@ deleteFrom("user") Conditions are a common feature used by UPDATE and DELETE, so they have a [dedicated page](../condition) in this manual. -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Selector]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/select/Selector.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[Selector]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/select/Selector.html diff --git a/manual/query_builder/idempotence/README.md b/manual/query_builder/idempotence/README.md index b064c81cdd6..2f97151d277 100644 --- a/manual/query_builder/idempotence/README.md +++ b/manual/query_builder/idempotence/README.md @@ -1,3 +1,22 @@ + + ## Idempotence in the query builder When you generate a statement (or a statement builder) from the query builder, it automatically @@ -39,7 +58,7 @@ If you use the result of a user-defined function in an INSERT or UPDATE statement, it has no way of knowing if that function is idempotent: ```java -Statement statement = insertInto("foo").value("k", function("generate_id")).build(); +SimpleStatement statement = insertInto("foo").value("k", function("generate_id")).build(); // INSERT INTO foo (k) VALUES (generate_id()) assert !statement.isIdempotent(); ``` @@ -47,7 +66,7 @@ assert !statement.isIdempotent(); This extends to arithmetic operations using such terms: ```java -Statement statement = +SimpleStatement statement = insertInto("foo").value("k", add(function("generate_id"), literal(1))).build(); // INSERT INTO foo (k) VALUES (generate_id()+1) assert !statement.isIdempotent(); @@ -56,7 +75,7 @@ assert !statement.isIdempotent(); Raw terms could be anything, so they are also considered unsafe by default: ```java -Statement statement = +SimpleStatement statement = insertInto("foo").value("k", raw("generate_id()+1")).build(); // INSERT INTO foo (k) VALUES (generate_id()+1) assert !statement.isIdempotent(); ``` @@ -68,7 +87,7 @@ If a WHERE clause in an UPDATE or DELETE statement uses a comparison with an unsafe term, it could potentially apply to different rows for each execution: ```java -Statement statement = +SimpleStatement statement = update("foo") .setColumn("v", bindMarker()) .whereColumn("k").isEqualTo(function("non_idempotent_func")) @@ -82,7 +101,7 @@ assert !statement.isIdempotent(); Counter updates are never idempotent: ```java -Statement statement = +SimpleStatement statement = update("foo") .increment("c") .whereColumn("k").isEqualTo(bindMarker()) @@ -94,12 +113,12 @@ assert !statement.isIdempotent(); Nor is appending or prepending an element to a list: ```java -Statement statement = +SimpleStatement statement = update("foo") .appendListElement("l", literal(1)) .whereColumn("k").isEqualTo(bindMarker()) .build(); -// UPDATE foo SET l+=[1] WHERE k=? +// UPDATE foo SET l=l+[1] WHERE k=?
assert !statement.isIdempotent(); ``` @@ -107,7 +126,7 @@ The generic `append` and `prepend` methods apply to any kind of collection, so w them unsafe by default too: ```java -Statement statement = +SimpleStatement statement = update("foo") .prepend("l", literal(Arrays.asList(1, 2, 3))) .whereColumn("k").isEqualTo(bindMarker()) @@ -116,12 +135,66 @@ Statement statement = assert !statement.isIdempotent(); ``` +The generic `remove` method is however safe since collection removals are idempotent: + +```java +SimpleStatement statement = + update("foo") + .remove("l", literal(Arrays.asList(1, 2, 3))) + .whereColumn("k").isEqualTo(bindMarker()) + .build(); +// UPDATE foo SET l=l-[1,2,3] WHERE k=? +assert statement.isIdempotent(); +``` + +When appending, prepending or removing a single element to/from a collection, it is possible to use +the dedicated methods listed below; their idempotence depends on the collection type (list, set or +map), the operation (append, prepend or removal) and the idempotence of the element being +added/removed: + +1. `appendListElement` : not idempotent +2. `prependListElement` : not idempotent +3. `removeListElement` : idempotent if element is idempotent +4. `appendSetElement` : idempotent if element is idempotent +5. `prependSetElement` : idempotent if element is idempotent +6. `removeSetElement` : idempotent if element is idempotent +7. `appendMapElement` : idempotent if both key and value are idempotent +8. `prependMapElement` : idempotent if both key and value are idempotent +9. `removeMapElement` : idempotent if both key and value are idempotent + +In practice, most invocations of the above methods will be idempotent because most collection +elements are. For example, the following statement is idempotent since `literal(1)` is also +idempotent: + +```java +SimpleStatement statement = + update("foo") + .removeListElement("l", literal(1)) + .whereColumn("k").isEqualTo(bindMarker()) + .build(); +// UPDATE foo SET l=l-[1] WHERE k=? +assert statement.isIdempotent(); +``` + +However, in rare cases the resulting statement won't be marked idempotent, e.g. if you use a +function to select a collection element: + +```java +SimpleStatement statement = + update("foo") + .removeListElement("l", function("myfunc")) + .whereColumn("k").isEqualTo(bindMarker()) + .build(); +// UPDATE foo SET l=l-[myfunc()] WHERE k=? +assert !statement.isIdempotent(); +``` + ### Unsafe deletions Deleting from a list is not idempotent: ```java -Statement statement = +SimpleStatement statement = deleteFrom("foo") .element("l", literal(0)) .whereColumn("k").isEqualTo(bindMarker()) @@ -171,4 +244,4 @@ sequential history that is correct. From our clients' point of view, there were But overall the column changed from 1 to 2. There is no ordering of the two operations that can explain that change. We broke linearizability by doing a transparent retry at step 6. -[linearizability]: https://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability \ No newline at end of file +[linearizability]: https://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability diff --git a/manual/query_builder/insert/README.md b/manual/query_builder/insert/README.md index 15e1701d2d2..6bac896d9b8 100644 --- a/manual/query_builder/insert/README.md +++ b/manual/query_builder/insert/README.md @@ -1,3 +1,22 @@ + + ## INSERT To start an INSERT query, use one of the `insertInto` methods in [QueryBuilder]. There are @@ -114,4 +133,4 @@ is executed. This is distinctly different than setting the value to null. 
Passing `null` to this method will only remove the USING TTL clause from the query, which will not alter the TTL (if one is set) in Cassandra. -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html \ No newline at end of file +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html diff --git a/manual/query_builder/relation/README.md b/manual/query_builder/relation/README.md index 24a6d07ed6a..eb1c728888e 100644 --- a/manual/query_builder/relation/README.md +++ b/manual/query_builder/relation/README.md @@ -1,3 +1,22 @@ + + ## Relations A relation is a clause that appears after the WHERE keyword, and restricts the rows that the @@ -201,5 +220,5 @@ This should be used with caution, as it's possible to generate invalid CQL that will fail at execution time; on the other hand, it can be used as a workaround to handle new CQL features that are not yet covered by the query builder. -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Relation]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/relation/Relation.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[Relation]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/relation/Relation.html diff --git a/manual/query_builder/schema/README.md b/manual/query_builder/schema/README.md index 56c4fc86784..0472c8e8c6f 100644 --- a/manual/query_builder/schema/README.md +++ b/manual/query_builder/schema/README.md @@ -1,3 +1,22 @@ + + # Schema builder The schema builder is an additional API provided by [java-driver-query-builder](../) that enables @@ -44,4 +63,4 @@ element type: * [function](function/) * [aggregate](aggregate/) -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/aggregate/README.md b/manual/query_builder/schema/aggregate/README.md index 2b52470b590..a54f8703d69 100644 --- a/manual/query_builder/schema/aggregate/README.md +++ b/manual/query_builder/schema/aggregate/README.md @@ -1,3 +1,22 @@ + + ## Aggregate Aggregates enable users to apply User-defined functions (UDF) to rows in a data set and combine @@ -76,4 +95,4 @@ dropAggregate("average").ifExists(); // DROP AGGREGATE IF EXISTS average ``` -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/function/README.md b/manual/query_builder/schema/function/README.md index 5ae566bc0b6..001327626b1 100644 --- a/manual/query_builder/schema/function/README.md +++ b/manual/query_builder/schema/function/README.md @@ -1,3 +1,22 @@ + + ## Function User-defined functions (UDF) enable users to create user code written in JSR-223 compliant scripting @@ -92,4 +111,4 @@ dropFunction("log").ifExists(); // DROP FUNCTION IF EXISTS log ``` -[SchemaBuilder]:
https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/index/README.md b/manual/query_builder/schema/index/README.md index 9ab07315e67..c0c9448dfab 100644 --- a/manual/query_builder/schema/index/README.md +++ b/manual/query_builder/schema/index/README.md @@ -1,3 +1,22 @@ + + # Index An index provides a means of expanding the query capabilities of a table. [SchemaBuilder] offers @@ -99,4 +118,4 @@ dropIndex("my_idx").ifExists(); // DROP INDEX IF EXISTS my_idx ``` -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/keyspace/README.md b/manual/query_builder/schema/keyspace/README.md index dc2cb73eb28..572e8af1658 100644 --- a/manual/query_builder/schema/keyspace/README.md +++ b/manual/query_builder/schema/keyspace/README.md @@ -1,3 +1,22 @@ + + ## Keyspace A keyspace is a top-level namespace that defines a name, replication strategy and configurable @@ -83,6 +102,6 @@ dropKeyspace("cycling").ifExists(); // DROP KEYSPACE IF EXISTS cycling ``` -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/schema/materialized_view/README.md b/manual/query_builder/schema/materialized_view/README.md index 4da83286a3a..c4f495f95aa 100644 --- a/manual/query_builder/schema/materialized_view/README.md +++ b/manual/query_builder/schema/materialized_view/README.md @@ -1,3 +1,22 @@ + + ## Materialized View Materialized Views are an experimental feature introduced in Apache Cassandra 3.0 that provide a @@ -85,5 +104,5 @@ dropTable("cyclist_by_age").ifExists(); // DROP MATERIALIZED VIEW IF EXISTS cyclist_by_age ``` -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html -[RelationStructure]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[RelationStructure]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.html diff --git a/manual/query_builder/schema/table/README.md b/manual/query_builder/schema/table/README.md index 3ca6ab0c1fe..090f8a1f67b 100644 --- a/manual/query_builder/schema/table/README.md +++ b/manual/query_builder/schema/table/README.md @@ -1,3 +1,22 @@ + + ## Table Data in Apache Cassandra is stored in tables. [SchemaBuilder] offers API methods for creating, @@ -31,12 +50,12 @@ CreateTable create = createTable("cycling", "cyclist_name").withPartitionKey("id A table with only one column is not so typical however. 
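For a sense of what a more typical definition looks like, here is a sketch that combines a partition key, a clustering column and a regular column (the extra column names are purely illustrative, and the usual static import of `SchemaBuilder.createTable` is assumed):

```java
import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createTable;

import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.querybuilder.schema.CreateTable;

CreateTable create =
    createTable("cycling", "cyclist_name")
        .withPartitionKey("id", DataTypes.UUID) // partition key columns come first
        .withClusteringColumn("lastname", DataTypes.TEXT) // then clustering columns
        .withColumn("firstname", DataTypes.TEXT); // then regular columns
// CREATE TABLE cycling.cyclist_name (id uuid,lastname text,firstname text,PRIMARY KEY(id,lastname))
```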
At this point you may provide partition, clustering, regular and static columns using any of the following API methods: -* `withPrimaryKey(name, dataType)` +* `withPartitionKey(name, dataType)` * `withClusteringColumn(name, dataType)` * `withColumn(name, dataType)` * `withStaticColumn(name, dataType)` -Primary key precedence is driven by the order of `withPrimaryKey` and `withClusteringKey` +Primary key precedence is driven by the order of `withPartitionKey` and `withClusteringColumn` invocations, for example: @@ -107,6 +126,6 @@ dropTable("cyclist_name").ifExists(); // DROP TABLE IF EXISTS cyclist_name ``` -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html -[CreateTableWithOptions]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.html -[AlterTableWithOptions]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[CreateTableWithOptions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.html +[AlterTableWithOptions]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.html diff --git a/manual/query_builder/schema/type/README.md b/manual/query_builder/schema/type/README.md index 34c548cea12..c289ad776a8 100644 --- a/manual/query_builder/schema/type/README.md +++ b/manual/query_builder/schema/type/README.md @@ -1,3 +1,22 @@ + + ## Type User-defined types are special types that can associate multiple named fields to a single column. @@ -88,4 +107,4 @@ dropType("address").ifExists(); // DROP TYPE IF EXISTS address ``` -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.html diff --git a/manual/query_builder/select/README.md b/manual/query_builder/select/README.md index aa08c4c848f..0425423a402 100644 --- a/manual/query_builder/select/README.md +++ b/manual/query_builder/select/README.md @@ -1,3 +1,22 @@ + + ## SELECT Start your SELECT with the `selectFrom` method in [QueryBuilder]. There are several variants @@ -368,6 +387,29 @@ selectFrom("sensor_data") // SELECT reading FROM sensor_data WHERE id=?
ORDER BY date DESC ``` +Vector Search: + +```java + +import com.datastax.oss.driver.api.core.data.CqlVector; + +selectFrom("foo") + .all() + .where(Relation.column("k").isEqualTo(literal(1))) + .orderByAnnOf("c1", CqlVector.newInstance(0.1, 0.2, 0.3)); +// SELECT * FROM foo WHERE k=1 ORDER BY c1 ANN OF [0.1, 0.2, 0.3] + +selectFrom("cycling", "comments_vs") + .column("comment") + .function( + "similarity_cosine", + Selector.column("comment_vector"), + literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) + .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) + .limit(1); +// SELECT comment,similarity_cosine(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1 +``` + Limits: ```java @@ -391,5 +433,5 @@ selectFrom("user").all().allowFiltering(); // SELECT * FROM user ALLOW FILTERING ``` -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Selector]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/select/Selector.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[Selector]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/select/Selector.html diff --git a/manual/query_builder/term/README.md b/manual/query_builder/term/README.md index 34b88d5f609..460ed8dcb10 100644 --- a/manual/query_builder/term/README.md +++ b/manual/query_builder/term/README.md @@ -1,3 +1,22 @@ + + ## Terms A term is an expression that does not involve the value of a column. It is used: @@ -105,5 +124,5 @@ This should be used with caution, as it's possible to generate invalid CQL that will fail at execution time; on the other hand, it can be used as a workaround to handle new CQL features that are not yet covered by the query builder. -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html \ No newline at end of file +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[CodecRegistry]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/core/type/codec/registry/CodecRegistry.html diff --git a/manual/query_builder/truncate/README.md b/manual/query_builder/truncate/README.md index 2ac35b71983..c8cd6945123 100644 --- a/manual/query_builder/truncate/README.md +++ b/manual/query_builder/truncate/README.md @@ -1,3 +1,22 @@ + + ## TRUNCATE To create a TRUNCATE query, use one of the `truncate` methods in [QueryBuilder]. There are several @@ -17,4 +36,4 @@ Truncate truncate2 = truncate(CqlIdentifier.fromCql("mytable")); Note that, at this stage, the query is ready to build. After creating a TRUNCATE query it does not take any values.
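For example, a minimal sketch of building and running one of these statements (assuming an existing `CqlSession` named `session`; the table name is purely illustrative):

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.truncate;

import com.datastax.oss.driver.api.core.cql.SimpleStatement;

// TRUNCATE takes no bound values, so the statement can be built right away:
SimpleStatement statement = truncate("mytable").build();
// TRUNCATE mytable
session.execute(statement);
```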
-[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html diff --git a/manual/query_builder/update/README.md b/manual/query_builder/update/README.md index c4422673877..15502f52bb7 100644 --- a/manual/query_builder/update/README.md +++ b/manual/query_builder/update/README.md @@ -1,3 +1,22 @@ + + ## UPDATE To start an UPDATE query, use one of the `update` methods in [QueryBuilder]. There are several @@ -251,5 +270,5 @@ update("foo") Conditions are a common feature used by UPDATE and DELETE, so they have a [dedicated page](../condition) in this manual. -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html -[Assignment]: https://docs.datastax.com/en/drivers/java/4.1/com/datastax/oss/driver/api/querybuilder/update/Assignment.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/QueryBuilder.html +[Assignment]: https://docs.datastax.com/en/drivers/java/4.17/com/datastax/oss/driver/api/querybuilder/update/Assignment.html diff --git a/mapper-processor/CONTRIBUTING.md b/mapper-processor/CONTRIBUTING.md index 11659a9f936..c6d324106c4 100644 --- a/mapper-processor/CONTRIBUTING.md +++ b/mapper-processor/CONTRIBUTING.md @@ -1,3 +1,22 @@ + + # Mapper contributing guidelines Everything in the [main contribution guidelines](../CONTRIBUTING.md) also applies to the mapper. diff --git a/mapper-processor/pom.xml b/mapper-processor/pom.xml index 3d3538f4e59..04d8c98c4f0 100644 --- a/mapper-processor/pom.xml +++ b/mapper-processor/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-mapper-processor - - DataStax Java driver for Apache Cassandra(R) - object mapper processor - + Apache Cassandra Java Driver - object mapper processor + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + - com.datastax.oss + org.apache.cassandra java-driver-mapper-runtime - ${project.version} - com.datastax.oss - java-driver-shaded-guava + org.apache.cassandra + java-driver-guava-shaded com.squareup javapoet - - com.google.auto.service - auto-service - true - com.github.stephenc.jcip jcip-annotations + provided com.github.spotbugs spotbugs-annotations + provided com.google.testing.compile @@ -80,12 +86,65 @@ mockito-core test + + ch.qos.logback + logback-classic + test + + + org.apache.cassandra + java-driver-core + test + test-jar + - + + + src/main/resources + + + ${project.basedir}/.. + + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + + + + src/test/resources + + project.properties + + true + + + src/test/resources + + project.properties + + false + + + + maven-compiler-plugin + + none + + maven-jar-plugin + + + + com.datastax.oss.driver.mapper.processor + + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + diff --git a/mapper-processor/src/test/resources/project.properties b/mapper-processor/src/test/resources/project.properties new file mode 100644 index 00000000000..66eab90b6e4 --- /dev/null +++ b/mapper-processor/src/test/resources/project.properties @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +project.basedir=${basedir} \ No newline at end of file diff --git a/mapper-runtime/pom.xml b/mapper-runtime/pom.xml index da5be25bded..57fbd5d3432 100644 --- a/mapper-runtime/pom.xml +++ b/mapper-runtime/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-mapper-runtime bundle - - DataStax Java driver for Apache Cassandra(R) - object mapper runtime - + Apache Cassandra Java Driver - object mapper runtime + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + ${project.groupId} java-driver-core - ${project.version} ${project.groupId} java-driver-query-builder - ${project.version} com.github.stephenc.jcip jcip-annotations + provided com.github.spotbugs spotbugs-annotations + provided + + + junit + junit + test + + + org.testng + testng + test + + + org.reactivestreams + reactive-streams-tck + test + + + io.reactivex.rxjava2 + rxjava + test + + + org.mockito + mockito-core + test + + + org.assertj + assertj-core + test + + + org.apache.cassandra + java-driver-core + test + test-jar - + + + src/main/resources + + + ${project.basedir}/.. + + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + + + + src/test/resources + + project.properties + + true + + + src/test/resources + + project.properties + + false + + + + maven-jar-plugin + + + + com.datastax.oss.driver.mapper.runtime + + + + + + maven-surefire-plugin + + ${testing.jvm}/bin/java + 1 + + + + junit + false + + + suitename + Reactive Streams TCK + + + + + + org.apache.maven.surefire + surefire-junit47 + ${surefire.version} + + + org.apache.maven.surefire + surefire-testng + ${surefire.version} + + + org.apache.felix maven-bundle-plugin @@ -69,23 +184,44 @@ (so reflection-based loading of policies works) --> * - - !net.jcip.annotations.*, - !edu.umd.cs.findbugs.annotations.*, - * + !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, + org.reactivestreams.*;resolution:=optional, * - - com.datastax.oss.driver.*.mapper.* - + com.datastax.*.driver.*.mapper.* + + maven-dependency-plugin + + + generate-dependency-list + + list + + generate-resources + + runtime + true + com.datastax.cassandra,com.datastax.dse + ${project.build.outputDirectory}/com/datastax/dse/driver/internal/mapper/deps.txt + + + + diff --git a/mapper-runtime/revapi.json b/mapper-runtime/revapi.json index 0f06ffd3673..3dc2ea21671 100644 --- a/mapper-runtime/revapi.json +++ b/mapper-runtime/revapi.json @@ -1,5 +1,3 @@ -// Configures Revapi (https://revapi.org/getting-started.html) to check API compatibility between -// successive driver versions. 
{ "revapi": { "java": { @@ -8,15 +6,71 @@ "regex": true, "exclude": [ "com\\.datastax\\.oss\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", "com\\.datastax\\.oss\\.simulacron(\\..+)?", - // Don't re-check sibling modules that this module depends on - "com\\.datastax\\.oss\\.driver\\.api\\.core(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.api\\.querybuilder(\\..+)?" + "// Don't re-check sibling modules that this module depends on", + "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.core(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.querybuilder(\\..+)?" ] } } - } + }, + "ignore": [ + { + "regex": true, + "code": "java.annotation.attributeValueChanged", + "old": "@interface com\\.datastax\\.oss\\.driver\\.api\\.mapper\\.annotations\\..*", + "annotationType": "java.lang.annotation.Retention", + "attribute": "value", + "oldValue": "java.lang.annotation.RetentionPolicy.CLASS", + "newValue": "java.lang.annotation.RetentionPolicy.RUNTIME", + "justification": "JAVA-2369: Change mapper annotations retention to runtime" + }, + { + "code": "java.annotation.added", + "old": "@interface com.datastax.oss.driver.api.mapper.annotations.Computed", + "new": "@interface com.datastax.oss.driver.api.mapper.annotations.Computed", + "annotation": "@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)", + "justification": "Oversight, should have been annotated this way from the start" + }, + { + "code": "java.annotation.added", + "old": "@interface com.datastax.oss.driver.api.mapper.annotations.Computed", + "new": "@interface com.datastax.oss.driver.api.mapper.annotations.Computed", + "annotation": "@java.lang.annotation.Target({java.lang.annotation.ElementType.FIELD, java.lang.annotation.ElementType.METHOD})", + "justification": "Oversight, should have been annotated this way from the start" + }, + { + "code": "java.annotation.added", + "old": "@interface com.datastax.oss.driver.api.mapper.annotations.TransientProperties", + "new": "@interface com.datastax.oss.driver.api.mapper.annotations.TransientProperties", + "annotation": "@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)", + "justification": "Oversight, should have been annotated this way from the start" + }, + { + "code": "java.annotation.added", + "old": "@interface com.datastax.oss.driver.api.mapper.annotations.TransientProperties", + "new": "@interface com.datastax.oss.driver.api.mapper.annotations.TransientProperties", + "annotation": "@java.lang.annotation.Target({java.lang.annotation.ElementType.TYPE})", + "justification": "Oversight, should have been annotated this way from the start" + }, + { + "code": "java.method.addedToInterface", + "new": "method java.lang.String com.datastax.oss.driver.api.mapper.MapperContext::getExecutionProfileName()", + "justification": "JAVA-2633: Add execution profile argument to DAO factory method (accept API break -- it's unlikely that MapperContext will be implemented outside of the driver)" + + }, + { + "code": "java.method.addedToInterface", + "new": "method com.datastax.oss.driver.api.core.config.DriverExecutionProfile com.datastax.oss.driver.api.mapper.MapperContext::getExecutionProfile()", + "justification": "JAVA-2633: Add execution profile argument to DAO factory method (accept API break -- it's unlikely that MapperContext will be implemented outside of the driver)" + }, + { + "code": 
"java.method.addedToInterface", + "new": "method com.datastax.oss.driver.api.mapper.result.MapperResultProducer com.datastax.oss.driver.api.mapper.MapperContext::getResultProducer(com.datastax.oss.driver.api.core.type.reflect.GenericType)", + "justification": "JAVA-2792: Allow custom results in the mapper (accept API break -- it's unlikely that MapperContext will be implemented outside of the driver)" + } + ] } } diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.java new file mode 100644 index 00000000000..b4e6960ed66 --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSet.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.mapper.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveQueryMetadata; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import org.reactivestreams.Publisher; + +/** + * A {@link Publisher} of mapped entities returned by DAO methods. In other words, this interface is + * the equivalent of {@link ReactiveResultSet} for mapped entities. + * + *

        By default, all implementations returned by the driver are cold, unicast, single-subscriber + * only publishers. In other words, they do not support multiple subscriptions; consider + * caching the results produced by such publishers if you need to consume them by more than one + * downstream subscriber. + * + *

        Also, note that mapped reactive result sets may emit items to their subscribers on an internal + * driver IO thread. Subscriber implementors are encouraged to abide by Reactive Streams + * Specification rule 2.2 and avoid performing heavy computations or blocking calls inside + * {@link org.reactivestreams.Subscriber#onNext(Object) onNext} calls, as doing so could slow down + * the driver and impact performance. Instead, they should asynchronously dispatch received signals + * to their processing logic. + * + *

<p>This type is located in a {@code dse} package for historical reasons; reactive result sets + * work with both Cassandra and DSE. + * + * @see ReactiveResultSet + */ +public interface MappedReactiveResultSet<EntityT> + extends Publisher<EntityT>, ReactiveQueryMetadata {} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/DefaultMappedReactiveResultSet.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/DefaultMappedReactiveResultSet.java new file mode 100644 index 00000000000..e1e701faddd --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/DefaultMappedReactiveResultSet.java @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.mapper.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveRow; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; +import java.util.function.Function; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DefaultMappedReactiveResultSet<EntityT> implements MappedReactiveResultSet<EntityT> { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultMappedReactiveResultSet.class); + + private static final Subscription EMPTY_SUBSCRIPTION = + new Subscription() { + @Override + public void request(long n) {} + + @Override + public void cancel() {} + }; + + @NonNull private final ReactiveResultSet source; + + @NonNull private final Function<ReactiveRow, EntityT> mapper; + + public DefaultMappedReactiveResultSet( + @NonNull ReactiveResultSet source, @NonNull Function<ReactiveRow, EntityT> mapper) { + this.source = source; + this.mapper = mapper; + } + + @Override + @NonNull + public Publisher<ColumnDefinitions> getColumnDefinitions() { + return source.getColumnDefinitions(); + } + + @Override + @NonNull + public Publisher<ExecutionInfo> getExecutionInfos() { + return source.getExecutionInfos(); + } + + @Override + @NonNull + public Publisher<Boolean> wasApplied() { + return source.wasApplied(); + } + + @Override + public void subscribe(@NonNull Subscriber<? super EntityT> subscriber) { + // As per rule 1.9, we need to throw an NPE if subscriber is null + Objects.requireNonNull(subscriber, "Subscriber cannot be null"); + // As per rule 1.11, this publisher supports multiple subscribers in a unicast configuration, + // as long as the source publisher does too.
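+    // The wrapper below receives each ReactiveRow emitted by the source and applies the mapping
+    // function to it, so that the downstream subscriber only ever sees mapped entities.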
+ MappedReactiveResultSetSubscriber s = new MappedReactiveResultSetSubscriber(subscriber); + try { + source.subscribe(s); + } catch (Throwable t) { + // As per rule 1.9: subscribe MUST return normally. The only legal way to signal failure (or + // reject the Subscriber) is by calling onError (after calling onSubscribe). + s.cancel(); + IllegalStateException error = + new IllegalStateException( + "Publisher violated $1.9 by throwing an exception from subscribe.", t); + LOG.error(error.getMessage(), error.getCause()); + // This may violate 1.9 since we cannot know if subscriber.onSubscribe was called or not. + subscriber.onSubscribe(EMPTY_SUBSCRIPTION); + subscriber.onError(error); + } + // As per 1.9, this method must return normally (i.e. not throw) + } + + private class MappedReactiveResultSetSubscriber implements Subscriber<ReactiveRow>, Subscription { + + private volatile Subscriber<? super EntityT> downstreamSubscriber; + private volatile Subscription upstreamSubscription; + private volatile boolean terminated; + + MappedReactiveResultSetSubscriber(@NonNull Subscriber<? super EntityT> subscriber) { + this.downstreamSubscriber = subscriber; + } + + @Override + public void onSubscribe(@NonNull Subscription subscription) { + // As per rule 2.13, we need to throw NPE if the subscription is null + Objects.requireNonNull(subscription, "Subscription cannot be null"); + // As per rule 2.12, Subscriber.onSubscribe MUST be called at most once for a given subscriber + if (upstreamSubscription != null) { + try { + // Cancel the additional subscription + subscription.cancel(); + } catch (Throwable t) { + // As per rule 3.15, Subscription.cancel is not allowed to throw an exception; the only + // thing we can do is log. + LOG.error("Subscription violated $3.15 by throwing an exception from cancel.", t); + } + } else if (!terminated) { + upstreamSubscription = subscription; + try { + downstreamSubscriber.onSubscribe(this); + } catch (Throwable t) { + // As per rule 2.13: In the case that this rule is violated, + // any associated Subscription to the Subscriber MUST be considered as + // cancelled... + cancel(); + // ...and the caller MUST raise this error condition in a fashion that is "adequate for + // the runtime environment" (we choose to log).
+ LOG.error("Subscriber violated $2.13 by throwing an exception from onSubscribe.", t); + } + } + } + + @Override + public void onNext(@NonNull ReactiveRow row) { + LOG.trace("Received onNext: {}", row); + if (upstreamSubscription == null) { + LOG.error("Publisher violated $1.09 by signalling onNext prior to onSubscribe."); + } else if (!terminated) { + Objects.requireNonNull(row, "Publisher violated $2.13 by emitting a null element"); + EntityT entity; + try { + entity = mapper.apply(row); + } catch (Throwable t) { + onError(t); + return; + } + Objects.requireNonNull(entity, "Publisher violated $2.13 by generating a null entity"); + try { + downstreamSubscriber.onNext(entity); + } catch (Throwable t) { + LOG.error("Subscriber violated $2.13 by throwing an exception from onNext.", t); + cancel(); + } + } + } + + @Override + public void onComplete() { + LOG.trace("Received onComplete"); + if (upstreamSubscription == null) { + LOG.error("Publisher violated $1.09 by signalling onComplete prior to onSubscribe."); + } else if (!terminated) { + try { + downstreamSubscriber.onComplete(); + } catch (Throwable t) { + LOG.error("Subscriber violated $2.13 by throwing an exception from onComplete.", t); + } + // We need to consider this Subscription as cancelled as per rule 1.6 + cancel(); + } + } + + @Override + public void onError(@NonNull Throwable error) { + LOG.trace("Received onError", error); + if (upstreamSubscription == null) { + LOG.error("Publisher violated $1.09 by signalling onError prior to onSubscribe."); + } else if (!terminated) { + Objects.requireNonNull(error, "Publisher violated $2.13 by signalling a null error"); + try { + downstreamSubscriber.onError(error); + } catch (Throwable t) { + t.addSuppressed(error); + LOG.error("Subscriber violated $2.13 by throwing an exception from onError.", t); + } + // We need to consider this Subscription as cancelled as per rule 1.6 + cancel(); + } + } + + @Override + public void request(long n) { + LOG.trace("Received request: {}", n); + // As per 3.6: after the Subscription is cancelled, additional calls to request() MUST be + // NOPs. + // Implementation note: triggering onError() from below may break 1.3 because this method is + // called by the subscriber thread, and it can race with the producer thread. But these + // situations are already abnormal, so there is no point in trying to prevent the race + // condition with locks. + if (!terminated) { + if (n <= 0) { + // Validate request as per rule 3.9: While the subscription is not cancelled, + // Subscription.request(long n) MUST signal onError with a + // java.lang.IllegalArgumentException if the argument is <= 0. + // The cause message SHOULD explain that non-positive request signals are illegal. + onError( + new IllegalArgumentException( + "Subscriber violated $3.9 by requesting a non-positive number of elements.")); + } else { + try { + upstreamSubscription.request(n); + } catch (Throwable t) { + // As per rule 3.16, Subscription.request is not allowed to throw + IllegalStateException error = + new IllegalStateException( + "Subscription violated $3.16 by throwing an exception from request.", t); + onError(error); + } + } + } + } + + @Override + public void cancel() { + // As per 3.5: Subscription.cancel() MUST respect the responsiveness of its caller by + // returning in a timely manner, MUST be idempotent and MUST be thread-safe. 
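+      // 'terminated' is volatile, and rule 3.5 quoted above also requires the upstream cancel()
+      // to be idempotent, so a racing duplicate cancellation here is harmless.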
+ if (!terminated) { + terminated = true; + LOG.trace("Cancelling"); + // propagate cancellation, if we got a chance to subscribe to the upstream source + if (upstreamSubscription != null) { + upstreamSubscription.cancel(); + } + // As per 3.13, Subscription.cancel() MUST request the Publisher to + // eventually drop any references to the corresponding subscriber. + downstreamSubscriber = null; + upstreamSubscription = null; + } + } + } +} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/FailedMappedReactiveResultSet.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/FailedMappedReactiveResultSet.java new file mode 100644 index 00000000000..3ed27edbf9d --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/FailedMappedReactiveResultSet.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.mapper.reactive; + +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.dse.driver.internal.core.cql.reactive.FailedPublisher; +import com.datastax.oss.driver.api.core.cql.ColumnDefinitions; +import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import edu.umd.cs.findbugs.annotations.NonNull; +import org.reactivestreams.Publisher; + +/** + * A mapped reactive result set that immediately signals the error passed at instantiation to all + * its subscribers. + */ +public class FailedMappedReactiveResultSet<EntityT> extends FailedPublisher<EntityT> + implements MappedReactiveResultSet<EntityT> { + + public FailedMappedReactiveResultSet(Throwable error) { + super(error); + } + + @NonNull + @Override + public Publisher<ColumnDefinitions> getColumnDefinitions() { + return new FailedPublisher<>(error); + } + + @NonNull + @Override + public Publisher<ExecutionInfo> getExecutionInfos() { + return new FailedPublisher<>(error); + } + + @NonNull + @Override + public Publisher<Boolean> wasApplied() { + return new FailedPublisher<>(error); + } +} diff --git a/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/ReactiveDaoBase.java b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/ReactiveDaoBase.java new file mode 100644 index 00000000000..56576829a40 --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/dse/driver/internal/mapper/reactive/ReactiveDaoBase.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.mapper.reactive; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import com.datastax.oss.driver.internal.mapper.DaoBase; + +public class ReactiveDaoBase extends DaoBase { + + protected ReactiveDaoBase(MapperContext context) { + super(context); + } + + protected ReactiveResultSet executeReactive(Statement<?> statement) { + return context.getSession().executeReactive(statement); + } + + protected <EntityT> MappedReactiveResultSet<EntityT> executeReactiveAndMap( + Statement<?> statement, EntityHelper<EntityT> entityHelper) { + ReactiveResultSet source = executeReactive(statement); + return new DefaultMappedReactiveResultSet<>(source, row -> entityHelper.get(row, false)); + } +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java index d45d772ee5e..3838892172a 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,15 @@ */ package com.datastax.oss.driver.api.mapper; +import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; import com.datastax.oss.driver.api.mapper.annotations.Mapper; +import com.datastax.oss.driver.api.mapper.annotations.NamingStrategy; import com.datastax.oss.driver.api.mapper.annotations.QueryProvider; +import com.datastax.oss.driver.api.mapper.annotations.SchemaHint; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.HashMap; @@ -31,12 +39,138 @@ */ public abstract class MapperBuilder<MapperT> { + public static final String SCHEMA_VALIDATION_ENABLED_SETTING = + "datastax.mapper.schemaValidationEnabled"; protected final CqlSession session; + protected CqlIdentifier defaultKeyspaceId; protected Map<Object, Object> customState; + protected String defaultExecutionProfileName; + protected DriverExecutionProfile defaultExecutionProfile; protected MapperBuilder(CqlSession session) { this.session = session; this.customState = new HashMap<>(); + // schema validation is enabled by default + customState.put(SCHEMA_VALIDATION_ENABLED_SETTING, true); + } + + /** + * Specifies a default keyspace that will be used for all DAOs built with this mapper (unless they + * specify their own keyspace). + * + *

        In other words, given the following definitions: + * + *
+   * <pre>
        +   * @Mapper
        +   * public interface InventoryMapper {
        +   *   @DaoFactory
        +   *   ProductDao productDao();
        +   *
        +   *   @DaoFactory
        +   *   ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace);
        +   * }
        +   *
        +   * InventoryMapper mapper1 = new InventoryMapperBuilder(session)
        +   *     .withDefaultKeyspace(CqlIdentifier.fromCql("ks1"))
        +   *     .build();
        +   * InventoryMapper mapper2 = new InventoryMapperBuilder(session)
        +   *     .withDefaultKeyspace(CqlIdentifier.fromCql("ks2"))
        +   *     .build();
+   * </pre>
        + * + * Then: + * + *
+   * <ul>
<li>{@code mapper1.productDao()} will use keyspace {@code ks1}; + *
<li>{@code mapper2.productDao()} will use keyspace {@code ks2}; + *
<li>{@code mapper1.productDao(CqlIdentifier.fromCql("ks3"))} will use keyspace {@code ks3}. + *
+   * </ul> + * + * @see DaoFactory + */ + @NonNull + public MapperBuilder<MapperT> withDefaultKeyspace(@Nullable CqlIdentifier keyspaceId) { + this.defaultKeyspaceId = keyspaceId; + return this; + } + + /** + * Shortcut for {@link #withDefaultKeyspace(CqlIdentifier) + * withDefaultKeyspace(CqlIdentifier.fromCql(keyspaceName))}. + */ + @NonNull + public MapperBuilder<MapperT> withDefaultKeyspace(@Nullable String keyspaceName) { + return withDefaultKeyspace(keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName)); + } + + /** + * Specifies a default execution profile name that will be used for all DAOs built with this + * mapper (unless they specify their own execution profile). + * + *

        This works the same way as the {@linkplain #withDefaultKeyspace(CqlIdentifier) default + * keyspace}. + * + *

<p>Note that if you had already set a profile with #withDefaultExecutionProfile, this method + * erases it. + * + * @see DaoFactory + */ + @NonNull + public MapperBuilder<MapperT> withDefaultExecutionProfileName( + @Nullable String executionProfileName) { + this.defaultExecutionProfileName = executionProfileName; + if (executionProfileName != null) { + this.defaultExecutionProfile = null; + } + return this; + } + + /** + * Specifies a default execution profile that will be used for all DAOs built with this + * mapper (unless they specify their own execution profile). + * + *

        This works the same way as the {@linkplain #withDefaultKeyspace(CqlIdentifier) default + * keyspace}. + * + *

<p>Note that if you had already set a profile name with #withDefaultExecutionProfileName, this + * method erases it. + * + * @see DaoFactory + */ + @NonNull + public MapperBuilder<MapperT> withDefaultExecutionProfile( + @Nullable DriverExecutionProfile executionProfile) { + this.defaultExecutionProfile = executionProfile; + if (executionProfile != null) { + this.defaultExecutionProfileName = null; + } + return this; + } + + /** + * Whether to validate mapped entities against the database schema. + * + *

        If this is enabled, then every time a new DAO gets created, for each entity referenced in + * the DAO, the mapper will check that there is a corresponding table or UDT. + * + *

+   * <ul>
<li>for each entity field, the database table or UDT must contain a column with the + * corresponding name (according to the {@link NamingStrategy}). + *
<li>the types must be compatible, according to the {@link CodecRegistry} used by the session. + *
<li>additionally, if the target element is a table, the primary key must be properly + * annotated in the entity. + *
+   * </ul> + * + * If any of those steps fails, an {@link IllegalArgumentException} is thrown. + * + *

        Schema validation is enabled by default; it adds a small startup overhead, so once your + * application is stable you may want to disable it. + * + * @see SchemaHint + */ + public MapperBuilder withSchemaValidationEnabled(boolean enableSchemaValidation) { + customState.put(SCHEMA_VALIDATION_ENABLED_SETTING, enableSchemaValidation); + return this; } /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java index aaaf14fd7a9..9f9df5f93f7 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperContext.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +19,11 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Map; @@ -45,6 +51,26 @@ public interface MapperContext { @Nullable CqlIdentifier getTableId(); + /** + * If this context belongs to a DAO that was built with a method that takes an execution profile + * name as parameter, the value of that parameter. Otherwise null. + * + *

        Note that this is mutually exclusive with {@link #getExecutionProfile()}: at most one of the + * two methods returns a non-null value (or both return null if no profile was provided). + */ + @Nullable + String getExecutionProfileName(); + + /** + * If this context belongs to a DAO that was built with a method that takes an execution profile + * as parameter, the value of that parameter. Otherwise null. + * + *

        Note that this is mutually exclusive with {@link #getExecutionProfileName()}: at most one of + * the two methods returns a non-null value (or both return null if no profile was provided). + */ + @Nullable + DriverExecutionProfile getExecutionProfile(); + /** * Returns an instance of the given converter class. * @@ -62,4 +88,18 @@ public interface MapperContext { */ @NonNull Map getCustomState(); + + /** + * Returns a component that will execute a statement and convert it into a custom result of the + * given type. + * + *

        These components must be registered through the Java Service Provider Interface mechanism, + * see {@link MapperResultProducerService}. + * + *

        The results of this method are cached at the JVM level. + * + * @throws IllegalArgumentException if no producer was registered for this type. + */ + @NonNull + MapperResultProducer getResultProducer(@NonNull GenericType resultToProduce); } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java index 06b0f1646cf..f659ac00ad5 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/MapperException.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java index 6a54cdb30e1..cfbf97e73aa 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/ClusteringColumn.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,7 +49,7 @@ *

        This annotation is mutually exclusive with {@link PartitionKey}. */ @Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface ClusteringColumn { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java index d867dd0bb09..817bbf2c294 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Computed.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,11 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + /** * Annotates the field or getter of an {@link Entity} property, to indicate that when retrieving * data that the property should be set to the result of computation on the Cassandra side, @@ -27,6 +34,8 @@ * private int writeTime; * */ +@Target({ElementType.FIELD, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) public @interface Computed { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java index e90703a6a17..9b9ef15afb6 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/CqlName.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -56,7 +58,7 @@ * entity. 
*/ @Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface CqlName { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java index 876f8a98a99..bcab01d98fc 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Dao.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,11 +51,12 @@ *

      3. {@link GetEntity} *
      4. {@link Insert} *
      5. {@link Query} + *
      6. {@link QueryProvider} *
      7. {@link Select} *
      8. {@link SetEntity} - * + *
      9. {@link Update} * */ @Target(ElementType.TYPE) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Dao {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java index dddcdbcced7..c792c132fb0 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoFactory.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -92,5 +94,5 @@ * Note that the cache is a simple map with no eviction mechanism. */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface DaoFactory {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java index c6f6f462cce..6f24ffa9d56 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,5 +43,5 @@ * @see DaoFactory */ @Target(ElementType.PARAMETER) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface DaoKeyspace {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoProfile.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoProfile.java new file mode 100644 index 00000000000..66fc3ed433a --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoProfile.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.mapper.annotations; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotates the parameter of a {@link DaoFactory} method that indicates the execution profile to + * create a DAO for. + * + *

        Example: + * + *

        + *  * @Mapper
        + *  * public interface InventoryMapper {
        + *  *   ProductDao productDao(@DaoProfile String executionProfile);
        + *  * }
        + *  * 
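For illustration, a usage sketch of the InventoryMapper example above (everything here is hypothetical: InventoryMapperBuilder follows the mapper processor's generated-builder naming convention, and the profile names would have to exist in the driver configuration):

```java
import com.datastax.oss.driver.api.core.CqlSession;

public class DaoProfileExample {
  public static void main(String[] args) {
    try (CqlSession session = CqlSession.builder().build()) {
      // Generated by the mapper processor for the @Mapper interface above.
      InventoryMapper mapper = new InventoryMapperBuilder(session).build();
      // Hypothetical profile names, declared in application.conf:
      ProductDao oltpDao = mapper.productDao("oltp");
      ProductDao analyticsDao = mapper.productDao("analytics");
    }
  }
}
```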
        + * + * The annotated parameter can be a {@link String} or {@link DriverExecutionProfile}. If it is + * present, the value will be injected in the DAO instance, where it will be used in generated + * queries. This allows you to reuse the same DAO for different execution profiles. + * + * @see DaoFactory + */ +@Target(ElementType.PARAMETER) +@Retention(RetentionPolicy.RUNTIME) +public @interface DaoProfile {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java index 71efa4bbf7f..7ac0d66dd6c 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DaoTable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,5 +43,5 @@ * @see DaoFactory */ @Target(ElementType.PARAMETER) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface DaoTable {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java index db5aa0b621d..a5c33b3f17f 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/DefaultNullSavingStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -53,7 +55,7 @@ * Query} and {@link SetEntity} methods. 
*/ @Target(ElementType.TYPE) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface DefaultNullSavingStrategy { NullSavingStrategy value(); } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java index 324aa24eb81..a9d2c03912f 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Delete.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +17,14 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -91,6 +96,12 @@ * ResultSet deleteIfDescriptionMatches(UUID productId, String expectedDescription); * // if the condition fails, the result set will contain columns '[applied]' and 'description' * + *
10. a {@link BoundStatement}. This is intended for queries where you will execute this + * statement later or in a batch: + *
        + * @Delete
        + * BoundStatement delete(Product product);
        + *       
        *
      11. a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The method will * execute the query asynchronously. Note that for result sets, you need to switch to {@link * AsyncResultSet}. @@ -104,6 +115,12 @@ * @Delete(entityClass = Product.class, customIfClause = "description = :expectedDescription") * CompletionStage<AsyncResultSet> deleteIfDescriptionMatchesAsync(UUID productId, String expectedDescription); * + *
      12. a {@link ReactiveResultSet}. + *
        + * @Delete
        + * ReactiveResultSet deleteReactive(Product product);
        + *       
        + *
      13. a {@linkplain MapperResultProducer custom type}. * * * Note that you can also return a boolean or result set for non-conditional queries, but there's no @@ -122,7 +139,7 @@ * entity class and the naming convention). */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Delete { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java index 3466cf0d610..506c7f13d22 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Entity.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,8 +49,9 @@ * getDescription}) and has no parameters. The name of the property is obtained by removing * the "get" prefix and decapitalizing ({@code description}), and the type of the property is * the return type of the getter. - *
      14. there must be a matching setter method ({@code setDescription}), with a single - * parameter that has the same type as the property (the return type does not matter). + *
      15. unless the entity is {@linkplain PropertyStrategy#mutable() immutable}, there must + * be a matching setter method ({@code setDescription}), with a single parameter that has the + * same type as the property (the return type does not matter). * * * There may also be a matching field ({@code description}) that has the same type as the @@ -74,7 +77,7 @@ * inside other entities (to map UDT columns). */ @Target(ElementType.TYPE) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Entity { /** * Specifies a default keyspace to use when doing operations on this entity. diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java index 85017ffe03b..d86174bdc49 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/GetEntity.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -101,5 +103,19 @@ * {@link AsyncResultSet}), the mapper processor will issue a compile-time error. */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) -public @interface GetEntity {} +@Retention(RetentionPolicy.RUNTIME) +public @interface GetEntity { + + /** + * Whether to tolerate missing columns in the source data structure. + * + *

        If {@code false} (the default), then the source must contain a matching column for every + * property in the entity definition, including computed ones. If such a column is not + * found, an {@link IllegalArgumentException} will be thrown. + * + *

        If {@code true}, the mapper will operate on a best-effort basis and attempt to read all + * entity properties that have a matching column in the source, leaving unmatched properties + * untouched. Beware that this may result in a partially-populated entity instance. + */ + boolean lenient() default false; +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java index 74aa433287e..0b064b8597d 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/HierarchyScanStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Increment.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Increment.java new file mode 100644 index 00000000000..bb86fa5b8ab --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Increment.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.mapper.annotations; + +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.function.Function; +import java.util.function.UnaryOperator; + +/** + * Annotates a {@link Dao} method that increments a counter table that is mapped to an {@link + * Entity}-annotated class. + * + *

        Example: + * + *

        + * @Entity
        + * public class Votes {
        + *   @PartitionKey private int articleId;
        + *   private long upVotes;
        + *   private long downVotes;
        + *   ... // constructor(s), getters and setters, etc.
        + * }
        + * @Dao
        + * public interface VotesDao {
        + *   @Increment(entityClass = Votes.class)
        + *   void incrementUpVotes(int articleId, long upVotes);
        + *
        + *   @Increment(entityClass = Votes.class)
        + *   void incrementDownVotes(int articleId, long downVotes);
        + *
        + *   @Select
        + *   Votes findById(int articleId);
        + * }
        + * 
        + * + *
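To make the generated behavior concrete, a method like incrementUpVotes above is roughly equivalent to the following hand-written counter update using the core driver (a sketch only: the exact statement text is an implementation detail of the mapper processor, and the snake_case names assume the default naming convention):

```java
// Sketch: what the mapper-generated incrementUpVotes(articleId, delta) boils down to.
// Assumes the default snake_case naming ("votes", "up_votes", "article_id") and
// imports of CqlSession and PreparedStatement from the core driver.
void incrementUpVotesByHand(CqlSession session, int articleId, long delta) {
  PreparedStatement ps =
      session.prepare(
          "UPDATE votes SET up_votes = up_votes + :up_votes WHERE article_id = :article_id");
  session.execute(
      ps.bind()
          .setLong("up_votes", delta) // the delta; negative values decrement
          .setInt("article_id", articleId));
}
```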

        Parameters

        + * + * The entity class must be specified with {@link #entityClass()}. + * + *

The method's parameters must start with the full primary key, in the exact order (as defined + * by the {@link PartitionKey} and {@link ClusteringColumn} annotations in the entity class). The + * parameter names don't necessarily need to match the names of the columns, but the types must + * match. Unlike other methods like {@link Select} or {@link Delete}, counter updates cannot operate + * on a whole partition: they need to target exactly one row, so all the partition key and + * clustering columns must be specified. + *

The primary key parameters must be followed by one or more parameters representing counter + * increments. Their type must be {@code long} or {@link Long}. The name of each parameter must + * match the name of the entity property that maps to the counter (that is, the name of the getter + * without "get" and decapitalized). Alternatively, you may annotate a parameter with {@link + * CqlName} to specify the raw column name directly; in that case, the name of the parameter does + * not matter: + *

        + * @Increment(entityClass = Votes.class)
        + * void incrementUpVotes(int articleId, @CqlName("up_votes") long foobar);
        + * 
+ * + * When you invoke the method, each parameter value is interpreted as a delta that will be + * applied to the counter. In other words, if you pass 1, the counter will be incremented by 1. + * Negative values are allowed. If you are using Cassandra 2.2 or above, you can use {@link Long} + * and pass {@code null} for some of the parameters; they will be ignored (following {@link + * NullSavingStrategy#DO_NOT_SET} semantics). If you are using Cassandra 2.1, {@code null} values + * will trigger a runtime error. + *

        A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link + * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last + * parameter. It will be applied to the statement before execution. This allows you to customize + * certain aspects of the request (page size, timeout, etc) at runtime. + * + *
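For example, a sketch of the trailing function parameter in use (the timeout value is arbitrary, and imports for Duration, UnaryOperator and BoundStatementBuilder are assumed):

```java
@Dao
public interface VotesDao {
  @Increment(entityClass = Votes.class)
  void incrementUpVotes(
      int articleId, long upVotes, UnaryOperator<BoundStatementBuilder> customizer);
}

// Call-site fragment: tweak the statement right before execution.
dao.incrementUpVotes(42, 1L, builder -> builder.setTimeout(Duration.ofSeconds(1)));
```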

        Return type

        + * + *

        The method can return {@code void}, a void {@link CompletionStage} or {@link + * CompletableFuture}, or a {@link ReactiveResultSet}. + * + *
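A short sketch of the non-void variants, reusing the VotesDao example (the method names are hypothetical):

```java
@Increment(entityClass = Votes.class)
CompletableFuture<Void> incrementUpVotesAsync(int articleId, long upVotes);

@Increment(entityClass = Votes.class)
ReactiveResultSet incrementUpVotesReactive(int articleId, long upVotes);
```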

        Target keyspace and table

        + * + *

        If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated + * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the + * mapper was built from a {@link Session} that has a {@linkplain + * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. + * + *

        If a table was specified when creating the DAO, then the generated query targets that table. + * Otherwise, it uses the default table name for the entity (which is determined by the name of the + * entity class and the naming convention). + */ +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +public @interface Increment { + + /** + * A hint to indicate the entity class that is being targeted. This is mandatory, the mapper will + * issue a compile error if you leave it unset. + * + *

Note that, for technical reasons, this is an array, but only one element is expected. If you + * specify more than one class, the mapper processor will generate a compile-time warning, and + * proceed with the first one. + */ + Class<?>[] entityClass() default {}; +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java index a43f2b04e45..602a673d8a7 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Insert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +17,14 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -91,6 +96,12 @@ * @Insert * ResultSet save(Product product); * + *

16. a {@link BoundStatement}. This is intended for cases where you intend to execute this + * statement later or in a batch (see the batch sketch after this list): + *
        + * @Insert
        + * BoundStatement save(Product product);
        + *      
        *
      17. a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The mapper will * execute the query asynchronously. *
        @@ -103,6 +114,12 @@
          * @Insert(ifNotExists = true)
          * CompletableFuture<Optional<Product>> insertIfNotExists(Product product);
          *       
        + *
      18. a {@link ReactiveResultSet}. + *
        + * @Insert
        + * ReactiveResultSet insertReactive(Product product);
        + *       
        + *
      19. a {@linkplain MapperResultProducer custom type}. * * *
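Since the BoundStatement variant (item 16 above) only builds the statement, a common follow-up is to group several of them into a single atomic batch. A sketch, assuming a DAO exposing the BoundStatement-returning save method shown above:

```java
// Assumes imports of CqlSession, BatchStatement and DefaultBatchType from
// com.datastax.oss.driver.api.core / com.datastax.oss.driver.api.core.cql.
void saveBoth(CqlSession session, ProductDao dao, Product p1, Product p2) {
  BatchStatement batch =
      BatchStatement.builder(DefaultBatchType.LOGGED)
          .addStatement(dao.save(p1))
          .addStatement(dao.save(p2))
          .build();
  session.execute(batch);
}
```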

        Target keyspace and table

        @@ -117,7 +134,7 @@ * entity class and the naming convention). */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Insert { /** Whether to append an IF NOT EXISTS clause at the end of the generated INSERT query. */ boolean ifNotExists() default false; diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java index 163095c3758..8cdaf28fc51 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Mapper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,7 +49,7 @@ *

        The interface should define one or more {@link DaoFactory} methods. */ @Target(ElementType.TYPE) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Mapper { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java index fc947f16239..b5121b144b2 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/NamingStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,7 +43,7 @@ * NamingConvention#SNAKE_CASE_INSENSITIVE}. */ @Target(ElementType.TYPE) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface NamingStrategy { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java index d062efad9cb..1dff4280f5b 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PartitionKey.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,7 +50,7 @@ *

        This annotation is mutually exclusive with {@link ClusteringColumn}. */ @Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface PartitionKey { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.java new file mode 100644 index 00000000000..4d66fd84e33 --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/PropertyStrategy.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.mapper.annotations; + +import com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle; +import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotates an {@link Entity} to customize certain aspects of the introspection process that + * determines which methods are considered as properties, and how new instances will be created. + * + *

        Example: + * + *

        + * @Entity
        + * @PropertyStrategy(getterStyle = FLUENT)
        + * public class Account {
        + *   ...
        + * }
        + * 
        + * + * This annotation can be inherited from an interface or parent class. + * + *

When neither the entity class nor any of its parents is explicitly annotated, the mapper will + * assume context-dependent defaults: + *

          + *
        • for a Scala case class: {@code mutable = false} and {@code getterStyle = FLUENT}. The + * mapper detects this case by checking if the entity implements {@code scala.Product}. + *
        • for a Kotlin data class: {@code mutable = false} and {@code getterStyle = JAVABEANS}. The + * mapper detects this case by checking if the entity is annotated with {@code + * kotlin.Metadata}, and if it has any method named {@code component1} (both of these are + * added automatically by the Kotlin compiler). + *
        • Java records (JDK 14 and above): {@code mutable = false} and {@code getterStyle = FLUENT}. + * The mapper detects this case by checking if the entity extends {@code java.lang.Record}. + *
        • any other case: {@code mutable = true}, {@code getterStyle = JAVABEANS} and {@code + * setterStyle = JAVABEANS}. + *
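These defaults can always be overridden explicitly. A sketch of a plain Java entity declared as immutable (the class and property names are invented for the example):

```java
@Entity
@PropertyStrategy(mutable = false, getterStyle = GetterStyle.FLUENT)
public class Coordinates {
  @PartitionKey private final int id;
  private final double latitude;
  private final double longitude;

  // With mutable = false, the mapper expects a visible constructor taking all
  // non-transient properties, and does not look for setters.
  public Coordinates(int id, double latitude, double longitude) {
    this.id = id;
    this.latitude = latitude;
    this.longitude = longitude;
  }

  public int id() { return id; }
  public double latitude() { return latitude; }
  public double longitude() { return longitude; }
}
```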
+ * + * Note that this only applies if the annotation is completely absent. If it is present with only + * some of its attributes, the remaining attributes will get the default declared by the annotation, + * not the context-dependent default above (for example, if a Kotlin data class is annotated with + * {@code @PropertyStrategy(getterStyle = FLUENT)}, it will be mutable). + */ +@Target({ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +public @interface PropertyStrategy { + + /** The style of getter. See {@link GetterStyle} and its constants for more explanations. */ + GetterStyle getterStyle() default GetterStyle.JAVABEANS; + + /** + * The style of setter. See {@link SetterStyle} and its constants for more explanations. + * + *

        This has no effect if {@link #mutable()} is false. + */ + SetterStyle setterStyle() default SetterStyle.JAVABEANS; + + /** + * Whether the entity is mutable. + * + *

        If this is set to false: + * + *

          + *
        • the mapper won't try to discover setters for the properties; + *
        • it will assume that the entity class has a visible constructor that takes all the + * non-transient properties as arguments. + *
        + */ + boolean mutable() default true; +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java index 5292ba65875..c362453bb3a 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Query.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +17,19 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; import com.datastax.oss.driver.api.core.PagingIterable; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -83,11 +89,15 @@ *
      20. an {@link Optional} of an entity class. The method will extract the first row and convert * it, or return {@code Optional.empty()} if the result set is empty. *
      21. a {@link ResultSet}. The method will return the raw query result, without any conversion. + *
22. a {@link BoundStatement}. This is intended for cases where you intend to execute this + * statement later or in a batch. *
      23. a {@link PagingIterable}. The method will convert each row into an entity instance. *
      24. a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The method will * execute the query asynchronously. Note that for result sets and iterables, you need to * switch to the asynchronous equivalent {@link AsyncResultSet} and {@link * MappedAsyncPagingIterable} respectively. + *
      25. a {@link ReactiveResultSet}, or a {@link MappedReactiveResultSet} of the entity class. + *
      26. a {@linkplain MapperResultProducer custom type}. * * *
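A hedged sketch combining several of these return types (the table, column and entity names are invented; the long return assumes the count maps to a CQL bigint):

```java
@Dao
public interface SensorDao {
  // Single-row aggregate: reads the first column of the first row.
  @Query("SELECT count(*) FROM sensor_readings WHERE id = :id")
  long countReadings(int id);

  // Entity conversion, one instance per row:
  @Query("SELECT * FROM sensor_readings WHERE id = :id")
  PagingIterable<SensorReading> readings(int id);

  // Asynchronous variant of the above:
  @Query("SELECT * FROM sensor_readings WHERE id = :id")
  CompletionStage<MappedAsyncPagingIterable<SensorReading>> readingsAsync(int id);
}
```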

        Target keyspace and table

        @@ -137,7 +147,7 @@ * */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Query { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java index 4c6d3951c8e..d8194d12e8b 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/QueryProvider.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,7 +59,7 @@ * @see MapperBuilder#withCustomState(Object, Object) */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface QueryProvider { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SchemaHint.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SchemaHint.java new file mode 100644 index 00000000000..d680798ba5a --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SchemaHint.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.mapper.annotations; + +import com.datastax.oss.driver.api.mapper.MapperBuilder; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotates an entity to indicate which type of schema element it is supposed to map to. This is + * only used to optimize {@linkplain MapperBuilder#withSchemaValidationEnabled(boolean) schema + * validation}, it has no impact on query execution. + * + *

        Example: + * + *

        + * @Entity
        + * @SchemaHint(targetElement = SchemaHint.TargetElement.TABLE)
        + * public class Product {
        + *   // fields of the entity
        + * }
        + * 
        + * + *

        By default, the mapper first tries to match the entity with a table, and if that doesn't work, + * with a UDT. This annotation allows you to provide a hint as to which check should be done, so + * that the mapper can skip the other one. + * + *

        In addition, you can ask to completely skip the validation for this entity by using {@link + * TargetElement#NONE}. + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface SchemaHint { + TargetElement targetElement(); + + enum TargetElement { + TABLE, + UDT, + NONE, + ; + } +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java index 6238e1e3d5f..46c7994809d 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Select.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +17,13 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.MappedAsyncPagingIterable; import com.datastax.oss.driver.api.core.PagingIterable; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -50,7 +54,9 @@ * (partition key + clustering columns). The method's parameters must match the types of the primary * key columns, in the exact order (which is defined by the integer values of the {@link * PartitionKey} and {@link ClusteringColumn} annotations in the entity class). The parameter names - * don't necessarily need to match the names of the columns. + * don't necessarily need to match the names of the columns. It is also possible for the method to + * only take a partial primary key (the first n columns), in which case it will return + * multiple entities. * *

        If {@link #customWhereClause()} is not empty, it completely replaces the WHERE clause. The * provided string can contain named placeholders. In that case, the method must have a @@ -61,6 +67,21 @@ * PagingIterable<Product> findByDescription(String searchString); * * + * The generated SELECT query can be further customized with {@link #limit()}, {@link + * #perPartitionLimit()}, {@link #orderBy()}, {@link #groupBy()} and {@link #allowFiltering()}. Some + * of these clauses can also contain placeholders whose values will be provided through additional + * method parameters. Note that it is sometimes not possible to determine if a parameter is a + * primary key component or a placeholder value; therefore the rule is that if your method takes + * a partial primary key, the first parameter that is not a primary key component must be explicitly + * annotated with {@link CqlName}. For example if the primary key is {@code ((day int, hour int, + * minute int), ts timestamp)}: + * + *

        + * // Annotate 'l' so that it's not mistaken for the second PK component
        + * @Select(limit = ":l")
        + * PagingIterable<Sale> findDailySales(int day, @CqlName("l") int l);
        + * 
        + * *

        A {@link Function Function<BoundStatementBuilder, BoundStatementBuilder>} or {@link * UnaryOperator UnaryOperator<BoundStatementBuilder>} can be added as the last * parameter. It will be applied to the statement before execution. This allows you to customize @@ -103,6 +124,12 @@ * @Select(customWhereClause = "description LIKE :searchString") * CompletionStage<MappedAsyncPagingIterable<Product>> findByDescriptionAsync(String searchString); * + *

      27. a {@link MappedReactiveResultSet} of the entity class. + *
        + * @Select(customWhereClause = "description LIKE :searchString")
        + * MappedReactiveResultSet<Product> findByDescriptionReactive(String searchString);
        + *       
        + *
      28. a {@linkplain MapperResultProducer custom type}. * * *
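To tie the new SELECT attributes together, a sketch (the query shapes are illustrative; whether LIKE or ALLOW FILTERING is appropriate depends on your schema and indexes):

```java
@Dao
public interface ProductDao {
  // customWhereClause replaces the WHERE clause; limit and allowFiltering
  // append LIMIT 100 and ALLOW FILTERING to the generated query.
  @Select(customWhereClause = "description LIKE :search", limit = "100", allowFiltering = true)
  PagingIterable<Product> search(String search);

  // Placeholder-based limit, bound from the annotated parameter:
  @Select(limit = ":maxRows")
  PagingIterable<Product> firstN(@CqlName("maxRows") int maxRows);
}
```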

        Target keyspace and table

        @@ -117,7 +144,7 @@ * entity class and the naming convention). */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Select { /** @@ -130,4 +157,50 @@ * the top-level javadocs of this class for more explanations. */ String customWhereClause() default ""; + + /** + * The LIMIT to use in the SELECT query. + * + *

        If this starts with ":", it is interpreted as a named placeholder (that must have a + * corresponding parameter in the method signature). Otherwise, it must be a literal integer + * value. + * + *

        If the placeholder name is invalid or the literal can't be parsed as an integer (according + * to the rules of {@link Integer#parseInt(String)}), the mapper will issue a compile-time + * warning. + */ + String limit() default ""; + + /** + * The PER PARTITION LIMIT to use in the SELECT query. + * + *

        If this starts with ":", it is interpreted as a named placeholder (that must have a + * corresponding parameter in the method signature). Otherwise, it must be a literal integer + * value. + * + *

        If the placeholder name is invalid or the literal can't be parsed as an integer (according + * to the rules of {@link Integer#parseInt(String)}), the mapper will issue a compile-time + * warning. + */ + String perPartitionLimit() default ""; + + /** + * A list of orderings to add to an ORDER BY clause in the SELECT query. + * + *

        Each element must be a column name followed by a space and the word "ASC" or "DESC". If + * there are multiple columns, pass an array: + * + *

        +   * @Select(orderBy = {"hour DESC", "minute DESC"})
        +   * 
        + * + *

        If an element can't be parsed, the mapper will issue a compile-time error. + */ + String[] orderBy() default {}; + + /** A list of column names to be added to a GROUP BY clause in the SELECT query. */ + String[] groupBy() default {}; + + /** Whether to add an ALLOW FILTERING clause to the SELECT query. */ + boolean allowFiltering() default false; } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java index 9b4a9d74a08..cc1cb9b7e88 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/SetEntity.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -88,7 +90,7 @@ * compile-time warning. */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface SetEntity { /** @@ -98,4 +100,17 @@ * or {@link NullSavingStrategy#DO_NOT_SET}. */ NullSavingStrategy nullSavingStrategy() default NullSavingStrategy.DO_NOT_SET; + + /** + * Whether to tolerate missing columns in the target data structure. + * + *

        If {@code false} (the default), then the target must contain a matching column for every + * property in the entity definition, except computed ones. If such a column is not + * found, an {@link IllegalArgumentException} will be thrown. + * + *

        If {@code true}, the mapper will operate on a best-effort basis and attempt to write all + * entity properties that have a matching column in the target, leaving unmatched properties + * untouched. Beware that this may result in a partially-populated target. + */ + boolean lenient() default false; } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java index d62e55fd007..56f32432ea8 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/StatementAttributes.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +35,7 @@ * second. */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface StatementAttributes { /** * The name of the execution profile to use. diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java index 91836b8f6b1..1db111ccfd2 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Transient.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,5 +38,5 @@ * ClusteringColumn} annotations. 
*/ @Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Transient {} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java index 1206f82e0f3..97b8c5c99a2 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/TransientProperties.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,6 +17,11 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + /** * Annotates an {@link Entity} to indicate which properties should be considered 'transient', * meaning that they should not be mapped to any column (neither during reads nor writes). @@ -39,6 +46,8 @@ * implementing classes will share a common configuration without needing to explicitly annotate * each property with a {@link Transient} annotation. */ +@Target({ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) public @interface TransientProperties { /** Specifies a list of property names that should be considered transient. */ diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java index 8f816c60bee..02930d73aa4 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/annotations/Update.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +17,15 @@ */ package com.datastax.oss.driver.api.mapper.annotations; +import com.datastax.dse.driver.api.core.cql.reactive.ReactiveResultSet; import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -46,8 +51,8 @@ * *

        Parameters

        * - * The first parameter must be an entity instance. All of its non-PK properties will be interpreted - * as values to update. + *

        The first parameter must be an entity instance. All of its non-PK properties will be + * interpreted as values to update. * *

          *
        • If {@link #customWhereClause()} is empty, the mapper defaults to an update by primary key @@ -85,7 +90,7 @@ * *
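To illustrate both cases from the list above, here is a sketch (the DAO, entity and bind marker names are hypothetical):

```java
import com.datastax.oss.driver.api.mapper.annotations.Dao;
import com.datastax.oss.driver.api.mapper.annotations.Update;
import java.util.UUID;

@Dao
public interface ProductDao {
  // Empty customWhereClause: update by primary key, using the entity's PK properties.
  @Update
  void update(Product product);

  // Custom WHERE clause: each bind marker becomes an extra method parameter.
  @Update(customWhereClause = "id = :id")
  void updateWhereIdMatches(Product product, UUID id);
}
```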

          Return type

          * - * The method can return: + *

          The method can return: * *

            *
          • {@code void}. @@ -103,6 +108,12 @@ * ResultSet updateIfDescriptionMatches(Product product, String expectedDescription); * // if the condition fails, the result set will contain columns '[applied]' and 'description' * + *
          • a {@link BoundStatement}. This is intended for queries where you will execute this + * statement later or in a batch: + *
            + * @Update
            + * BoundStatement update(Product product);
            + *      
            *
          • a {@link CompletionStage} or {@link CompletableFuture} of any of the above. The mapper will * execute the query asynchronously. Note that for result sets, you need to switch to the * asynchronous equivalent {@link AsyncResultSet}. @@ -116,11 +127,17 @@ * @Update(customIfClause = "description = :expectedDescription") * CompletableFuture<AsyncResultSet> updateIfDescriptionMatches(Product product, String expectedDescription); * + *
          • a {@link ReactiveResultSet}. + *
            + * @Update
            + * ReactiveResultSet updateReactive(Product product);
            + *       
            + *
          • a {@linkplain MapperResultProducer custom type}. *
          * *
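Expanding on the `BoundStatement` item in the list above, a usage sketch (the `dao`, `session` and `product` variables are assumed to exist): the returned statements can be accumulated and executed together in a batch.

```java
import com.datastax.oss.driver.api.core.cql.BatchStatement;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.DefaultBatchType;

// Nothing is executed here: the mapper only binds the entities' values.
BoundStatement update1 = dao.update(product1);
BoundStatement update2 = dao.update(product2);

// Execute both updates together in a single batch.
BatchStatement batch =
    BatchStatement.builder(DefaultBatchType.LOGGED)
        .addStatement(update1)
        .addStatement(update2)
        .build();
session.execute(batch);
```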

          Target keyspace and table

          * - * If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated + *

          If a keyspace was specified when creating the DAO (see {@link DaoFactory}), then the generated * query targets that keyspace. Otherwise, it doesn't specify a keyspace, and will only work if the * mapper was built from a {@link Session} that has a {@linkplain * SessionBuilder#withKeyspace(CqlIdentifier) default keyspace} set. @@ -130,7 +147,7 @@ * entity class and the naming convention). */ @Target(ElementType.METHOD) -@Retention(RetentionPolicy.CLASS) +@Retention(RetentionPolicy.RUNTIME) public @interface Update { /** diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java index 81f3144d529..653b02c5d0c 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/EntityHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,7 +51,20 @@ public interface EntityHelper { /** * Sets the properties of an entity instance into a target data structure. * - *

          For example: + * @deprecated Use {@link #set(Object, SettableByName, NullSavingStrategy, boolean)} instead. + */ + @NonNull + @Deprecated + > SettableT set( + @NonNull EntityT entity, + @NonNull SettableT target, + @NonNull NullSavingStrategy nullSavingStrategy); + + /** + * Sets the properties of an entity instance into a target data structure. + * + *

          The generated code will attempt to write all entity properties in the target data structure. + * For example: * *

          {@code
              * target = target.set("id", entity.getId(), UUID.class);
          @@ -59,25 +74,50 @@ public interface EntityHelper {
              *
              * The column names are inferred from the naming strategy for this entity.
              *
          +   * 

          The target will typically be one of the built-in driver subtypes: {@link BoundStatement}, + * {@link BoundStatementBuilder} or {@link UdtValue}. Note that the default {@link BoundStatement} + * implementation is immutable, therefore this argument won't be modified in-place: you need to + * use the return value to get the resulting structure. + * + *
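A short sketch of that point, together with the `lenient` flag described in the next two paragraphs (the prepared statement and the generated helper are assumed to exist):

```java
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy;

// A partial update: this statement only has "description" and "id" variables.
BoundStatement blank = preparedPartialUpdate.bind();

// lenient = true: only entity properties with a matching variable are written.
BoundStatement populated =
    productHelper.set(product, blank, NullSavingStrategy.DO_NOT_SET, true);

// BoundStatement is immutable: `blank` is still empty, always use the return value.
session.execute(populated);
```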

          If {@code lenient} is {@code true}, the mapper will operate on a best-effort basis and + * attempt to write all entity properties that have a matching column in the target, leaving + * unmatched properties untouched. Beware that this may result in a partially-populated target. + * + *

          If {@code lenient} is {@code false}, then the target must contain a matching column for + * every property in the entity definition, except computed ones. If such a column is not + * found, an {@link IllegalArgumentException} will be thrown. + * * @param entity the entity that the values will be read from. - * @param target the data structure to fill. This will typically be one of the built-in driver - * subtypes: {@link BoundStatement}, {@link BoundStatementBuilder} or {@link UdtValue}. Note - * that the default {@link BoundStatement} implementation is immutable, therefore this - * argument won't be modified in-place: you need to use the return value to get the resulting - * structure. + * @param target the data structure to fill. + * @param lenient whether to tolerate incomplete targets. * @return the data structure resulting from the assignments. This is useful for immutable target * implementations (see above), otherwise it will be the same as {@code target}. + * @throws IllegalArgumentException if lenient is false and the target does not contain matching + * columns for every entity property. */ @NonNull - > SettableT set( + default > SettableT set( @NonNull EntityT entity, @NonNull SettableT target, - @NonNull NullSavingStrategy nullSavingStrategy); + @NonNull NullSavingStrategy nullSavingStrategy, + boolean lenient) { + return set(entity, target, nullSavingStrategy); + } /** * Gets values from a data structure to fill an entity instance. * - *

          For example: + * @deprecated Use {@link #get(GettableByName, boolean)} instead. + */ + @NonNull + @Deprecated + EntityT get(@NonNull GettableByName source); + + /** + * Gets values from a data structure to fill an entity instance. + * + *

          The generated code will attempt to read all entity properties from the source data + * structure. For example: * *

          {@code
              * User returnValue = new User();
          @@ -88,14 +128,29 @@ > SettableT set(
              *
              * The column names are inferred from the naming strategy for this entity.
              *
          -   * @param source the data structure to read from. This will typically be one of the built-in
          -   *     driver subtypes: {@link Row} or {@link UdtValue} ({@link BoundStatement} and {@link
          -   *     BoundStatementBuilder} are also possible, although it's less likely that data would be read
          -   *     back from them in this manner).
          +   * 

          The source will typically be one of the built-in driver subtypes: {@link Row} or {@link + * UdtValue} ({@link BoundStatement} and {@link BoundStatementBuilder} are also possible, although + * it's less likely that data would be read back from them in this manner). + * + *

          If {@code lenient} is {@code true}, the mapper will operate on a best-effort basis and + * attempt to read all entity properties that have a matching column in the source, leaving + * unmatched properties untouched. Beware that this may result in a partially-populated entity + * instance. + * + *

          If {@code lenient} is {@code false}, then the source must contain a matching column for + * every property in the entity definition, including computed ones. If such a column is + * not found, an {@link IllegalArgumentException} will be thrown. + * + * @param source the data structure to read from. + * @param lenient whether to tolerate incomplete sources. * @return the resulting entity. + * @throws IllegalArgumentException if lenient is false and the source does not contain matching + * columns for every entity property. */ @NonNull - EntityT get(@NonNull GettableByName source); + default EntityT get(@NonNull GettableByName source, boolean lenient) { + return get(source); + } /** * Builds an insert query for this entity. diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/GetterStyle.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/GetterStyle.java new file mode 100644 index 00000000000..21e4755f4dd --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/GetterStyle.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.mapper.entity.naming; + +import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; + +/** + * The style of getter that the mapper will look for when introspecting an entity class. + * + *

Note that introspection always starts by looking for getters first: no-arg, non-void methods + * that follow the configured style. Then the mapper will try to find a matching field (which is not + * required), and, if the entity is mutable, a setter. + * + * @see PropertyStrategy + */ +public enum GetterStyle { + + /** + * "JavaBeans" style: the method name must start with "get", or "is" for boolean properties. The + * name of the property is the getter name without a prefix, and decapitalized, for example {@code + * int getFoo() => foo}. + */ + JAVABEANS, + + /** + * "Fluent" style: any name will match (as long as the no-arg, non-void rule also holds), and is + * considered to be the property name without any prefix. For example {@code int foo() => foo}. + * + *

          Note that this is the convention used in compiled Scala case classes. Whenever the mapper + * processes a type that implements {@code scala.Product}, it will switch to this style by + * default. + */ + FLUENT, + ; +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java index efc3b2d006f..ac9d05895b9 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NameConverter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java index 0a3913aa93e..8846e4f6fcb 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/NamingConvention.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/SetterStyle.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/SetterStyle.java new file mode 100644 index 00000000000..26e301c5f76 --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/naming/SetterStyle.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.mapper.entity.naming; + +import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy; + +/** + * The style of setter that the mapper will look for when introspecting a mutable entity class. + * + *

          Note that introspection always starts by looking for getters first (see {@link GetterStyle}). + * Once a getter has been found, and if the entity is declared as {@link PropertyStrategy#mutable() + * mutable}, the mapper will try to find a matching setter: name inferred as described below, + * exactly one argument matching the property type, and the return type does not matter. + * + * @see PropertyStrategy + */ +public enum SetterStyle { + + /** + * "JavaBeans" style: the method name must start with "set", for example {@code int foo => + * setFoo(int)}. + */ + JAVABEANS, + + /** + * "Fluent" style: the method name must be the name of the property, without any prefix, for + * example {@code int foo => foo(int)}. + */ + FLUENT, + ; +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java index 9b0b668b69e..f2233e5721c 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/entity/saving/NullSavingStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.java new file mode 100644 index 00000000000..d262986b75f --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducer.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
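Putting `GetterStyle` and `SetterStyle` together, a hypothetical fluent-style entity could look like this (a sketch, assuming the `@PropertyStrategy` annotation referenced above):

```java
import com.datastax.oss.driver.api.mapper.annotations.Entity;
import com.datastax.oss.driver.api.mapper.annotations.PartitionKey;
import com.datastax.oss.driver.api.mapper.annotations.PropertyStrategy;
import com.datastax.oss.driver.api.mapper.entity.naming.GetterStyle;
import com.datastax.oss.driver.api.mapper.entity.naming.SetterStyle;
import java.util.UUID;

@Entity
@PropertyStrategy(
    getterStyle = GetterStyle.FLUENT,
    setterStyle = SetterStyle.FLUENT,
    mutable = true)
public class FluentProduct {
  private UUID id;

  // Fluent getter: no-arg, non-void, no "get" prefix; the property name is "id".
  @PartitionKey
  public UUID id() {
    return id;
  }

  // Fluent setter: same name as the property, exactly one argument of the property type.
  public void id(UUID id) {
    this.id = id;
  }
}
```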
+ */ +package com.datastax.oss.driver.api.mapper.result; + +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.data.GettableByName; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.api.mapper.MapperContext; +import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.concurrent.CompletionStage; + +/** + * A component that can be plugged into the object mapper, in order to return custom result types + * from DAO methods. + * + *

          For example, this could be used to substitute a 3rd-party future implementation for {@link + * CompletionStage}: + * + *

          + * public class CustomFutureProducer implements MapperResultProducer {
          + *   ...
          + * }
          + * 
          + * + *

          Producers are registered via the Java Service Provider mechanism (see {@link + * MapperResultProducerService}). DAO methods can then use the new type: + * + *

          + * @Dao
          + * public interface ProductDao {
          + *   @Select
          + *   CustomFuture<Product> findById(UUID productId);
          + * }
          + * 
          + * + * See the javadocs of the methods in this interface for more explanations. + */ +public interface MapperResultProducer { + + /** + * Checks if this producer can handle a particular result type. + * + *

          This will be invoked at runtime to select a producer: if a DAO method declares a return type + * that is not supported natively, then the mapper generates an implementation which, for every + * invocation, iterates through all the producers in the order that they were registered, + * and picks the first one where {@code canProduce()} returns true. + * + * @param resultType the DAO method's declared return type. If checking the top-level type is + * sufficient, then {@link GenericType#getRawType()} should do the trick. If you need to + * recurse into the type arguments, call {@link GenericType#getType()} and use the {@code + * java.lang.reflect} APIs. + */ + boolean canProduce(@NonNull GenericType resultType); + + /** + * Executes the statement generated by the mapper, and converts the result to the expected type. + * + *

This will be executed at runtime, every time the DAO method is called. + * + * @param statement the statement, ready to execute: the mapper has already bound all the values, + * and set all the necessary attributes (consistency, page size, etc). + * @param context the context in which the DAO method is executed. In particular, this is how you + * get access to the {@linkplain MapperContext#getSession() session}. + * @param entityHelper if the type to produce contains a mapped entity (e.g. {@code + * ListenableFuture}), an instance of the helper class to manipulate that entity. In + * particular, {@link EntityHelper#get(GettableByName) entityHelper.get()} allows you to + * convert rows into entity instances. If the type to produce does not contain an entity, this + * will be {@code null}. + * @return the object to return from the DAO method. This must match the type that this producer + * was selected for; there will be an unchecked cast at runtime. + */ + @Nullable + Object execute( + @NonNull Statement statement, + @NonNull MapperContext context, + @Nullable EntityHelper entityHelper); + + /** + * Surfaces any error encountered in the DAO method (either in the generated mapper code that + * builds the statement, or during invocation of {@link #execute}). + * + *

          For some result types, it is expected that errors will be wrapped in some sort of container + * instead of thrown directly; for example a failed future or publisher. + * + *

          If rethrowing is the right thing to do, then it is perfectly fine to do so from this method. + * If you throw checked exceptions, they will be propagated directly if the DAO method also + * declares them, or wrapped into a {@link RuntimeException} otherwise. + */ + @Nullable + Object wrapError(@NonNull Exception e) throws Exception; +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.java new file mode 100644 index 00000000000..b8afdba53b1 --- /dev/null +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/api/mapper/result/MapperResultProducerService.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.mapper.result; + +/** + * Provides the custom mapper result types that will be used in an application. + * + *

This class is loaded with the Java Service Provider Interface mechanism; you must reference it + * via a service descriptor: create a file {@code + * META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService}, with + * one or more lines, each referencing the name of an implementing class. + */ +public interface MapperResultProducerService { + + /** + * Returns the producers provided by this service. + * + *

Note that order matters: the producers will be tried from left to right until one matches. + * If there is some overlap between your producers' {@link MapperResultProducer#canProduce + * canProduce()} implementations, put the most specific ones first. + */ + Iterable getProducers(); +} diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java index 6ceb62799a0..5f617de52e1 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,11 +37,14 @@ import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; import com.datastax.oss.driver.internal.core.ConsistencyLevelRegistry; import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.cql.ResultSets; import com.datastax.oss.protocol.internal.ProtocolConstants; import java.time.Duration; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; /** Base class for generated implementations of {@link Dao}-annotated interfaces.
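For concreteness, here is a minimal end-to-end sketch of the producer SPI described above (class names are hypothetical, and Guava futures are just an example target type): a producer that maps the first row of a result to an entity wrapped in a `ListenableFuture`, plus the service that registers it. The service class is then listed in `META-INF/services/com.datastax.oss.driver.api.mapper.result.MapperResultProducerService`.

```java
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import com.datastax.oss.driver.api.mapper.MapperContext;
import com.datastax.oss.driver.api.mapper.entity.EntityHelper;
import com.datastax.oss.driver.api.mapper.result.MapperResultProducer;
import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;

public class ListenableFutureOfEntityProducer implements MapperResultProducer {

  @Override
  public boolean canProduce(@NonNull GenericType<?> resultType) {
    // Checking the raw type is enough here; recurse into type arguments for finer matching.
    return resultType.getRawType().equals(ListenableFuture.class);
  }

  @Nullable
  @Override
  public Object execute(
      @NonNull Statement<?> statement,
      @NonNull MapperContext context,
      @Nullable EntityHelper<?> entityHelper) {
    SettableFuture<Object> result = SettableFuture.create();
    context
        .getSession()
        .executeAsync(statement)
        .whenComplete(
            (resultSet, error) -> {
              if (error != null) {
                result.setException(error);
              } else {
                // Map the first row (if any) to an entity instance.
                Row row = resultSet.one();
                result.set(
                    (row == null || entityHelper == null) ? null : entityHelper.get(row, false));
              }
            });
    return result;
  }

  @Nullable
  @Override
  public Object wrapError(@NonNull Exception e) {
    // Errors surface through the returned future instead of being thrown.
    return Futures.immediateFailedFuture(e);
  }
}

// In its own file, referenced from the service descriptor:
class MyResultProducerService implements MapperResultProducerService {
  @Override
  public Iterable<MapperResultProducer> getProducers() {
    return ImmutableList.of(new ListenableFutureOfEntityProducer());
  }
}
```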
*/ public class DaoBase { @@ -57,6 +62,11 @@ public class DaoBase { protected static CompletionStage prepare( SimpleStatement statement, MapperContext context) { + if (context.getExecutionProfileName() != null) { + statement = statement.setExecutionProfileName(context.getExecutionProfileName()); + } else if (context.getExecutionProfile() != null) { + statement = statement.setExecutionProfile(context.getExecutionProfile()); + } return context.getSession().prepareAsync(statement); } @@ -163,15 +173,17 @@ public BoundStatementBuilder populateBoundStatementWithStatementAttributes( } private ConsistencyLevel getConsistencyLevelFromName(String name) { - InternalDriverContext idContext = (InternalDriverContext) (context.getSession().getContext()); + InternalDriverContext idContext = (InternalDriverContext) context.getSession().getContext(); ConsistencyLevelRegistry registry = idContext.getConsistencyLevelRegistry(); return registry.codeToLevel(registry.nameToCode(name)); } protected final MapperContext context; + protected final boolean isProtocolVersionV3; protected DaoBase(MapperContext context) { this.context = context; + this.isProtocolVersionV3 = isProtocolVersionV3(context); } protected ResultSet execute(Statement statement) { @@ -215,12 +227,12 @@ protected EntityT executeAndMapToSingleEntity( private EntityT asEntity(Row row, EntityHelper entityHelper) { return (row == null - // Special case for INSERT IF NOT EXISTS. If the row did not exists, the query returns + // Special case for INSERT IF NOT EXISTS. If the row did not exist, the query returns // only [applied], we want to return null to indicate there was no previous entity || (row.getColumnDefinitions().size() == 1 && row.getColumnDefinitions().get(0).getName().equals(APPLIED))) ? null - : entityHelper.get(row); + : entityHelper.get(row, false); } protected Optional executeAndMapToOptionalEntity( @@ -230,7 +242,13 @@ protected Optional executeAndMapToOptionalEntity( protected PagingIterable executeAndMapToEntityIterable( Statement statement, EntityHelper entityHelper) { - return execute(statement).map(entityHelper::get); + return execute(statement).map(row -> entityHelper.get(row, false)); + } + + protected Stream executeAndMapToEntityStream( + Statement statement, EntityHelper entityHelper) { + return StreamSupport.stream( + execute(statement).map(row -> entityHelper.get(row, false)).spliterator(), false); } protected CompletableFuture executeAsync(Statement statement) { @@ -273,16 +291,27 @@ protected CompletableFuture> executeAsyncAndMapToOpt protected CompletableFuture> executeAsyncAndMapToEntityIterable( Statement statement, EntityHelper entityHelper) { - return executeAsync(statement).thenApply(rs -> rs.map(entityHelper::get)); + return executeAsync(statement).thenApply(rs -> rs.map(row -> entityHelper.get(row, false))); + } + + protected CompletableFuture> executeAsyncAndMapToEntityStream( + Statement statement, EntityHelper entityHelper) { + return executeAsync(statement) + .thenApply(ResultSets::newInstance) + .thenApply(rs -> StreamSupport.stream(rs.map(entityHelper::get).spliterator(), false)); } protected static void throwIfProtocolVersionV3(MapperContext context) { - if (context.getSession().getContext().getProtocolVersion().getCode() - <= ProtocolConstants.Version.V3) { + if (isProtocolVersionV3(context)) { throw new MapperException( String.format( "You cannot use %s.%s for protocol version V3.", NullSavingStrategy.class.getSimpleName(), NullSavingStrategy.DO_NOT_SET.name())); } } + + protected static boolean 
isProtocolVersionV3(MapperContext context) { + return context.getSession().getContext().getProtocolVersion().getCode() + <= ProtocolConstants.Version.V3; + } } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java index 72f96a7ed82..32fae259769 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DaoCacheKey.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,28 +18,49 @@ package com.datastax.oss.driver.internal.mapper; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import java.util.Objects; public class DaoCacheKey { private final CqlIdentifier keyspaceId; private final CqlIdentifier tableId; + private final String executionProfileName; + private final DriverExecutionProfile executionProfile; - public DaoCacheKey(CqlIdentifier keyspaceId, CqlIdentifier tableId) { + public DaoCacheKey( + CqlIdentifier keyspaceId, + CqlIdentifier tableId, + String executionProfileName, + DriverExecutionProfile executionProfile) { this.keyspaceId = keyspaceId; this.tableId = tableId; + this.executionProfileName = executionProfileName; + this.executionProfile = executionProfile; } - public DaoCacheKey(CqlIdentifier keyspaceId, String tableName) { - this(keyspaceId, toId(tableName)); + public DaoCacheKey( + CqlIdentifier keyspaceId, + String tableName, + String executionProfileName, + DriverExecutionProfile executionProfile) { + this(keyspaceId, toId(tableName), executionProfileName, executionProfile); } - public DaoCacheKey(String keyspaceName, CqlIdentifier tableId) { - this(toId(keyspaceName), tableId); + public DaoCacheKey( + String keyspaceName, + CqlIdentifier tableId, + String executionProfileName, + DriverExecutionProfile executionProfile) { + this(toId(keyspaceName), tableId, executionProfileName, executionProfile); } - public DaoCacheKey(String keyspaceName, String tableName) { - this(toId(keyspaceName), toId(tableName)); + public DaoCacheKey( + String keyspaceName, + String tableName, + String executionProfileName, + DriverExecutionProfile executionProfile) { + this(toId(keyspaceName), toId(tableName), executionProfileName, executionProfile); } private static CqlIdentifier toId(String name) { @@ -52,6 +75,14 @@ public CqlIdentifier getTableId() { return tableId; } + public String getExecutionProfileName() { + return executionProfileName; + } + + public DriverExecutionProfile getExecutionProfile() { + return executionProfile; + } + 
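The new profile fields in `DaoCacheKey` above let DAO instances be cached per execution profile. On the mapper interface, this surfaces as a factory method along these lines (a sketch; the interface names are hypothetical, and it assumes the mapper's `@DaoProfile` parameter annotation):

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.mapper.annotations.DaoFactory;
import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace;
import com.datastax.oss.driver.api.mapper.annotations.DaoProfile;
import com.datastax.oss.driver.api.mapper.annotations.Mapper;

@Mapper
public interface InventoryMapper {
  // Each distinct (keyspace, profile) pair yields its own cached DAO instance.
  @DaoFactory
  ProductDao productDao(@DaoKeyspace CqlIdentifier keyspace, @DaoProfile String profileName);
}
```

Statements prepared by such a DAO then carry the given profile, as done in `DaoBase.prepare()` above.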
@Override public boolean equals(Object other) { if (other == this) { @@ -59,7 +90,9 @@ public boolean equals(Object other) { } else if (other instanceof DaoCacheKey) { DaoCacheKey that = (DaoCacheKey) other; return Objects.equals(this.keyspaceId, that.keyspaceId) - && Objects.equals(this.tableId, that.tableId); + && Objects.equals(this.tableId, that.tableId) + && Objects.equals(this.executionProfileName, that.executionProfileName) + && Objects.equals(this.executionProfile, that.executionProfile); } else { return false; } @@ -67,6 +100,6 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(keyspaceId, tableId); + return Objects.hash(keyspaceId, tableId, executionProfileName, executionProfile); } } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java index 1169c833cfc..2d09c2e853f 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/DefaultMapperContext.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,32 +19,56 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.api.mapper.MapperContext; import com.datastax.oss.driver.api.mapper.MapperException; import com.datastax.oss.driver.api.mapper.entity.naming.NameConverter; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducer; +import com.datastax.oss.driver.api.mapper.result.MapperResultProducerService; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; import com.datastax.oss.protocol.internal.util.collection.NullAllowingImmutableMap; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.lang.reflect.InvocationTargetException; import java.util.Map; import java.util.Objects; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class DefaultMapperContext implements MapperContext { + private static final Logger LOGGER = LoggerFactory.getLogger(DefaultMapperContext.class); + + private final ConcurrentMap, MapperResultProducer> resultProducerCache = + new ConcurrentHashMap<>(); + private final CqlSession session; private final CqlIdentifier keyspaceId; private final CqlIdentifier tableId; + private final String executionProfileName; + private final DriverExecutionProfile executionProfile; private final ConcurrentMap, NameConverter> nameConverterCache; private final Map customState; + private final ImmutableList resultProducers; public DefaultMapperContext( - @NonNull CqlSession session, @NonNull Map customState) { + @NonNull CqlSession session, + @Nullable CqlIdentifier keyspaceId, + @Nullable String executionProfileName, + @Nullable DriverExecutionProfile executionProfile, + @NonNull Map customState) { this( session, + keyspaceId, null, - null, + executionProfileName, + executionProfile, new ConcurrentHashMap<>(), NullAllowingImmutableMap.copyOf(customState)); } @@ -51,22 +77,43 @@ private DefaultMapperContext( CqlSession session, CqlIdentifier keyspaceId, CqlIdentifier tableId, + String executionProfileName, + DriverExecutionProfile executionProfile, ConcurrentMap, NameConverter> nameConverterCache, Map customState) { + if (executionProfile != null && executionProfileName != null) { + // the mapper code prevents this, so we should never get here + throw new IllegalArgumentException("Can't provide both a profile and a name"); + } this.session = session; this.keyspaceId = keyspaceId; this.tableId = tableId; this.nameConverterCache = nameConverterCache; this.customState = customState; + this.executionProfileName = executionProfileName; + this.executionProfile = executionProfile; + this.resultProducers = + locateResultProducers(((InternalDriverContext) session.getContext()).getClassLoader()); } - public DefaultMapperContext withKeyspaceAndTable( - @Nullable CqlIdentifier newKeyspaceId, @Nullable 
CqlIdentifier newTableId) { + public DefaultMapperContext withDaoParameters( + @Nullable CqlIdentifier newKeyspaceId, + @Nullable CqlIdentifier newTableId, + @Nullable String newExecutionProfileName, + @Nullable DriverExecutionProfile newExecutionProfile) { return (Objects.equals(newKeyspaceId, this.keyspaceId) - && Objects.equals(newTableId, this.tableId)) + && Objects.equals(newTableId, this.tableId) + && Objects.equals(newExecutionProfileName, this.executionProfileName) + && Objects.equals(newExecutionProfile, this.executionProfile)) ? this : new DefaultMapperContext( - session, newKeyspaceId, newTableId, nameConverterCache, customState); + session, + newKeyspaceId, + newTableId, + newExecutionProfileName, + newExecutionProfile, + nameConverterCache, + customState); } @NonNull @@ -87,6 +134,18 @@ public CqlIdentifier getTableId() { return tableId; } + @Nullable + @Override + public String getExecutionProfileName() { + return executionProfileName; + } + + @Nullable + @Override + public DriverExecutionProfile getExecutionProfile() { + return executionProfile; + } + @NonNull @Override public NameConverter getNameConverter(Class converterClass) { @@ -100,6 +159,24 @@ public Map getCustomState() { return customState; } + @NonNull + @Override + public MapperResultProducer getResultProducer(@NonNull GenericType resultToProduce) { + return resultProducerCache.computeIfAbsent( + resultToProduce, + k -> { + for (MapperResultProducer resultProducer : resultProducers) { + if (resultProducer.canProduce(k)) { + return resultProducer; + } + } + throw new IllegalArgumentException( + String.format( + "Found no registered %s that can produce %s", + MapperResultProducer.class.getSimpleName(), k)); + }); + } + private static NameConverter buildNameConverter(Class converterClass) { try { return converterClass.getDeclaredConstructor().newInstance(); @@ -115,4 +192,23 @@ private static NameConverter buildNameConverter(Class c e); } } + + private static ImmutableList locateResultProducers( + ClassLoader classLoader) { + LOGGER.debug( + "Locating result producers with CL = {}, MapperResultProducerService CL = {}", + classLoader, + MapperResultProducerService.class.getClassLoader()); + ImmutableList.Builder builder = ImmutableList.builder(); + try { + ServiceLoader loader = + ServiceLoader.load(MapperResultProducerService.class, classLoader); + loader.iterator().forEachRemaining(provider -> builder.addAll(provider.getProducers())); + } catch (Exception | ServiceConfigurationError e) { + LOGGER.error("Failed to locate result producers", e); + } + ImmutableList producers = builder.build(); + LOGGER.debug("Located {} result producers: {}", producers.size(), producers); + return producers; + } } diff --git a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java index 9b17935702c..3977ea0c451 100644 --- a/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java +++ b/mapper-runtime/src/main/java/com/datastax/oss/driver/internal/mapper/entity/EntityHelperBase.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +18,36 @@ package com.datastax.oss.driver.internal.mapper.entity; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.data.AccessibleByName; +import com.datastax.oss.driver.api.core.data.GettableByName; +import com.datastax.oss.driver.api.core.data.SettableByName; +import com.datastax.oss.driver.api.core.data.UdtValue; +import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; +import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; +import com.datastax.oss.driver.api.mapper.MapperBuilder; import com.datastax.oss.driver.api.mapper.MapperContext; import com.datastax.oss.driver.api.mapper.MapperException; +import com.datastax.oss.driver.api.mapper.annotations.Dao; import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; import com.datastax.oss.driver.api.mapper.annotations.Entity; import com.datastax.oss.driver.api.mapper.entity.EntityHelper; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.internal.core.util.CollectionsUtils; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; public abstract class EntityHelperBase implements EntityHelper { @@ -62,7 +86,24 @@ public CqlIdentifier getTableId() { return tableId; } - protected void throwIfKeyspaceMissing() { + @NonNull + @Override + @Deprecated + public > SettableT set( + @NonNull EntityT entity, + @NonNull SettableT target, + @NonNull NullSavingStrategy nullSavingStrategy) { + return set(entity, target, nullSavingStrategy, false); + } + + @NonNull + @Override + @Deprecated + public EntityT get(@NonNull GettableByName source) { + return get(source, false); + } + + public void throwIfKeyspaceMissing() { if (this.getKeyspaceId() == null && !context.getSession().getKeyspace().isPresent()) { throw new MapperException( String.format( @@ -75,4 +116,138 @@ protected void throwIfKeyspaceMissing() { DaoKeyspace.class.getSimpleName())); } } + + public List findMissingColumns( + List entityColumns, Collection cqlColumns) { + return findMissingCqlIdentifiers( + entityColumns, + cqlColumns.stream().map(ColumnMetadata::getName).collect(Collectors.toList())); + } + + public List 
findMissingCqlIdentifiers( + List entityColumns, Collection cqlColumns) { + List missingColumns = new ArrayList<>(); + for (CqlIdentifier entityCqlIdentifier : entityColumns) { + if (!cqlColumns.contains(entityCqlIdentifier)) { + missingColumns.add(entityCqlIdentifier); + } + } + return missingColumns; + } + + /** + * When a new instance of a class annotated with {@link Dao} is created, an automatic schema + * validation check is performed. It verifies that all {@link Dao} entity fields are present in + * the CQL table; if not, an {@link IllegalArgumentException} with a detailed message is thrown. + * This check adds startup overhead, so once your app is stable you may want to disable it. The + * schema validation check is enabled by default. It can be disabled using the {@link + * MapperBuilder#withSchemaValidationEnabled(boolean)} method. + */ + public abstract void validateEntityFields(); + + public static List findTypeMismatches( + Map> entityColumns, + Map cqlColumns, + CodecRegistry codecRegistry) { + Map cqlColumnsDataTypes = + cqlColumns.entrySet().stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + cqlIdentifierColumnMetadataEntry -> + cqlIdentifierColumnMetadataEntry.getValue().getType())); + + return findDataTypeMismatches(entityColumns, cqlColumnsDataTypes, codecRegistry); + } + + public static List findTypeMismatches( + Map> entityColumns, + List cqlColumns, + List cqlTypes, + CodecRegistry codecRegistry) { + return findDataTypeMismatches( + entityColumns, + CollectionsUtils.combineListsIntoOrderedMap(cqlColumns, cqlTypes), + codecRegistry); + } + + private static List findDataTypeMismatches( + Map> entityColumns, + Map cqlColumns, + CodecRegistry codecRegistry) { + List missingCodecs = new ArrayList<>(); + + for (Map.Entry> entityEntry : entityColumns.entrySet()) { + DataType dataType = cqlColumns.get(entityEntry.getKey()); + if (dataType == null) { + // this will not happen because it will be caught by the generateMissingColumnsCheck() method + throw new AssertionError( + "There is no cql column for entity column: " + entityEntry.getKey()); + } + try { + codecRegistry.codecFor(dataType, entityEntry.getValue()); + } catch (CodecNotFoundException exception) { + missingCodecs.add( + String.format( + "Field: %s, Entity Type: %s, CQL type: %s", + entityEntry.getKey(), exception.getJavaType(), exception.getCqlType())); + } + } + return missingCodecs; + } + + public void throwMissingUdtTypesIfNotEmpty( + List missingTypes, + CqlIdentifier keyspaceId, + CqlIdentifier tableId, + String entityClassName) { + throwMissingTypesIfNotEmpty(missingTypes, keyspaceId, tableId, entityClassName, "udt"); + } + + public void throwMissingTableTypesIfNotEmpty( + List missingTypes, + CqlIdentifier keyspaceId, + CqlIdentifier tableId, + String entityClassName) { + throwMissingTypesIfNotEmpty(missingTypes, keyspaceId, tableId, entityClassName, "table"); + } + + public void throwMissingTypesIfNotEmpty( + List missingTypes, + CqlIdentifier keyspaceId, + CqlIdentifier tableId, + String entityClassName, + String type) { + if (!missingTypes.isEmpty()) { + throw new IllegalArgumentException( + String.format( + "The CQL ks.%s: %s.%s defined in the entity class: %s declares type mappings that are not supported by the codec registry:\n%s", + type, keyspaceId, tableId, entityClassName, String.join("\n", missingTypes))); + } + } + + public boolean keyspaceNamePresent( + Map keyspaces, CqlIdentifier keyspaceId) { + return keyspaces.containsKey(keyspaceId); + } + + public boolean
hasProperty(AccessibleByName source, String name) { + if (source instanceof Row) { + return ((Row) source).getColumnDefinitions().contains(name); + } else if (source instanceof UdtValue) { + return ((UdtValue) source).getType().contains(name); + } else if (source instanceof BoundStatement) { + return ((BoundStatement) source) + .getPreparedStatement() + .getVariableDefinitions() + .contains(name); + } else if (source instanceof BoundStatementBuilder) { + return ((BoundStatementBuilder) source) + .getPreparedStatement() + .getVariableDefinitions() + .contains(name); + } + // other implementations: assume the property is present + return true; + } } diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/DependencyCheckTest.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/DependencyCheckTest.java new file mode 100644 index 00000000000..03c1e5bb24f --- /dev/null +++ b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/DependencyCheckTest.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.mapper; + +import com.datastax.dse.driver.internal.DependencyCheckTestBase; +import java.nio.file.Path; +import java.nio.file.Paths; + +public class DependencyCheckTest extends DependencyCheckTestBase { + + @Override + protected Path getDepsTxtPath() { + return Paths.get( + getBaseResourcePathString(), + "target", + "classes", + "com", + "datastax", + "dse", + "driver", + "internal", + "mapper", + "deps.txt"); + } +} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSetTckTest.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSetTckTest.java new file mode 100644 index 00000000000..efd223b1314 --- /dev/null +++ b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MappedReactiveResultSetTckTest.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
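Given the startup overhead of `validateEntityFields()` noted above, the check can be turned off once the schema is stable. A sketch, where `InventoryMapper`/`InventoryMapperBuilder` are hypothetical generated names:

```java
import com.datastax.oss.driver.api.core.CqlSession;

CqlSession session = CqlSession.builder().build();

// withSchemaValidationEnabled(...) comes from MapperBuilder; the check is on by default.
InventoryMapper mapper =
    new InventoryMapperBuilder(session).withSchemaValidationEnabled(false).build();
```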
+ */ +package com.datastax.dse.driver.api.mapper.reactive; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.internal.core.cql.reactive.DefaultReactiveResultSet; +import com.datastax.dse.driver.internal.mapper.reactive.DefaultMappedReactiveResultSet; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import io.reactivex.Flowable; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.reactivestreams.Publisher; +import org.reactivestreams.tck.PublisherVerification; +import org.reactivestreams.tck.TestEnvironment; + +public class MappedReactiveResultSetTckTest extends PublisherVerification { + + public MappedReactiveResultSetTckTest() { + super(new TestEnvironment()); + } + + @Override + public Publisher createPublisher(long elements) { + // The TCK usually requests between 0 and 20 items, or Long.MAX_VALUE. + // Past 3 elements it never checks how many elements have been effectively produced, + // so we can safely cap at, say, 20. + int effective = (int) Math.min(elements, 20L); + return new DefaultMappedReactiveResultSet<>( + new DefaultReactiveResultSet(() -> createResults(effective)), row -> row.getInt(0)); + } + + @Override + public Publisher createFailedPublisher() { + DefaultReactiveResultSet publisher = new DefaultReactiveResultSet(() -> createResults(1)); + // Since our publisher does not support multiple + // subscriptions, we use that to create a failed publisher. + publisher.subscribe(new TestSubscriber<>()); + return new DefaultMappedReactiveResultSet<>(publisher, row -> row.getInt(0)); + } + + private static CompletableFuture createResults(int elements) { + CompletableFuture previous = null; + if (elements > 0) { + // create pages of 5 elements each to exercise pagination + List pages = + Flowable.range(0, elements).buffer(5).map(List::size).toList().blockingGet(); + Collections.reverse(pages); + for (Integer size : pages) { + List rows = + Flowable.range(0, size) + .map( + i -> { + Row row = mock(Row.class); + when(row.getInt(0)).thenReturn(i); + return row; + }) + .toList() + .blockingGet(); + CompletableFuture future = new CompletableFuture<>(); + future.complete(new MockAsyncResultSet(rows, previous)); + previous = future; + } + } else { + previous = new CompletableFuture<>(); + previous.complete(new MockAsyncResultSet(0, null)); + } + return previous; + } +} diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java new file mode 100644 index 00000000000..849839b7904 --- /dev/null +++ b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
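Outside the TCK harness, a mapped reactive result set like the one exercised above is a plain Reactive Streams publisher, so any compliant library can consume it. For example, with RxJava 2 (the DAO method name is hypothetical):

```java
import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet;
import io.reactivex.Flowable;
import java.util.List;

MappedReactiveResultSet<Product> resultSet = dao.findAllReactive();
// MappedReactiveResultSet implements Publisher<Product>.
List<Product> products = Flowable.fromPublisher(resultSet).toList().blockingGet();
```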
diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java
new file mode 100644
index 00000000000..849839b7904
--- /dev/null
+++ b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockAsyncResultSet.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.mapper.reactive;
+
+import static org.mockito.Mockito.mock;
+
+import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
+import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
+import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
+import com.datastax.oss.driver.api.core.cql.Row;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CompletionStage;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class MockAsyncResultSet implements AsyncResultSet {
+
+  private final List<Row> rows;
+  private final Iterator<Row> iterator;
+  private final CompletionStage<AsyncResultSet> nextPage;
+  private final ExecutionInfo executionInfo = mock(ExecutionInfo.class);
+  private final ColumnDefinitions columnDefinitions = mock(ColumnDefinitions.class);
+  private int remaining;
+
+  public MockAsyncResultSet(int size, CompletionStage<AsyncResultSet> nextPage) {
+    this(IntStream.range(0, size).boxed().map(MockRow::new).collect(Collectors.toList()), nextPage);
+  }
+
+  public MockAsyncResultSet(List<Row> rows, CompletionStage<AsyncResultSet> nextPage) {
+    this.rows = rows;
+    iterator = rows.iterator();
+    remaining = rows.size();
+    this.nextPage = nextPage;
+  }
+
+  @Override
+  public Row one() {
+    Row next = iterator.next();
+    remaining--;
+    return next;
+  }
+
+  @Override
+  public int remaining() {
+    return remaining;
+  }
+
+  @NonNull
+  @Override
+  public List<Row> currentPage() {
+    return new ArrayList<>(rows);
+  }
+
+  @Override
+  public boolean hasMorePages() {
+    return nextPage != null;
+  }
+
+  @NonNull
+  @Override
+  public CompletionStage<AsyncResultSet> fetchNextPage() throws IllegalStateException {
+    return nextPage;
+  }
+
+  @NonNull
+  @Override
+  public ColumnDefinitions getColumnDefinitions() {
+    return columnDefinitions;
+  }
+
+  @NonNull
+  @Override
+  public ExecutionInfo getExecutionInfo() {
+    return executionInfo;
+  }
+
+  @Override
+  public boolean wasApplied() {
+    return true;
+  }
+}
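A hedged usage sketch for the mock: draining all pages through the standard `AsyncResultSet` paging contract (`remaining`, `hasMorePages`, `fetchNextPage`); the wrapper class is hypothetical:

```java
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import java.util.concurrent.ExecutionException;

class PageCountExample {
  static int countAllRows(AsyncResultSet rs) throws ExecutionException, InterruptedException {
    int count = 0;
    while (true) {
      count += rs.remaining();
      if (!rs.hasMorePages()) {
        return count; // last page reached
      }
      rs = rs.fetchNextPage().toCompletableFuture().get();
    }
  }
}
```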
diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockRow.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockRow.java
new file mode 100644
index 00000000000..0c3ead94349
--- /dev/null
+++ b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/MockRow.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.mapper.reactive;
+
+import static org.mockito.Mockito.mock;
+
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.DefaultProtocolVersion;
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
+import com.datastax.oss.driver.api.core.cql.Row;
+import com.datastax.oss.driver.api.core.detach.AttachmentPoint;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
+import com.datastax.oss.driver.internal.core.cql.EmptyColumnDefinitions;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.List;
+
+class MockRow implements Row {
+
+  private int index;
+
+  MockRow(int index) {
+    this.index = index;
+  }
+
+  @Override
+  public int size() {
+    return 0;
+  }
+
+  @NonNull
+  @Override
+  public CodecRegistry codecRegistry() {
+    return mock(CodecRegistry.class);
+  }
+
+  @NonNull
+  @Override
+  public ProtocolVersion protocolVersion() {
+    return DefaultProtocolVersion.V4;
+  }
+
+  @NonNull
+  @Override
+  public ColumnDefinitions getColumnDefinitions() {
+    return EmptyColumnDefinitions.INSTANCE;
+  }
+
+  @NonNull
+  @Override
+  public List<Integer> allIndicesOf(@NonNull String name) {
+    return Collections.singletonList(0);
+  }
+
+  @Override
+  public int firstIndexOf(@NonNull String name) {
+    return 0;
+  }
+
+  @NonNull
+  @Override
+  public List<Integer> allIndicesOf(@NonNull CqlIdentifier id) {
+    return Collections.singletonList(0);
+  }
+
+  @Override
+  public int firstIndexOf(@NonNull CqlIdentifier id) {
+    return 0;
+  }
+
+  @NonNull
+  @Override
+  public DataType getType(int i) {
+    return DataTypes.INT;
+  }
+
+  @NonNull
+  @Override
+  public DataType getType(@NonNull String name) {
+    return DataTypes.INT;
+  }
+
+  @NonNull
+  @Override
+  public DataType getType(@NonNull CqlIdentifier id) {
+    return DataTypes.INT;
+  }
+
+  @Override
+  public ByteBuffer getBytesUnsafe(int i) {
+    return null;
+  }
+
+  @Override
+  public boolean isDetached() {
+    return false;
+  }
+
+  @Override
+  public void attach(@NonNull AttachmentPoint attachmentPoint) {}
+
+  // equals and hashCode required for TCK tests that check that two subscribers
+  // receive the exact same set of items.
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof MockRow)) {
+      return false;
+    }
+    MockRow mockRow = (MockRow) o;
+    return index == mockRow.index;
+  }
+
+  @Override
+  public int hashCode() {
+    return index;
+  }
+}
diff --git a/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/TestSubscriber.java b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/TestSubscriber.java
new file mode 100644
index 00000000000..6886b9a7622
--- /dev/null
+++ b/mapper-runtime/src/test/java/com/datastax/dse/driver/api/mapper/reactive/TestSubscriber.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.mapper.reactive;
+
+import static org.assertj.core.api.Fail.fail;
+
+import com.datastax.oss.driver.shaded.guava.common.util.concurrent.Uninterruptibles;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+
+public class TestSubscriber<T> implements Subscriber<T> {
+
+  private final List<T> elements = new ArrayList<>();
+  private final CountDownLatch latch = new CountDownLatch(1);
+  private Subscription subscription;
+  private Throwable error;
+
+  @Override
+  public void onSubscribe(Subscription s) {
+    if (subscription != null) {
+      throw new AssertionError("already subscribed");
+    }
+    subscription = s;
+    s.request(Long.MAX_VALUE);
+  }
+
+  @Override
+  public void onNext(T t) {
+    elements.add(t);
+  }
+
+  @Override
+  public void onError(Throwable t) {
+    error = t;
+    latch.countDown();
+  }
+
+  @Override
+  public void onComplete() {
+    latch.countDown();
+  }
+
+  @Nullable
+  public Throwable getError() {
+    return error;
+  }
+
+  @NonNull
+  public List<T> getElements() {
+    return elements;
+  }
+
+  public void awaitTermination() {
+    if (!Uninterruptibles.awaitUninterruptibly(latch, 1, TimeUnit.MINUTES)) {
+      fail("subscriber not terminated");
+    }
+  }
+}
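A sketch of how this helper is typically driven in a test; the `assertEmits012` wrapper and the expected elements are illustrative only:

```java
import static org.assertj.core.api.Assertions.assertThat;

import org.reactivestreams.Publisher;

class TestSubscriberExample {
  static void assertEmits012(Publisher<Integer> publisher) {
    TestSubscriber<Integer> subscriber = new TestSubscriber<>();
    publisher.subscribe(subscriber);
    subscriber.awaitTermination(); // waits up to 1 minute for onComplete/onError
    assertThat(subscriber.getError()).isNull();
    assertThat(subscriber.getElements()).containsExactly(0, 1, 2);
  }
}
```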
+#
+
+project.basedir=${basedir}
\ No newline at end of file
diff --git a/metrics/micrometer/pom.xml b/metrics/micrometer/pom.xml
new file mode 100644
index 00000000000..37ba8556a53
--- /dev/null
+++ b/metrics/micrometer/pom.xml
@@ -0,0 +1,152 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.cassandra</groupId>
+    <artifactId>java-driver-parent</artifactId>
+    <version>4.19.3-SNAPSHOT</version>
+    <relativePath>../../</relativePath>
+  </parent>
+  <artifactId>java-driver-metrics-micrometer</artifactId>
+  <packaging>bundle</packaging>
+  <name>Apache Cassandra Java Driver - Metrics - Micrometer</name>
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <groupId>${project.groupId}</groupId>
+        <artifactId>java-driver-bom</artifactId>
+        <version>${project.version}</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+  <dependencies>
+    <dependency>
+      <groupId>io.micrometer</groupId>
+      <artifactId>micrometer-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cassandra</groupId>
+      <artifactId>java-driver-core</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>io.dropwizard.metrics</groupId>
+          <artifactId>metrics-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.hdrhistogram</groupId>
+          <artifactId>HdrHistogram</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.github.stephenc.jcip</groupId>
+      <artifactId>jcip-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>ch.qos.logback</groupId>
+      <artifactId>logback-classic</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.tngtech.java</groupId>
+      <artifactId>junit-dataprovider</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.cassandra</groupId>
+      <artifactId>java-driver-core</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+  </dependencies>
+  <build>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+      </resource>
+      <resource>
+        <directory>${project.basedir}/../..</directory>
+        <includes>
+          <include>LICENSE</include>
+          <include>NOTICE_binary.txt</include>
+          <include>NOTICE.txt</include>
+        </includes>
+        <targetPath>META-INF</targetPath>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>javadoc-jar</id>
+            <phase>package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+            <configuration>
+              <classifier>javadoc</classifier>
+              <includes>
+                <include>**</include>
+              </includes>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.revapi</groupId>
+        <artifactId>revapi-maven-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
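For readers wiring this module in, a hedged sketch of how a session might be built against it, assuming the 4.10+ builder API (`withMetricRegistry`) and the short factory class name accepted by `advanced.metrics.factory.class`; the registry choice and enabled metrics are illustrative:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.Collections;

class MicrometerSessionExample {
  static CqlSession build() {
    MeterRegistry registry = new SimpleMeterRegistry();
    return CqlSession.builder()
        .withMetricRegistry(registry)
        .withConfigLoader(
            DriverConfigLoader.programmaticBuilder()
                .withString(DefaultDriverOption.METRICS_FACTORY_CLASS, "MicrometerMetricsFactory")
                .withStringList(
                    DefaultDriverOption.METRICS_SESSION_ENABLED,
                    Collections.singletonList("cql-requests"))
                .build())
        .build();
  }
}
```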
diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricUpdater.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricUpdater.java
new file mode 100644
index 00000000000..b9507c8b7cf
--- /dev/null
+++ b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricUpdater.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.metrics.micrometer;
+
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.config.DriverOption;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater;
+import com.datastax.oss.driver.internal.core.metrics.MetricId;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import io.micrometer.core.instrument.Counter;
+import io.micrometer.core.instrument.DistributionSummary;
+import io.micrometer.core.instrument.Gauge;
+import io.micrometer.core.instrument.Meter;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Tag;
+import io.micrometer.core.instrument.Timer;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+import net.jcip.annotations.ThreadSafe;
+
+@ThreadSafe
+public abstract class MicrometerMetricUpdater<MetricT> extends AbstractMetricUpdater<MetricT> {
+
+  protected final MeterRegistry registry;
+
+  protected final ConcurrentMap<MetricT, Meter> metrics = new ConcurrentHashMap<>();
+
+  protected MicrometerMetricUpdater(
+      InternalDriverContext context, Set<MetricT> enabledMetrics, MeterRegistry registry) {
+    super(context, enabledMetrics);
+    this.registry = registry;
+  }
+
+  @Override
+  public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) {
+    if (isEnabled(metric, profileName)) {
+      getOrCreateCounterFor(metric).increment(amount);
+    }
+  }
+
+  @Override
+  public void updateHistogram(MetricT metric, @Nullable String profileName, long value) {
+    if (isEnabled(metric, profileName)) {
+      getOrCreateDistributionSummaryFor(metric).record(value);
+    }
+  }
+
+  @Override
+  public void markMeter(MetricT metric, @Nullable String profileName, long amount) {
+    if (isEnabled(metric, profileName)) {
+      // There is no meter type in Micrometer, so use a counter
+      getOrCreateCounterFor(metric).increment(amount);
+    }
+  }
+
+  @Override
+  public void updateTimer(
+      MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) {
+    if (isEnabled(metric, profileName)) {
+      getOrCreateTimerFor(metric).record(duration, unit);
+    }
+  }
+
+  @Override
+  public void clearMetrics() {
+    for (Meter metric : metrics.values()) {
+      registry.remove(metric);
+    }
+    metrics.clear();
+  }
+
+  protected abstract MetricId getMetricId(MetricT metric);
+
+  protected void initializeGauge(
+      MetricT metric, DriverExecutionProfile profile, Supplier<Number> supplier) {
+    if (isEnabled(metric, profile.getName())) {
+      metrics.computeIfAbsent(
+          metric,
+          m -> {
+            MetricId id = getMetricId(m);
+            Iterable<Tag> tags = MicrometerTags.toMicrometerTags(id.getTags());
+            return Gauge.builder(id.getName(), supplier).tags(tags).register(registry);
+          });
+    }
+  }
+
+  protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) {
+    if (isEnabled(metric, profile.getName())) {
+      getOrCreateCounterFor(metric);
+    }
+  }
+
+  protected void initializeTimer(MetricT metric, DriverExecutionProfile profile) {
+    if (isEnabled(metric, profile.getName())) {
+      getOrCreateTimerFor(metric);
+    }
+  }
+
+  protected Counter getOrCreateCounterFor(MetricT metric) {
+    return (Counter)
+        metrics.computeIfAbsent(
+            metric,
+            m -> {
+              MetricId id = getMetricId(m);
+              Iterable<Tag> tags = MicrometerTags.toMicrometerTags(id.getTags());
+              return Counter.builder(id.getName()).tags(tags).register(registry);
+            });
+  }
+
+  protected DistributionSummary getOrCreateDistributionSummaryFor(MetricT metric) {
+    return (DistributionSummary)
+        metrics.computeIfAbsent(
+            metric,
+            m -> {
+              MetricId id = getMetricId(m);
+              Iterable<Tag> tags = MicrometerTags.toMicrometerTags(id.getTags());
+              DistributionSummary.Builder builder =
+                  DistributionSummary.builder(id.getName()).tags(tags);
+              builder = configureDistributionSummary(builder, metric, id);
+              return builder.register(registry);
+            });
+  }
+
+  protected Timer getOrCreateTimerFor(MetricT metric) {
+    return (Timer)
+        metrics.computeIfAbsent(
+            metric,
+            m -> {
+              MetricId id = getMetricId(m);
+              Iterable<Tag> tags = MicrometerTags.toMicrometerTags(id.getTags());
+              Timer.Builder builder = Timer.builder(id.getName()).tags(tags);
+              builder = configureTimer(builder, metric, id);
+              return builder.register(registry);
+            });
+  }
+
+  protected Timer.Builder configureTimer(Timer.Builder builder, MetricT metric, MetricId id) {
+    DriverExecutionProfile profile = context.getConfig().getDefaultProfile();
+    if (profile.getBoolean(DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS)) {
+      builder.publishPercentileHistogram();
+    }
+    return builder;
+  }
+
+  @SuppressWarnings("unused")
+  protected DistributionSummary.Builder configureDistributionSummary(
+      DistributionSummary.Builder builder, MetricT metric, MetricId id) {
+    DriverExecutionProfile profile = context.getConfig().getDefaultProfile();
+    if (profile.getBoolean(DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS)) {
+      builder.publishPercentileHistogram();
+    }
+    return builder;
+  }
+
+  static double[] toDoubleArray(List<Double> doubleList) {
+    return doubleList.stream().mapToDouble(Double::doubleValue).toArray();
+  }
+
+  static void configurePercentilesPublishIfDefined(
+      Timer.Builder builder, DriverExecutionProfile profile, DriverOption driverOption) {
+    if (profile.isDefined(driverOption)) {
+      builder.publishPercentiles(toDoubleArray(profile.getDoubleList(driverOption)));
+    }
+  }
+}
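The updater above leans on `ConcurrentMap.computeIfAbsent` so that each metric maps to exactly one registered `Meter`, created lazily on first use and reused afterwards. The same pattern in isolation, with made-up names:

```java
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class MeterCacheExample {
  private final MeterRegistry registry = new SimpleMeterRegistry();
  private final ConcurrentMap<String, Counter> counters = new ConcurrentHashMap<>();

  void increment(String name) {
    // One Counter per name; registration happens at most once per key.
    counters
        .computeIfAbsent(name, n -> Counter.builder(n).tag("source", "example").register(registry))
        .increment();
  }
}
```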
diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactory.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactory.java
new file mode 100644
index 00000000000..83cd0f80b02
--- /dev/null
+++ b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactory.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.metrics.micrometer;
+
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.metadata.NodeState;
+import com.datastax.oss.driver.api.core.metrics.Metrics;
+import com.datastax.oss.driver.api.core.metrics.NodeMetric;
+import com.datastax.oss.driver.api.core.metrics.SessionMetric;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent;
+import com.datastax.oss.driver.internal.core.metrics.MetricPaths;
+import com.datastax.oss.driver.internal.core.metrics.MetricsFactory;
+import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater;
+import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater;
+import com.datastax.oss.driver.internal.core.metrics.NoopSessionMetricUpdater;
+import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater;
+import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.netty.util.concurrent.EventExecutor;
+import java.util.Optional;
+import java.util.Set;
+import net.jcip.annotations.ThreadSafe;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@ThreadSafe
+public class MicrometerMetricsFactory implements MetricsFactory {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MicrometerMetricsFactory.class);
+
+  private final InternalDriverContext context;
+  private final Set<NodeMetric> enabledNodeMetrics;
+  private final MeterRegistry registry;
+  private final SessionMetricUpdater sessionUpdater;
+
+  public MicrometerMetricsFactory(DriverContext context) {
+    this.context = (InternalDriverContext) context;
+    String logPrefix = context.getSessionName();
+    DriverExecutionProfile config = context.getConfig().getDefaultProfile();
+    Set<SessionMetric> enabledSessionMetrics =
+        MetricPaths.parseSessionMetricPaths(
+            config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix);
+    this.enabledNodeMetrics =
+        MetricPaths.parseNodeMetricPaths(
+            config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix);
+    if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) {
+      LOG.debug("[{}] All metrics are disabled, Session.getMetrics will be empty", logPrefix);
+      this.registry = null;
+      this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE;
+    } else {
+      // try to get the metric registry from the context
+      Object possibleMetricRegistry = this.context.getMetricRegistry();
+      if (possibleMetricRegistry == null) {
+        // metrics are enabled, but a metric registry was not supplied to the context
+        // use the global registry
+        possibleMetricRegistry = io.micrometer.core.instrument.Metrics.globalRegistry;
+      }
+      if (possibleMetricRegistry instanceof MeterRegistry) {
+        this.registry = (MeterRegistry) possibleMetricRegistry;
+        this.sessionUpdater =
+            new MicrometerSessionMetricUpdater(this.context, enabledSessionMetrics, this.registry);
+      } else {
+        // Metrics are enabled, but the registry object is not an expected type
+        throw new IllegalArgumentException(
+            "Unexpected Metrics registry object. Expected registry object to be of type '"
+                + MeterRegistry.class.getName()
+                + "', but was '"
+                + possibleMetricRegistry.getClass().getName()
+                + "'");
+      }
+      if (!enabledNodeMetrics.isEmpty()) {
+        EventExecutor adminEventExecutor =
+            this.context.getNettyOptions().adminEventExecutorGroup().next();
+        this.context
+            .getEventBus()
+            .register(
+                NodeStateEvent.class,
+                RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent));
+      }
+    }
+  }
+
+  @Override
+  public Optional<Metrics> getMetrics() {
+    return Optional.empty();
+  }
+
+  @Override
+  public SessionMetricUpdater getSessionUpdater() {
+    return sessionUpdater;
+  }
+
+  @Override
+  public NodeMetricUpdater newNodeUpdater(Node node) {
+    if (registry == null) {
+      return NoopNodeMetricUpdater.INSTANCE;
+    } else {
+      return new MicrometerNodeMetricUpdater(node, context, enabledNodeMetrics, registry);
+    }
+  }
+
+  protected void processNodeStateEvent(NodeStateEvent event) {
+    if (event.newState == NodeState.DOWN
+        || event.newState == NodeState.FORCED_DOWN
+        || event.newState == null) {
+      // node is DOWN or REMOVED
+      ((MicrometerNodeMetricUpdater) event.node.getMetricUpdater()).startMetricsExpirationTimeout();
+    } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) {
+      // node is UP or ADDED
+      ((MicrometerNodeMetricUpdater) event.node.getMetricUpdater())
+          .cancelMetricsExpirationTimeout();
+    }
+  }
+}
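Note the fallback in the constructor: when no registry object is supplied to the context, meters land in Micrometer's global composite registry. A minimal sketch of feeding that fallback path; whether this fits depends on how the application manages registries:

```java
import io.micrometer.core.instrument.Metrics;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

class GlobalRegistryExample {
  static void install() {
    // With no registry passed to the session builder, adding a concrete
    // registry to the global composite is enough to start collecting metrics.
    Metrics.addRegistry(new SimpleMeterRegistry());
  }
}
```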
diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdater.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdater.java
new file mode 100644
index 00000000000..cb8303de965
--- /dev/null
+++ b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdater.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.metrics.micrometer;
+
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.dse.driver.api.core.metrics.DseNodeMetric;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric;
+import com.datastax.oss.driver.api.core.metrics.NodeMetric;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.metrics.MetricId;
+import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Timer;
+import java.time.Duration;
+import java.util.Set;
+import net.jcip.annotations.ThreadSafe;
+
+@ThreadSafe
+public class MicrometerNodeMetricUpdater extends MicrometerMetricUpdater<NodeMetric>
+    implements NodeMetricUpdater {
+
+  private final Node node;
+
+  public MicrometerNodeMetricUpdater(
+      Node node,
+      InternalDriverContext context,
+      Set<NodeMetric> enabledMetrics,
+      MeterRegistry registry) {
+    super(context, enabledMetrics, registry);
+    this.node = node;
+
+    DriverExecutionProfile profile = context.getConfig().getDefaultProfile();
+
+    initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections);
+    initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> availableStreamIds(node));
+    initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node));
+    initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node));
+
+    initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile);
+    initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile);
+    initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile);
+    initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile);
+    initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile);
+    initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile);
+    initializeCounter(DefaultNodeMetric.RETRIES, profile);
+    initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile);
+    initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile);
+    initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile);
+    initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile);
+    initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile);
+    initializeCounter(DefaultNodeMetric.IGNORES, profile);
+    initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile);
+    initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile);
+    initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile);
+    initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile);
+    initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile);
+    initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile);
+    initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile);
+    initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile);
+
+    initializeTimer(DefaultNodeMetric.CQL_MESSAGES, profile);
+    initializeTimer(DseNodeMetric.GRAPH_MESSAGES, profile);
+  }
+
+  @Override
+  protected MetricId getMetricId(NodeMetric metric) {
+    return context.getMetricIdGenerator().nodeMetricId(node, metric);
+  }
+
+  @Override
+  protected void startMetricsExpirationTimeout() {
+    super.startMetricsExpirationTimeout();
+  }
+
+  @Override
+ 
protected void cancelMetricsExpirationTimeout() { + super.cancelMetricsExpirationTimeout(); + } + + @Override + protected Timer.Builder configureTimer(Timer.Builder builder, NodeMetric metric, MetricId id) { + DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); + super.configureTimer(builder, metric, id); + if (metric == DefaultNodeMetric.CQL_MESSAGES) { + builder + .minimumExpectedValue( + profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST)) + .maximumExpectedValue( + profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST)) + .serviceLevelObjectives( + profile.isDefined(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO) + ? profile + .getDurationList(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO) + .toArray(new Duration[0]) + : null) + .percentilePrecision( + profile.getInt(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS)); + + configurePercentilesPublishIfDefined( + builder, profile, DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES); + } else if (metric == DseNodeMetric.GRAPH_MESSAGES) { + builder + .minimumExpectedValue( + profile.getDuration(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST)) + .maximumExpectedValue( + profile.getDuration(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST)) + .serviceLevelObjectives( + profile.isDefined(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO) + ? profile + .getDurationList(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO) + .toArray(new Duration[0]) + : null) + .percentilePrecision(profile.getInt(DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS)); + + configurePercentilesPublishIfDefined( + builder, profile, DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES); + } + return builder; + } +} diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdater.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdater.java new file mode 100644 index 00000000000..559054ab510 --- /dev/null +++ b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdater.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
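Both `configureTimer` overrides translate driver config into Micrometer's histogram settings. Roughly what the resulting builder chain looks like for a request timer, with made-up name, bounds, SLO buckets, and percentiles (a sketch, not the driver's exact output):

```java
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import java.time.Duration;

class TimerConfigExample {
  static Timer build(MeterRegistry registry) {
    return Timer.builder("session.cql-requests")
        .minimumExpectedValue(Duration.ofMillis(1))
        .maximumExpectedValue(Duration.ofSeconds(12))
        .serviceLevelObjectives(Duration.ofMillis(100), Duration.ofMillis(500))
        .publishPercentiles(0.75, 0.95, 0.99)
        .percentilePrecision(3)
        .register(registry);
  }
}
```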
+ */
+package com.datastax.oss.driver.internal.metrics.micrometer;
+
+import com.datastax.dse.driver.api.core.config.DseDriverOption;
+import com.datastax.dse.driver.api.core.metrics.DseSessionMetric;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric;
+import com.datastax.oss.driver.api.core.metrics.SessionMetric;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import com.datastax.oss.driver.internal.core.metrics.MetricId;
+import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater;
+import io.micrometer.core.instrument.MeterRegistry;
+import io.micrometer.core.instrument.Timer;
+import java.time.Duration;
+import java.util.Set;
+import net.jcip.annotations.ThreadSafe;
+
+@ThreadSafe
+public class MicrometerSessionMetricUpdater extends MicrometerMetricUpdater<SessionMetric>
+    implements SessionMetricUpdater {
+
+  public MicrometerSessionMetricUpdater(
+      InternalDriverContext context, Set<SessionMetric> enabledMetrics, MeterRegistry registry) {
+    super(context, enabledMetrics, registry);
+
+    DriverExecutionProfile profile = context.getConfig().getDefaultProfile();
+
+    initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes);
+    initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize);
+    initializeGauge(
+        DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize);
+
+    initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile);
+    initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile);
+    initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile);
+
+    initializeTimer(DefaultSessionMetric.CQL_REQUESTS, profile);
+    initializeTimer(DefaultSessionMetric.THROTTLING_DELAY, profile);
+    initializeTimer(DseSessionMetric.CONTINUOUS_CQL_REQUESTS, profile);
+    initializeTimer(DseSessionMetric.GRAPH_REQUESTS, profile);
+  }
+
+  @Override
+  protected MetricId getMetricId(SessionMetric metric) {
+    return context.getMetricIdGenerator().sessionMetricId(metric);
+  }
+
+  @Override
+  protected Timer.Builder configureTimer(Timer.Builder builder, SessionMetric metric, MetricId id) {
+    DriverExecutionProfile profile = context.getConfig().getDefaultProfile();
+    super.configureTimer(builder, metric, id);
+    if (metric == DefaultSessionMetric.CQL_REQUESTS) {
+      builder
+          .minimumExpectedValue(
+              profile.getDuration(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST))
+          .maximumExpectedValue(
+              profile.getDuration(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST))
+          .serviceLevelObjectives(
+              profile.isDefined(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO)
+                  ? profile
+                      .getDurationList(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO)
+                      .toArray(new Duration[0])
+                  : null)
+          .percentilePrecision(
+              profile.isDefined(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS)
+                  ?
profile.getInt(DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS) + : null); + + configurePercentilesPublishIfDefined( + builder, profile, DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES); + } else if (metric == DefaultSessionMetric.THROTTLING_DELAY) { + builder + .minimumExpectedValue( + profile.getDuration(DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST)) + .maximumExpectedValue( + profile.getDuration(DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST)) + .serviceLevelObjectives( + profile.isDefined(DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO) + ? profile + .getDurationList(DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO) + .toArray(new Duration[0]) + : null) + .percentilePrecision( + profile.isDefined(DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS) + ? profile.getInt(DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS) + : null); + + configurePercentilesPublishIfDefined( + builder, profile, DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES); + } else if (metric == DseSessionMetric.CONTINUOUS_CQL_REQUESTS) { + builder + .minimumExpectedValue( + profile.getDuration( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST)) + .maximumExpectedValue( + profile.getDuration( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST)) + .serviceLevelObjectives( + profile.isDefined(DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO) + ? profile + .getDurationList( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO) + .toArray(new Duration[0]) + : null) + .percentilePrecision( + profile.isDefined( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS) + ? profile.getInt( + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS) + : null); + + configurePercentilesPublishIfDefined( + builder, + profile, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES); + } else if (metric == DseSessionMetric.GRAPH_REQUESTS) { + builder + .minimumExpectedValue( + profile.getDuration(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST)) + .maximumExpectedValue( + profile.getDuration(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST)) + .serviceLevelObjectives( + profile.isDefined(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO) + ? profile + .getDurationList(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO) + .toArray(new Duration[0]) + : null) + .percentilePrecision( + profile.isDefined(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS) + ? profile.getInt(DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS) + : null); + + configurePercentilesPublishIfDefined( + builder, profile, DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES); + } + return builder; + } +} diff --git a/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerTags.java b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerTags.java new file mode 100644 index 00000000000..10c7c821ae5 --- /dev/null +++ b/metrics/micrometer/src/main/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerTags.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.metrics.micrometer;
+
+import io.micrometer.core.instrument.Tag;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+public class MicrometerTags {
+
+  public static Iterable<Tag> toMicrometerTags(Map<String, String> tags) {
+    List<Tag> micrometerTags = new ArrayList<>(tags.size());
+    for (Entry<String, String> entry : tags.entrySet()) {
+      micrometerTags.add(Tag.of(entry.getKey(), entry.getValue()));
+    }
+    return micrometerTags;
+  }
+}
diff --git a/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/native-image.properties b/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/native-image.properties
new file mode 100644
index 00000000000..fdbf4ccc7c2
--- /dev/null
+++ b/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/native-image.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+Args = -H:ReflectionConfigurationResources=${.}/reflection.json
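Since every meter is registered under a `MetricId` name plus the tags converted above, tests and dashboards can locate a specific meter through the registry's search API. A sketch; the meter name and tag values here are illustrative and depend entirely on the configured `MetricIdGenerator`:

```java
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;

class MeterLookupExample {
  static Timer findCqlRequests(MeterRegistry registry) {
    // Returns null if no timer with that name and tag combination exists.
    return registry.find("session.cql-requests").tags("session", "s0").timer();
  }
}
```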
diff --git a/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/reflection.json b/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/reflection.json
new file mode 100644
index 00000000000..638cac60af1
--- /dev/null
+++ b/metrics/micrometer/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-micrometer/reflection.json
@@ -0,0 +1,6 @@
+[
+  {
+    "name": "com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory",
+    "methods": [ { "name": "<init>", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ]
+  }
+]
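The `<init>` entry matters because the factory is instantiated reflectively from configuration; without it, a GraalVM native image would strip the constructor. Roughly the reflective path involved (a sketch, not the driver's exact loading code):

```java
import com.datastax.oss.driver.api.core.context.DriverContext;
import com.datastax.oss.driver.internal.core.metrics.MetricsFactory;

class ReflectiveLoadExample {
  static MetricsFactory load(DriverContext context) throws Exception {
    // Approximately what happens when advanced.metrics.factory.class is
    // resolved; getConstructor fails in a native image without the
    // reflection.json entry for <init>.
    Class<?> factoryClass =
        Class.forName(
            "com.datastax.oss.driver.internal.metrics.micrometer.MicrometerMetricsFactory");
    return (MetricsFactory)
        factoryClass.getConstructor(DriverContext.class).newInstance(context);
  }
}
```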
diff --git a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactoryTest.java b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactoryTest.java
new file mode 100644
index 00000000000..586b74d72c3
--- /dev/null
+++ b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerMetricsFactoryTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.oss.driver.internal.metrics.micrometer;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverConfig;
+import com.datastax.oss.driver.api.core.config.DriverExecutionProfile;
+import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric;
+import com.datastax.oss.driver.internal.core.context.InternalDriverContext;
+import io.micrometer.core.instrument.MeterRegistry;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.List;
+import org.junit.Test;
+
+public class MicrometerMetricsFactoryTest {
+
+  @Test
+  public void should_throw_if_wrong_or_missing_registry_type() {
+    // given
+    InternalDriverContext context = mock(InternalDriverContext.class);
+    DriverExecutionProfile profile = mock(DriverExecutionProfile.class);
+    DriverConfig config = mock(DriverConfig.class);
+    List<String> enabledMetrics =
+        Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath());
+    // when
+    when(config.getDefaultProfile()).thenReturn(profile);
+    when(context.getConfig()).thenReturn(config);
+    when(context.getSessionName()).thenReturn("MockSession");
+    // registry object is not a registry type
+    when(context.getMetricRegistry()).thenReturn(Integer.MAX_VALUE);
+    when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER))
+        .thenReturn(Duration.ofHours(1));
+    when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED))
+        .thenReturn(enabledMetrics);
+    // then
+    try {
+      new MicrometerMetricsFactory(context);
+      fail(
+          "MetricsFactory should require correct registry object type: "
+              + MeterRegistry.class.getName());
+    } catch (IllegalArgumentException iae) {
+      assertThat(iae.getMessage())
+          .isEqualTo(
+              "Unexpected Metrics registry object. "
+                  + "Expected registry object to be of type '%s', but was '%s'",
+              MeterRegistry.class.getName(), Integer.class.getName());
+    }
+  }
+}
diff --git a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdaterTest.java b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdaterTest.java
new file mode 100644
index 00000000000..594c4166e98
--- /dev/null
+++ b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerNodeMetricUpdaterTest.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
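As a side note, the try/catch-plus-fail pattern in the test above could also be expressed with AssertJ's `assertThatThrownBy`; a hedged alternative, assuming the same mocked `context`, purely for comparison and not part of the patch:

```java
import static org.assertj.core.api.Assertions.assertThatThrownBy;

// Equivalent assertion without the manual try/catch and fail() call:
assertThatThrownBy(() -> new MicrometerMetricsFactory(context))
    .isInstanceOf(IllegalArgumentException.class)
    .hasMessageContaining("Unexpected Metrics registry object");
```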
+ */ +package com.datastax.oss.driver.internal.metrics.micrometer; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.DefaultMetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; +import com.datastax.oss.driver.internal.core.util.LoggerTest; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import io.micrometer.core.instrument.Timer; +import io.micrometer.core.instrument.distribution.HistogramSnapshot; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class MicrometerNodeMetricUpdaterTest { + + private static final MetricId METRIC_ID = new DefaultMetricId("irrelevant", ImmutableMap.of()); + + @Test + public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { + // given + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + MetricIdGenerator generator = mock(MetricIdGenerator.class); + Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); + Duration expireAfter = AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getMetricIdGenerator()).thenReturn(generator); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(expireAfter); + when(generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES)).thenReturn(METRIC_ID); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST)) + .thenReturn(Duration.ofSeconds(10)); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST)) + .thenReturn(Duration.ofMillis(1)); + 
when(profile.getInt(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS)).thenReturn(5); + + MicrometerNodeMetricUpdater updater = + new MicrometerNodeMetricUpdater(node, context, enabledMetrics, new SimpleMeterRegistry()); + + // then + assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); + verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains( + String.format( + "[prefix] Value too low for %s: %s. Forcing to %s instead.", + DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), + expireAfter, + AbstractMetricUpdater.MIN_EXPIRE_AFTER)); + } + + @Test + @UseDataProvider(value = "acceptableEvictionTimes") + public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( + Duration expireAfter) { + // given + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + MetricIdGenerator generator = mock(MetricIdGenerator.class); + Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getMetricIdGenerator()).thenReturn(generator); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(expireAfter); + when(generator.nodeMetricId(node, DefaultNodeMetric.CQL_MESSAGES)).thenReturn(METRIC_ID); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST)) + .thenReturn(Duration.ofSeconds(10)); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST)) + .thenReturn(Duration.ofMillis(1)); + when(profile.getInt(DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS)).thenReturn(5); + + MicrometerNodeMetricUpdater updater = + new MicrometerNodeMetricUpdater(node, context, enabledMetrics, new SimpleMeterRegistry()); + + // then + assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); + verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); + } + + @DataProvider + public static Object[][] acceptableEvictionTimes() { + return new Object[][] { + {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, + {AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} + }; + } + + @Test + @UseDataProvider(value = "timerMetrics") + public void should_create_timer( + NodeMetric metric, + DriverOption lowest, + DriverOption highest, + DriverOption digits, + DriverOption sla, + DriverOption percentiles) { + // given + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + MetricIdGenerator generator = mock(MetricIdGenerator.class); + Set enabledMetrics = Collections.singleton(metric); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getMetricIdGenerator()).thenReturn(generator); + 
when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(Duration.ofHours(1)); + when(profile.getDuration(lowest)).thenReturn(Duration.ofMillis(10)); + when(profile.getDuration(highest)).thenReturn(Duration.ofSeconds(1)); + when(profile.getInt(digits)).thenReturn(5); + when(profile.isDefined(sla)).thenReturn(true); + when(profile.getDurationList(sla)) + .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); + when(profile.isDefined(percentiles)).thenReturn(true); + when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); + when(generator.nodeMetricId(node, metric)).thenReturn(METRIC_ID); + + SimpleMeterRegistry registry = spy(new SimpleMeterRegistry()); + MicrometerNodeMetricUpdater updater = + new MicrometerNodeMetricUpdater(node, context, enabledMetrics, registry); + + for (int i = 0; i < 10; i++) { + updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); + } + + // then + Timer timer = registry.find(METRIC_ID.getName()).timer(); + assertThat(timer).isNotNull(); + assertThat(timer.count()).isEqualTo(10); + HistogramSnapshot snapshot = timer.takeSnapshot(); + assertThat(snapshot.histogramCounts()).hasSize(2); + assertThat(snapshot.percentileValues()).hasSize(3); + assertThat(snapshot.percentileValues()) + .satisfiesExactlyInAnyOrder( + valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.75), + valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.95), + valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.99)); + } + + @Test + @UseDataProvider(value = "timerMetrics") + public void should_not_create_sla_percentiles( + NodeMetric metric, + DriverOption lowest, + DriverOption highest, + DriverOption digits, + DriverOption sla, + DriverOption percentiles) { + // given + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + MetricIdGenerator generator = mock(MetricIdGenerator.class); + Set enabledMetrics = Collections.singleton(metric); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getMetricIdGenerator()).thenReturn(generator); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(Duration.ofHours(1)); + when(profile.getDuration(lowest)).thenReturn(Duration.ofMillis(10)); + when(profile.getDuration(highest)).thenReturn(Duration.ofSeconds(1)); + when(profile.getInt(digits)).thenReturn(5); + when(profile.isDefined(sla)).thenReturn(false); + when(profile.getDurationList(sla)) + .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); + when(profile.isDefined(percentiles)).thenReturn(false); + when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); + when(generator.nodeMetricId(node, metric)).thenReturn(METRIC_ID); + + SimpleMeterRegistry registry = spy(new SimpleMeterRegistry()); + MicrometerNodeMetricUpdater updater = + new MicrometerNodeMetricUpdater(node, context, enabledMetrics, registry); + + for (int i = 0; i < 10; i++) { + updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); + } + + // then + Timer timer = registry.find(METRIC_ID.getName()).timer(); + assertThat(timer).isNotNull(); + assertThat(timer.count()).isEqualTo(10); + HistogramSnapshot snapshot 
= timer.takeSnapshot(); + assertThat(snapshot.histogramCounts()).hasSize(0); + assertThat(snapshot.percentileValues()).hasSize(0); + } + + @DataProvider + public static Object[][] timerMetrics() { + return new Object[][] { + { + DefaultNodeMetric.CQL_MESSAGES, + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_LOWEST, + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_HIGHEST, + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_DIGITS, + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_SLO, + DefaultDriverOption.METRICS_NODE_CQL_MESSAGES_PUBLISH_PERCENTILES, + }, + { + DseNodeMetric.GRAPH_MESSAGES, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_LOWEST, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_HIGHEST, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_DIGITS, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_SLO, + DseDriverOption.METRICS_NODE_GRAPH_MESSAGES_PUBLISH_PERCENTILES, + }, + }; + } +} diff --git a/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdaterTest.java b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdaterTest.java new file mode 100644 index 00000000000..0deb377457a --- /dev/null +++ b/metrics/micrometer/src/test/java/com/datastax/oss/driver/internal/metrics/micrometer/MicrometerSessionMetricUpdaterTest.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
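The assertion counts above (two histogram buckets, three published percentiles) mirror how Micrometer maps the driver's SLO and publish-percentiles options onto its `Timer.Builder`. A minimal standalone sketch of that mapping, assuming Micrometer 1.5+ (where `serviceLevelObjectives` replaced the older `sla` builder method); the meter name is illustrative:

```java
import io.micrometer.core.instrument.Timer;
import io.micrometer.core.instrument.distribution.HistogramSnapshot;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.time.Duration;

public class TimerSloSketch {
  public static void main(String[] args) {
    SimpleMeterRegistry registry = new SimpleMeterRegistry();
    Timer timer =
        Timer.builder("cql-messages") // illustrative name
            // two SLO boundaries -> two entries in histogramCounts()
            .serviceLevelObjectives(Duration.ofMillis(100), Duration.ofMillis(500))
            // three percentiles -> three entries in percentileValues()
            .publishPercentiles(0.75, 0.95, 0.99)
            .register(registry);
    for (int i = 0; i < 10; i++) {
      timer.record(Duration.ofMillis(100));
    }
    HistogramSnapshot snapshot = timer.takeSnapshot();
    System.out.println(snapshot.histogramCounts().length); // 2
    System.out.println(snapshot.percentileValues().length); // 3
  }
}
```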
+ */ +package com.datastax.oss.driver.internal.metrics.micrometer; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.DefaultMetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.MetricIdGenerator; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import io.micrometer.core.instrument.Timer; +import io.micrometer.core.instrument.distribution.HistogramSnapshot; +import io.micrometer.core.instrument.simple.SimpleMeterRegistry; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class MicrometerSessionMetricUpdaterTest { + + private static final MetricId METRIC_ID = new DefaultMetricId("irrelevant", ImmutableMap.of()); + + @Test + @UseDataProvider(value = "timerMetrics") + public void should_create_timer( + SessionMetric metric, + DriverOption lowest, + DriverOption highest, + DriverOption digits, + DriverOption sla, + DriverOption percentiles) { + // given + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + MetricIdGenerator generator = mock(MetricIdGenerator.class); + Set enabledMetrics = Collections.singleton(metric); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getMetricIdGenerator()).thenReturn(generator); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(Duration.ofHours(1)); + when(profile.getDuration(lowest)).thenReturn(Duration.ofMillis(10)); + when(profile.getDuration(highest)).thenReturn(Duration.ofSeconds(1)); + when(profile.getInt(digits)).thenReturn(5); + when(profile.isDefined(sla)).thenReturn(true); + when(profile.getDurationList(sla)) + .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); + when(profile.isDefined(percentiles)).thenReturn(true); + when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); + when(generator.sessionMetricId(metric)).thenReturn(METRIC_ID); + + SimpleMeterRegistry registry = spy(new SimpleMeterRegistry()); + MicrometerSessionMetricUpdater updater = + new MicrometerSessionMetricUpdater(context, enabledMetrics, registry); + + for (int 
i = 0; i < 10; i++) { + updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); + } + + // then + Timer timer = registry.find(METRIC_ID.getName()).timer(); + assertThat(timer).isNotNull(); + assertThat(timer.count()).isEqualTo(10); + HistogramSnapshot snapshot = timer.takeSnapshot(); + assertThat(snapshot.histogramCounts()).hasSize(2); + assertThat(snapshot.percentileValues()).hasSize(3); + assertThat(snapshot.percentileValues()) + .satisfiesExactlyInAnyOrder( + valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.75), + valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.95), + valuePercentile -> assertThat(valuePercentile.percentile()).isEqualTo(0.99)); + } + + @Test + @UseDataProvider(value = "timerMetrics") + public void should_not_create_sla_percentiles( + SessionMetric metric, + DriverOption lowest, + DriverOption highest, + DriverOption digits, + DriverOption sla, + DriverOption percentiles) { + // given + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + MetricIdGenerator generator = mock(MetricIdGenerator.class); + Set enabledMetrics = Collections.singleton(metric); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getMetricIdGenerator()).thenReturn(generator); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(Duration.ofHours(1)); + when(profile.isDefined(sla)).thenReturn(false); + when(profile.getDurationList(sla)) + .thenReturn(Arrays.asList(Duration.ofMillis(100), Duration.ofMillis(500))); + when(profile.getBoolean(DefaultDriverOption.METRICS_GENERATE_AGGREGABLE_HISTOGRAMS)) + .thenReturn(true); + when(profile.isDefined(percentiles)).thenReturn(false); + when(profile.getDoubleList(percentiles)).thenReturn(Arrays.asList(0.75, 0.95, 0.99)); + when(generator.sessionMetricId(metric)).thenReturn(METRIC_ID); + + SimpleMeterRegistry registry = new SimpleMeterRegistry(); + MicrometerSessionMetricUpdater updater = + new MicrometerSessionMetricUpdater(context, enabledMetrics, registry); + + for (int i = 0; i < 10; i++) { + updater.updateTimer(metric, null, 100, TimeUnit.MILLISECONDS); + } + + // then + Timer timer = registry.find(METRIC_ID.getName()).timer(); + assertThat(timer).isNotNull(); + assertThat(timer.count()).isEqualTo(10); + HistogramSnapshot snapshot = timer.takeSnapshot(); + assertThat(snapshot.histogramCounts()).hasSize(0); + assertThat(snapshot.percentileValues()).hasSize(0); + } + + @DataProvider + public static Object[][] timerMetrics() { + return new Object[][] { + { + DefaultSessionMetric.CQL_REQUESTS, + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_LOWEST, + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_HIGHEST, + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_DIGITS, + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_SLO, + DefaultDriverOption.METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES, + }, + { + DseSessionMetric.GRAPH_REQUESTS, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_LOWEST, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_HIGHEST, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_DIGITS, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_SLO, + DseDriverOption.METRICS_SESSION_GRAPH_REQUESTS_PUBLISH_PERCENTILES, + }, + { + DseSessionMetric.CONTINUOUS_CQL_REQUESTS, + 
DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_LOWEST, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_HIGHEST, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_DIGITS, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_SLO, + DseDriverOption.CONTINUOUS_PAGING_METRICS_SESSION_CQL_REQUESTS_PUBLISH_PERCENTILES + }, + { + DefaultSessionMetric.THROTTLING_DELAY, + DefaultDriverOption.METRICS_SESSION_THROTTLING_LOWEST, + DefaultDriverOption.METRICS_SESSION_THROTTLING_HIGHEST, + DefaultDriverOption.METRICS_SESSION_THROTTLING_DIGITS, + DefaultDriverOption.METRICS_SESSION_THROTTLING_SLO, + DefaultDriverOption.METRICS_SESSION_THROTTLING_PUBLISH_PERCENTILES + }, + }; + } +} diff --git a/metrics/microprofile/pom.xml b/metrics/microprofile/pom.xml new file mode 100644 index 00000000000..9893711d340 --- /dev/null +++ b/metrics/microprofile/pom.xml @@ -0,0 +1,157 @@ + + + + 4.0.0 + + org.apache.cassandra + java-driver-parent + 4.19.3-SNAPSHOT + ../../ + + java-driver-metrics-microprofile + bundle + Apache Cassandra Java Driver - Metrics - Microprofile + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + + + + org.eclipse.microprofile.metrics + microprofile-metrics-api + + + org.apache.cassandra + java-driver-core + + + io.dropwizard.metrics + metrics-core + + + org.hdrhistogram + HdrHistogram + + + + + com.github.stephenc.jcip + jcip-annotations + provided + + + com.github.spotbugs + spotbugs-annotations + provided + + + io.smallrye + smallrye-metrics + test + + + ch.qos.logback + logback-classic + test + + + junit + junit + test + + + com.tngtech.java + junit-dataprovider + test + + + org.assertj + assertj-core + test + + + org.mockito + mockito-core + test + + + org.apache.cassandra + java-driver-core + test + test-jar + + + + + + src/main/resources + + + ${project.basedir}/../.. + + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + + + + maven-jar-plugin + + + + javadoc-jar + package + + jar + + + javadoc + + ** + + + + + + + org.revapi + revapi-maven-plugin + + + true + + + + + diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricUpdater.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricUpdater.java new file mode 100644 index 00000000000..df44fd69c51 --- /dev/null +++ b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricUpdater.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
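The pom above only builds the module; to use it, an application hands a MicroProfile `MetricRegistry` to the session and points `advanced.metrics.factory.class` at the factory added below. A sketch under those assumptions, using SmallRye's `MetricsRegistryImpl` (the implementation this patch's tests use; any MicroProfile implementation should work):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import io.smallrye.metrics.MetricsRegistryImpl;
import java.util.Collections;
import org.eclipse.microprofile.metrics.MetricRegistry;

public class MicroProfileWiringSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricsRegistryImpl();
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withString(
                DefaultDriverOption.METRICS_FACTORY_CLASS,
                "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory")
            .withStringList(
                DefaultDriverOption.METRICS_SESSION_ENABLED,
                Collections.singletonList("cql-requests"))
            .build();
    try (CqlSession session =
        CqlSession.builder()
            .withConfigLoader(loader)
            // the factory retrieves this object via InternalDriverContext.getMetricRegistry()
            .withMetricRegistry(registry)
            .build()) {
      session.execute("SELECT release_version FROM system.local");
    }
  }
}
```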
+ */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.time.Duration; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import net.jcip.annotations.ThreadSafe; +import org.eclipse.microprofile.metrics.Counter; +import org.eclipse.microprofile.metrics.Gauge; +import org.eclipse.microprofile.metrics.Histogram; +import org.eclipse.microprofile.metrics.Metadata; +import org.eclipse.microprofile.metrics.Meter; +import org.eclipse.microprofile.metrics.Metric; +import org.eclipse.microprofile.metrics.MetricID; +import org.eclipse.microprofile.metrics.MetricRegistry; +import org.eclipse.microprofile.metrics.MetricType; +import org.eclipse.microprofile.metrics.Tag; +import org.eclipse.microprofile.metrics.Timer; + +@ThreadSafe +public abstract class MicroProfileMetricUpdater<MetricT> extends AbstractMetricUpdater<MetricT> { + + protected final MetricRegistry registry; + + protected final ConcurrentMap<MetricT, Metric> metrics = new ConcurrentHashMap<>(); + + protected MicroProfileMetricUpdater( + InternalDriverContext context, Set<MetricT> enabledMetrics, MetricRegistry registry) { + super(context, enabledMetrics); + this.registry = registry; + } + + @Override + public void incrementCounter(MetricT metric, @Nullable String profileName, long amount) { + if (isEnabled(metric, profileName)) { + getOrCreateCounterFor(metric).inc(amount); + } + } + + @Override + public void updateHistogram(MetricT metric, @Nullable String profileName, long value) { + if (isEnabled(metric, profileName)) { + getOrCreateHistogramFor(metric).update(value); + } + } + + @Override + public void markMeter(MetricT metric, @Nullable String profileName, long amount) { + if (isEnabled(metric, profileName)) { + getOrCreateMeterFor(metric).mark(amount); + } + } + + @Override + public void updateTimer( + MetricT metric, @Nullable String profileName, long duration, TimeUnit unit) { + if (isEnabled(metric, profileName)) { + getOrCreateTimerFor(metric).update(Duration.ofNanos(unit.toNanos(duration))); + } + } + + @Override + public void clearMetrics() { + for (MetricT metric : metrics.keySet()) { + MetricId id = getMetricId(metric); + Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); + registry.remove(new MetricID(id.getName(), tags)); + } + metrics.clear(); + } + + protected abstract MetricId getMetricId(MetricT metric); + + protected void initializeGauge( + MetricT metric, DriverExecutionProfile profile, Gauge<Number> supplier) { + if (isEnabled(metric, profile.getName())) { + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + String name = id.getName(); + Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); + Metadata metadata = + Metadata.builder().withName(name).withType(MetricType.GAUGE).build(); + return registry.register(metadata, supplier, tags); + }); + } + } + + protected void initializeCounter(MetricT metric, DriverExecutionProfile profile) { + if (isEnabled(metric, profile.getName())) { + getOrCreateCounterFor(metric); + } + } + + protected void initializeTimer(MetricT metric, DriverExecutionProfile profile) { + if (isEnabled(metric, profile.getName())) { +
getOrCreateTimerFor(metric); + } + } + + protected Counter getOrCreateCounterFor(MetricT metric) { + return (Counter) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); + return registry.counter(id.getName(), tags); + }); + } + + protected Meter getOrCreateMeterFor(MetricT metric) { + return (Meter) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); + return registry.meter(id.getName(), tags); + }); + } + + protected Histogram getOrCreateHistogramFor(MetricT metric) { + return (Histogram) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); + return registry.histogram(id.getName(), tags); + }); + } + + protected Timer getOrCreateTimerFor(MetricT metric) { + return (Timer) + metrics.computeIfAbsent( + metric, + m -> { + MetricId id = getMetricId(m); + Tag[] tags = MicroProfileTags.toMicroProfileTags(id.getTags()); + return registry.timer(id.getName(), tags); + }); + } +} diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactory.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactory.java new file mode 100644 index 00000000000..e045b5fcb5e --- /dev/null +++ b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactory.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
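`MicroProfileMetricUpdater` above caches created metrics in a `ConcurrentMap`, but MicroProfile registries are themselves get-or-create: `counter(name, tags)` returns the already-registered metric on subsequent calls, so the map mainly spares recomputing the metric id and tags on the hot path. A small sketch of that registry behavior, again using SmallRye as the implementation:

```java
import io.smallrye.metrics.MetricsRegistryImpl;
import org.eclipse.microprofile.metrics.Counter;
import org.eclipse.microprofile.metrics.MetricRegistry;
import org.eclipse.microprofile.metrics.Tag;

public class RegistrySketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricsRegistryImpl();
    Tag session = new Tag("session", "s0");
    Counter first = registry.counter("errors", session); // creates the counter
    Counter second = registry.counter("errors", session); // returns the same counter
    first.inc(3);
    System.out.println(second.getCount()); // 3
    System.out.println(first == second); // true: same registered instance
  }
}
```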
+ */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metadata.NodeState; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metadata.NodeStateEvent; +import com.datastax.oss.driver.internal.core.metrics.MetricPaths; +import com.datastax.oss.driver.internal.core.metrics.MetricsFactory; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.NoopNodeMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.NoopSessionMetricUpdater; +import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; +import com.datastax.oss.driver.internal.core.util.concurrent.RunOrSchedule; +import io.netty.util.concurrent.EventExecutor; +import java.util.Optional; +import java.util.Set; +import net.jcip.annotations.ThreadSafe; +import org.eclipse.microprofile.metrics.MetricRegistry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@ThreadSafe +public class MicroProfileMetricsFactory implements MetricsFactory { + + private static final Logger LOG = LoggerFactory.getLogger(MicroProfileMetricsFactory.class); + + private final InternalDriverContext context; + private final Set<NodeMetric> enabledNodeMetrics; + private final MetricRegistry registry; + private final SessionMetricUpdater sessionUpdater; + + public MicroProfileMetricsFactory(DriverContext context) { + this.context = (InternalDriverContext) context; + String logPrefix = context.getSessionName(); + DriverExecutionProfile config = context.getConfig().getDefaultProfile(); + Set<SessionMetric> enabledSessionMetrics = + MetricPaths.parseSessionMetricPaths( + config.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED), logPrefix); + this.enabledNodeMetrics = + MetricPaths.parseNodeMetricPaths( + config.getStringList(DefaultDriverOption.METRICS_NODE_ENABLED), logPrefix); + if (enabledSessionMetrics.isEmpty() && enabledNodeMetrics.isEmpty()) { + LOG.debug("[{}] All metrics are disabled.", logPrefix); + this.registry = null; + this.sessionUpdater = NoopSessionMetricUpdater.INSTANCE; + } else { + Object possibleMetricRegistry = this.context.getMetricRegistry(); + if (possibleMetricRegistry == null) { + // metrics are enabled, but a metric registry was not supplied to the context + throw new IllegalArgumentException( + "No metric registry object found. Expected registry object to be of type '" + + MetricRegistry.class.getName() + + "'"); + } + if (possibleMetricRegistry instanceof MetricRegistry) { + this.registry = (MetricRegistry) possibleMetricRegistry; + this.sessionUpdater = + new MicroProfileSessionMetricUpdater( + this.context, enabledSessionMetrics, this.registry); + } else { + // Metrics are enabled, but the registry object is not an expected type + throw new IllegalArgumentException( + "Unexpected Metrics registry object. 
Expected registry object to be of type '" + + MetricRegistry.class.getName() + + "', but was '" + + possibleMetricRegistry.getClass().getName() + + "'"); + } + if (!enabledNodeMetrics.isEmpty()) { + EventExecutor adminEventExecutor = + this.context.getNettyOptions().adminEventExecutorGroup().next(); + this.context + .getEventBus() + .register( + NodeStateEvent.class, + RunOrSchedule.on(adminEventExecutor, this::processNodeStateEvent)); + } + } + } + + @Override + public Optional<Metrics> getMetrics() { + return Optional.empty(); + } + + @Override + public SessionMetricUpdater getSessionUpdater() { + return sessionUpdater; + } + + @Override + public NodeMetricUpdater newNodeUpdater(Node node) { + if (registry == null) { + return NoopNodeMetricUpdater.INSTANCE; + } else { + return new MicroProfileNodeMetricUpdater(node, context, enabledNodeMetrics, registry); + } + } + + protected void processNodeStateEvent(NodeStateEvent event) { + if (event.newState == NodeState.DOWN + || event.newState == NodeState.FORCED_DOWN + || event.newState == null) { + // node is DOWN or REMOVED + ((MicroProfileNodeMetricUpdater) event.node.getMetricUpdater()) + .startMetricsExpirationTimeout(); + } else if (event.newState == NodeState.UP || event.newState == NodeState.UNKNOWN) { + // node is UP or ADDED + ((MicroProfileNodeMetricUpdater) event.node.getMetricUpdater()) + .cancelMetricsExpirationTimeout(); + } + } +} diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricUpdater.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricUpdater.java new file mode 100644 index 00000000000..8a2d235b59e --- /dev/null +++ b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricUpdater.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
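The factory above arms an expiration timeout when a node goes DOWN or is removed, and cancels it when the node comes back; the delay is the `metrics.node.expire-after` option consumed by `AbstractMetricUpdater`. A sketch of tuning it programmatically (the two-hour value is arbitrary):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.time.Duration;

public class ExpireAfterSketch {
  public static void main(String[] args) {
    // Node-level meters are evicted this long after their node goes DOWN or is removed.
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER, Duration.ofHours(2))
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      session.execute("SELECT release_version FROM system.local");
    }
  }
}
```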
+ */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import com.datastax.dse.driver.api.core.metrics.DseNodeMetric; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.NodeMetricUpdater; +import java.util.Set; +import net.jcip.annotations.ThreadSafe; +import org.eclipse.microprofile.metrics.MetricRegistry; + +@ThreadSafe +public class MicroProfileNodeMetricUpdater extends MicroProfileMetricUpdater<NodeMetric> + implements NodeMetricUpdater { + + private final Node node; + + public MicroProfileNodeMetricUpdater( + Node node, + InternalDriverContext context, + Set<NodeMetric> enabledMetrics, + MetricRegistry registry) { + super(context, enabledMetrics, registry); + this.node = node; + + DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); + + initializeGauge(DefaultNodeMetric.OPEN_CONNECTIONS, profile, node::getOpenConnections); + initializeGauge(DefaultNodeMetric.AVAILABLE_STREAMS, profile, () -> availableStreamIds(node)); + initializeGauge(DefaultNodeMetric.IN_FLIGHT, profile, () -> inFlightRequests(node)); + initializeGauge(DefaultNodeMetric.ORPHANED_STREAMS, profile, () -> orphanedStreamIds(node)); + + initializeCounter(DefaultNodeMetric.UNSENT_REQUESTS, profile); + initializeCounter(DefaultNodeMetric.ABORTED_REQUESTS, profile); + initializeCounter(DefaultNodeMetric.WRITE_TIMEOUTS, profile); + initializeCounter(DefaultNodeMetric.READ_TIMEOUTS, profile); + initializeCounter(DefaultNodeMetric.UNAVAILABLES, profile); + initializeCounter(DefaultNodeMetric.OTHER_ERRORS, profile); + initializeCounter(DefaultNodeMetric.RETRIES, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_ABORTED, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_READ_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_WRITE_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_UNAVAILABLE, profile); + initializeCounter(DefaultNodeMetric.RETRIES_ON_OTHER_ERROR, profile); + initializeCounter(DefaultNodeMetric.IGNORES, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_ABORTED, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_READ_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_WRITE_TIMEOUT, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_UNAVAILABLE, profile); + initializeCounter(DefaultNodeMetric.IGNORES_ON_OTHER_ERROR, profile); + initializeCounter(DefaultNodeMetric.SPECULATIVE_EXECUTIONS, profile); + initializeCounter(DefaultNodeMetric.CONNECTION_INIT_ERRORS, profile); + initializeCounter(DefaultNodeMetric.AUTHENTICATION_ERRORS, profile); + + initializeTimer(DefaultNodeMetric.CQL_MESSAGES, profile); + initializeTimer(DseNodeMetric.GRAPH_MESSAGES, profile); + } + + @Override + protected MetricId getMetricId(NodeMetric metric) { + return context.getMetricIdGenerator().nodeMetricId(node, metric); + } + + @Override + protected void startMetricsExpirationTimeout() { + super.startMetricsExpirationTimeout(); + } + + @Override + protected void cancelMetricsExpirationTimeout() { + super.cancelMetricsExpirationTimeout(); + } +} diff --git 
a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileSessionMetricUpdater.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileSessionMetricUpdater.java new file mode 100644 index 00000000000..f3c906e4422 --- /dev/null +++ b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileSessionMetricUpdater.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import com.datastax.dse.driver.api.core.metrics.DseSessionMetric; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metrics.SessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.MetricId; +import com.datastax.oss.driver.internal.core.metrics.SessionMetricUpdater; +import java.util.Set; +import net.jcip.annotations.ThreadSafe; +import org.eclipse.microprofile.metrics.MetricRegistry; + +@ThreadSafe +public class MicroProfileSessionMetricUpdater extends MicroProfileMetricUpdater<SessionMetric> + implements SessionMetricUpdater { + + public MicroProfileSessionMetricUpdater( + InternalDriverContext context, Set<SessionMetric> enabledMetrics, MetricRegistry registry) { + super(context, enabledMetrics, registry); + + DriverExecutionProfile profile = context.getConfig().getDefaultProfile(); + + initializeGauge(DefaultSessionMetric.CONNECTED_NODES, profile, this::connectedNodes); + initializeGauge(DefaultSessionMetric.THROTTLING_QUEUE_SIZE, profile, this::throttlingQueueSize); + initializeGauge( + DefaultSessionMetric.CQL_PREPARED_CACHE_SIZE, profile, this::preparedStatementCacheSize); + + initializeCounter(DefaultSessionMetric.CQL_CLIENT_TIMEOUTS, profile); + initializeCounter(DefaultSessionMetric.THROTTLING_ERRORS, profile); + initializeCounter(DseSessionMetric.GRAPH_CLIENT_TIMEOUTS, profile); + + initializeTimer(DefaultSessionMetric.CQL_REQUESTS, profile); + initializeTimer(DefaultSessionMetric.THROTTLING_DELAY, profile); + initializeTimer(DseSessionMetric.CONTINUOUS_CQL_REQUESTS, profile); + initializeTimer(DseSessionMetric.GRAPH_REQUESTS, profile); + } + + @Override + protected MetricId getMetricId(SessionMetric metric) { + return context.getMetricIdGenerator().sessionMetricId(metric); + } +} diff --git a/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileTags.java b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileTags.java new file mode 100644 index 
00000000000..54ac9c77f98 --- /dev/null +++ b/metrics/microprofile/src/main/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileTags.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import org.eclipse.microprofile.metrics.Tag; + +public class MicroProfileTags { + + public static Tag[] toMicroProfileTags(Map<String, String> tags) { + List<Tag> microProfileTags = new ArrayList<>(tags.size()); + for (Entry<String, String> entry : tags.entrySet()) { + microProfileTags.add(new Tag(entry.getKey(), entry.getValue())); + } + return microProfileTags.toArray(new Tag[0]); + } +} diff --git a/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/native-image.properties b/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/native-image.properties new file mode 100644 index 00000000000..fdbf4ccc7c2 --- /dev/null +++ b/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/native-image.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
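`MicroProfileTags` above is a one-way converter from the driver's tag map to MicroProfile `Tag`s; a quick usage sketch (tag keys and values are illustrative, and the class is assumed to be imported or on the same package's classpath):

```java
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
import org.eclipse.microprofile.metrics.Tag;

public class TagsSketch {
  public static void main(String[] args) {
    Tag[] tags =
        MicroProfileTags.toMicroProfileTags(
            ImmutableMap.of("session", "s0", "node", "10.0.0.1:9042"));
    for (Tag tag : tags) {
      System.out.println(tag.getTagName() + "=" + tag.getTagValue());
    }
  }
}
```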
+ +Args = -H:ReflectionConfigurationResources=${.}/reflection.json diff --git a/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/reflection.json b/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/reflection.json new file mode 100644 index 00000000000..6d408897551 --- /dev/null +++ b/metrics/microprofile/src/main/resources/META-INF/native-image/com.datastax.oss/java-driver-metrics-microprofile/reflection.json @@ -0,0 +1,6 @@ +[ + { + "name": "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory", + "methods": [ { "name": "<init>", "parameterTypes": [ "com.datastax.oss.driver.api.core.context.DriverContext" ] } ] + } +] diff --git a/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactoryTest.java b/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactoryTest.java new file mode 100644 index 00000000000..f1fbfa2c907 --- /dev/null +++ b/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileMetricsFactoryTest.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
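The `reflection.json` above registers the factory's `<init>(DriverContext)` constructor because the driver instantiates the metrics factory reflectively from the `advanced.metrics.factory.class` option; under GraalVM native-image that call only works if the constructor is listed. Roughly the equivalent of:

```java
import com.datastax.oss.driver.api.core.context.DriverContext;

public class ReflectiveLoadSketch {
  // Approximately what the driver does with advanced.metrics.factory.class;
  // the entry in reflection.json is what keeps this working in a native image.
  static Object newMetricsFactory(DriverContext context) throws Exception {
    return Class.forName(
            "com.datastax.oss.driver.internal.metrics.microprofile.MicroProfileMetricsFactory")
        .getConstructor(DriverContext.class)
        .newInstance(context);
  }
}
```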
+ */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import java.util.Collections; +import java.util.List; +import org.eclipse.microprofile.metrics.MetricRegistry; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class MicroProfileMetricsFactoryTest { + + @Test + @UseDataProvider(value = "invalidRegistryTypes") + public void should_throw_if_wrong_or_missing_registry_type( + Object registryObj, String expectedMsg) { + // given + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + List enabledMetrics = + Collections.singletonList(DefaultSessionMetric.CQL_REQUESTS.getPath()); + // when + when(config.getDefaultProfile()).thenReturn(profile); + when(context.getConfig()).thenReturn(config); + when(context.getSessionName()).thenReturn("MockSession"); + // registry object is not a registry type + when(context.getMetricRegistry()).thenReturn(registryObj); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(AbstractMetricUpdater.MIN_EXPIRE_AFTER); + when(profile.getStringList(DefaultDriverOption.METRICS_SESSION_ENABLED)) + .thenReturn(enabledMetrics); + // then + try { + new MicroProfileMetricsFactory(context); + fail( + "MetricsFactory should require correct registry object type: " + + MetricRegistry.class.getName()); + } catch (IllegalArgumentException iae) { + assertThat(iae.getMessage()).isEqualTo(expectedMsg); + } + } + + @DataProvider + public static Object[][] invalidRegistryTypes() { + return new Object[][] { + { + Integer.MAX_VALUE, + "Unexpected Metrics registry object. Expected registry object to be of type '" + + MetricRegistry.class.getName() + + "', but was '" + + Integer.class.getName() + + "'" + }, + { + null, + "No metric registry object found. Expected registry object to be of type '" + + MetricRegistry.class.getName() + + "'" + } + }; + } +} diff --git a/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricsUpdaterTest.java b/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricsUpdaterTest.java new file mode 100644 index 00000000000..aa73148fa77 --- /dev/null +++ b/metrics/microprofile/src/test/java/com/datastax/oss/driver/internal/metrics/microprofile/MicroProfileNodeMetricsUpdaterTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.metrics.microprofile; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import ch.qos.logback.classic.Level; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfig; +import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; +import com.datastax.oss.driver.api.core.metrics.NodeMetric; +import com.datastax.oss.driver.internal.core.context.InternalDriverContext; +import com.datastax.oss.driver.internal.core.metrics.AbstractMetricUpdater; +import com.datastax.oss.driver.internal.core.util.LoggerTest; +import com.tngtech.java.junit.dataprovider.DataProvider; +import com.tngtech.java.junit.dataprovider.DataProviderRunner; +import com.tngtech.java.junit.dataprovider.UseDataProvider; +import io.smallrye.metrics.MetricsRegistryImpl; +import java.time.Duration; +import java.util.Collections; +import java.util.Set; +import org.eclipse.microprofile.metrics.Gauge; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(DataProviderRunner.class) +public class MicroProfileNodeMetricsUpdaterTest { + + @Test + public void should_log_warning_when_provided_eviction_time_setting_is_too_low() { + // given + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); + Duration expireAfter = AbstractMetricUpdater.MIN_EXPIRE_AFTER.minusMinutes(1); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(expireAfter); + + MicroProfileNodeMetricUpdater updater = + new MicroProfileNodeMetricUpdater( + node, context, enabledMetrics, new MetricsRegistryImpl()) { + @Override + protected void initializeGauge( + NodeMetric metric, DriverExecutionProfile profile, Gauge supplier) { + // do nothing + } + + @Override + protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { + // do nothing + } + + @Override + protected void initializeTimer(NodeMetric metric, DriverExecutionProfile profile) { + // do nothing + } + }; + + // then + assertThat(updater.getExpireAfter()).isEqualTo(AbstractMetricUpdater.MIN_EXPIRE_AFTER); + 
verify(logger.appender, timeout(500).times(1)).doAppend(logger.loggingEventCaptor.capture()); + assertThat(logger.loggingEventCaptor.getValue().getMessage()).isNotNull(); + assertThat(logger.loggingEventCaptor.getValue().getFormattedMessage()) + .contains( + String.format( + "[prefix] Value too low for %s: %s. Forcing to %s instead.", + DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER.getPath(), + expireAfter, + AbstractMetricUpdater.MIN_EXPIRE_AFTER)); + } + + @Test + @UseDataProvider(value = "acceptableEvictionTimes") + public void should_not_log_warning_when_provided_eviction_time_setting_is_acceptable( + Duration expireAfter) { + // given + LoggerTest.LoggerSetup logger = + LoggerTest.setupTestLogger(AbstractMetricUpdater.class, Level.WARN); + Node node = mock(Node.class); + InternalDriverContext context = mock(InternalDriverContext.class); + DriverExecutionProfile profile = mock(DriverExecutionProfile.class); + DriverConfig config = mock(DriverConfig.class); + Set enabledMetrics = Collections.singleton(DefaultNodeMetric.CQL_MESSAGES); + + // when + when(context.getSessionName()).thenReturn("prefix"); + when(context.getConfig()).thenReturn(config); + when(config.getDefaultProfile()).thenReturn(profile); + when(profile.getDuration(DefaultDriverOption.METRICS_NODE_EXPIRE_AFTER)) + .thenReturn(expireAfter); + + MicroProfileNodeMetricUpdater updater = + new MicroProfileNodeMetricUpdater( + node, context, enabledMetrics, new MetricsRegistryImpl()) { + @Override + protected void initializeGauge( + NodeMetric metric, DriverExecutionProfile profile, Gauge supplier) { + // do nothing + } + + @Override + protected void initializeCounter(NodeMetric metric, DriverExecutionProfile profile) { + // do nothing + } + + @Override + protected void initializeTimer(NodeMetric metric, DriverExecutionProfile profile) { + // do nothing + } + }; + + // then + assertThat(updater.getExpireAfter()).isEqualTo(expireAfter); + verify(logger.appender, timeout(500).times(0)).doAppend(logger.loggingEventCaptor.capture()); + } + + @DataProvider + public static Object[][] acceptableEvictionTimes() { + return new Object[][] { + {AbstractMetricUpdater.MIN_EXPIRE_AFTER}, + {AbstractMetricUpdater.MIN_EXPIRE_AFTER.plusMinutes(1)} + }; + } +} diff --git a/osgi-tests/README.md b/osgi-tests/README.md new file mode 100644 index 00000000000..1ca6211d427 --- /dev/null +++ b/osgi-tests/README.md @@ -0,0 +1,67 @@ + + +# Java Driver OSGi Tests + +This module contains OSGi tests for the driver. + +It declares a typical "application" bundle containing a few services that rely +on the driver, see `src/main`. + +The integration tests in `src/tests` interrogate the application bundle services +and check that they can operate normally. They exercise different provisioning +configurations to ensure that the driver is usable in most cases. + +## Running the tests + +In order to run the OSGi tests, all other driver modules must have been +previously compiled, that is, their respective `target/classes` directory must +be up-to-date and contain not only the class files, but also an up-to-date OSGi +manifest. + +Therefore, it is recommended to always compile all modules and run the OSGi +integration tests in one single pass, which can be easily done by running, +from the driver's parent module directory: + + mvn clean verify + +This will however also run other integration tests, and might take a long time +to finish. 
If you prefer to skip other integration tests, and only run the +OSGi ones, you can do so as follows: + + mvn clean verify \ + -DskipParallelizableITs=true \ + -DskipSerialITs=true \ + -DskipIsolatedITs=true + +You can pass the following system properties to the tests: + +1. `ccm.version`: the CCM version to use +2. `ccm.distribution`: the target backend distribution to use (e.g. DSE, HCD) +3. `osgi.debug`: whether to enable remote debugging of the OSGi container (see + below). + +## Debugging OSGi tests + +First, you can enable DEBUG logs for the Pax Exam framework by editing the +`src/tests/resources/logback-test.xml` file. + +Alternatively, you can debug the remote OSGi container by passing the system +property `-Dosgi.debug=true`. In this case the framework will wait for a +remote debugger to attach on port 5005. diff --git a/osgi-tests/pom.xml b/osgi-tests/pom.xml new file mode 100644 index 00000000000..c2cc4d830f1 --- /dev/null +++ b/osgi-tests/pom.xml @@ -0,0 +1,303 @@ + + + + 4.0.0 + + org.apache.cassandra + java-driver-parent + 4.19.3-SNAPSHOT + + java-driver-osgi-tests + jar + Apache Cassandra Java Driver - OSGi tests + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + + + + org.apache.cassandra + java-driver-core + + + org.apache.cassandra + java-driver-query-builder + + + org.apache.cassandra + java-driver-mapper-processor + + + org.apache.cassandra + java-driver-mapper-runtime + + + com.github.stephenc.jcip + jcip-annotations + provided + + + com.github.spotbugs + spotbugs-annotations + provided + + + ch.qos.logback + logback-classic + + + org.apache.cassandra + java-driver-guava-shaded + + + org.xerial.snappy + snappy-java + + + at.yawk.lz4 + lz4-java + + + org.reactivestreams + reactive-streams + + + com.esri.geometry + esri-geometry-api + + + org.apache.tinkerpop + gremlin-core + + + org.apache.tinkerpop + tinkergraph-gremlin + + + org.osgi + org.osgi.core + provided + + + org.apache.cassandra + java-driver-test-infra + test + + + org.ops4j.pax.exam + pax-exam-junit4 + test + + + org.ops4j.pax.exam + pax-exam-container-forked + test + + + org.ops4j.pax.exam + pax-exam-link-mvn + test + + + org.ops4j.pax.url + pax-url-wrap + test + + + org.ops4j.pax.url + pax-url-reference + test + + + javax.inject + javax.inject + test + + + org.apache.felix + org.apache.felix.framework + test + + + org.assertj + assertj-core + test + + + org.apache.commons + commons-exec + test + + + io.reactivex.rxjava2 + rxjava + test + + + org.awaitility + awaitility + test + + + + + + org.apache.servicemix.tooling + depends-maven-plugin + 1.4.0 + + + generate-depends-file + + generate-depends-file + + + + + + + org.ops4j + maven-pax-plugin + 1.6.0 + + felix + true + + --platform=felix + --version=${felix.version} + --log=debug + --bootDelegation=sun.misc + + + + + org.apache.felix + maven-bundle-plugin + + + com.datastax.oss.driver.osgi + com.datastax.oss.driver.internal.osgi.MailboxActivator + com.datastax.oss.driver.api.osgi.* + com.datastax.oss.driver.internal.osgi.* + !net.jcip.annotations.*,!edu.umd.cs.findbugs.annotations.*,org.apache.tinkerpop.*;resolution:=optional,* + <_include>-osgi.bnd + + + + + bundle-manifest + process-classes + + manifest + + + + + + maven-surefire-plugin + + ${testing.jvm}/bin/java + + ${project.basedir}/src/test/resources/logback-test.xml + + + + + maven-failsafe-plugin + + + osgi-tests + + integration-test + verify + + + + + ${testing.jvm}/bin/java + + ${project.basedir}/src/test/resources/logback-test.xml + + classes + 1 + + + + org.revapi + 
revapi-maven-plugin + + true + + + + maven-jar-plugin + + true + + + + maven-javadoc-plugin + + true + + + + maven-source-plugin + + true + + + + maven-install-plugin + + true + + + + maven-deploy-plugin + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + true + + + + + diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/CustomRetryPolicy.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/CustomRetryPolicy.java new file mode 100644 index 00000000000..4e6b4e1394c --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/CustomRetryPolicy.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.osgi; + +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; + +public class CustomRetryPolicy extends DefaultRetryPolicy { + + public CustomRetryPolicy(DriverContext context, String profileName) { + super(context, profileName); + } +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxException.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxException.java new file mode 100644 index 00000000000..112becb2e6d --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
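`CustomRetryPolicy` above exists purely so the OSGi tests can verify that the driver loads a user-provided policy across bundle boundaries. Configuring it from application code might look like this (a sketch using the programmatic config loader):

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.osgi.CustomRetryPolicy;

public class RetryPolicySketch {
  public static void main(String[] args) {
    // The driver instantiates the policy reflectively, so in OSGi the bundle that
    // exports CustomRetryPolicy must be visible to the driver bundle.
    DriverConfigLoader loader =
        DriverConfigLoader.programmaticBuilder()
            .withClass(DefaultDriverOption.RETRY_POLICY_CLASS, CustomRetryPolicy.class)
            .build();
    try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
      session.execute("SELECT release_version FROM system.local");
    }
  }
}
```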
+ */ +package com.datastax.oss.driver.api.osgi.service; + +public class MailboxException extends Exception { + + public MailboxException(Throwable cause) { + super("Failure interacting with Mailbox", cause); + } +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxMessage.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxMessage.java new file mode 100644 index 00000000000..426399da98f --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxMessage.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.osgi.service; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; +import com.datastax.oss.driver.api.mapper.annotations.CqlName; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.time.Instant; +import java.util.Objects; + +@Entity +@CqlName("messages_by_recipient") +public class MailboxMessage { + + public static final CqlIdentifier MAILBOX_TABLE = + CqlIdentifier.fromInternal("messages_by_recipient"); + + @PartitionKey private String recipient; + + @ClusteringColumn private Instant timestamp; + + private String sender; + + private String body; + + public MailboxMessage() {} + + public MailboxMessage( + @NonNull String recipient, + @NonNull Instant timestamp, + @NonNull String sender, + @NonNull String body) { + this.recipient = recipient; + this.timestamp = timestamp; + this.sender = sender; + this.body = body; + } + + public String getRecipient() { + return recipient; + } + + public void setRecipient(String recipient) { + this.recipient = recipient; + } + + public Instant getTimestamp() { + return timestamp; + } + + public void setTimestamp(Instant timestamp) { + this.timestamp = timestamp; + } + + public String getSender() { + return sender; + } + + public void setSender(String sender) { + this.sender = sender; + } + + public String getBody() { + return body; + } + + public void setBody(String body) { + this.body = body; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof MailboxMessage)) { + return false; + } + MailboxMessage that = (MailboxMessage) o; + return Objects.equals(recipient, that.recipient) + && Objects.equals(timestamp, that.timestamp) + && Objects.equals(sender, that.sender) + && Objects.equals(body, that.body); + } + + @Override + public int hashCode() { + return Objects.hash(recipient, timestamp, sender, body); + } +} diff --git 
a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxService.java new file mode 100644 index 00000000000..732a05e6a85 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/MailboxService.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.osgi.service; + +public interface MailboxService { + + /** + * Retrieve all messages for a given recipient. + * + * @param recipient User whose mailbox is being read. + * @return All messages in the mailbox. + */ + Iterable<MailboxMessage> getMessages(String recipient) throws MailboxException; + + /** + * Stores the given message in the appropriate mailbox. + * + * @param message Message to send. + */ + void sendMessage(MailboxMessage message) throws MailboxException; + + /** + * Deletes all mail for the given recipient. + * + * @param recipient User whose mailbox will be cleared. + */ + void clearMailbox(String recipient) throws MailboxException; +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxMessage.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxMessage.java new file mode 100644 index 00000000000..9b0b52cfa09 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxMessage.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.oss.driver.api.osgi.service.geo; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.mapper.annotations.ClusteringColumn; +import com.datastax.oss.driver.api.mapper.annotations.CqlName; +import com.datastax.oss.driver.api.mapper.annotations.Entity; +import com.datastax.oss.driver.api.mapper.annotations.PartitionKey; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Objects; + +@Entity +@CqlName("messages_by_location") +public class GeoMailboxMessage { + + public static final CqlIdentifier MAILBOX_TABLE = + CqlIdentifier.fromInternal("messages_by_location"); + + @PartitionKey private String recipient; + + @ClusteringColumn private Point location; + + private String sender; + + private String body; + + public GeoMailboxMessage() {} + + public GeoMailboxMessage( + @NonNull String recipient, + @NonNull Point location, + @NonNull String sender, + @NonNull String body) { + this.location = location; + this.recipient = recipient; + this.sender = sender; + this.body = body; + } + + public String getRecipient() { + return recipient; + } + + public void setRecipient(String recipient) { + this.recipient = recipient; + } + + public Point getLocation() { + return location; + } + + public void setLocation(Point location) { + this.location = location; + } + + public String getSender() { + return sender; + } + + public void setSender(String sender) { + this.sender = sender; + } + + public String getBody() { + return body; + } + + public void setBody(String body) { + this.body = body; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof GeoMailboxMessage)) { + return false; + } + GeoMailboxMessage that = (GeoMailboxMessage) o; + return Objects.equals(recipient, that.recipient) + && Objects.equals(location, that.location) + && Objects.equals(sender, that.sender) + && Objects.equals(body, that.body); + } + + @Override + public int hashCode() { + return Objects.hash(recipient, location, sender, body); + } +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxService.java new file mode 100644 index 00000000000..dcb7963ccc3 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/geo/GeoMailboxService.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.osgi.service.geo; + +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxService; + +public interface GeoMailboxService extends MailboxService { + + void sendGeoMessage(GeoMailboxMessage message) throws MailboxException; + + Iterable<GeoMailboxMessage> getGeoMessages(String recipient) throws MailboxException; + + void clearGeoMailbox(String recipient) throws MailboxException; +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/graph/GraphMailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/graph/GraphMailboxService.java new file mode 100644 index 00000000000..65999957066 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/graph/GraphMailboxService.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.osgi.service.graph; + +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.MailboxService; + +public interface GraphMailboxService extends MailboxService { + + void sendGraphMessage(MailboxMessage message) throws MailboxException; + + Iterable<MailboxMessage> getGraphMessages(String recipient) throws MailboxException; +}
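None of the files in this patch call the base contract directly (the integration tests go through shared service checks), so a minimal client sketch may help; the class below is hypothetical and only illustrates the interfaces defined above:

```java
import com.datastax.oss.driver.api.osgi.service.MailboxException;
import com.datastax.oss.driver.api.osgi.service.MailboxMessage;
import com.datastax.oss.driver.api.osgi.service.MailboxService;
import java.time.Instant;

// Hypothetical client of the MailboxService contract defined above.
public class MailboxClientSketch {

  static void roundTrip(MailboxService service) throws MailboxException {
    // Store a message, read the mailbox back, then clear it.
    service.sendMessage(new MailboxMessage("alice", Instant.now(), "bob", "hello"));
    for (MailboxMessage message : service.getMessages("alice")) {
      System.out.println(message.getSender() + ": " + message.getBody());
    }
    service.clearMailbox("alice");
  }
}
```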
diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/reactive/ReactiveMailboxService.java b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/reactive/ReactiveMailboxService.java new file mode 100644 index 00000000000..226db1b06d9 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/api/osgi/service/reactive/ReactiveMailboxService.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.osgi.service.reactive; + +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.MailboxService; + +public interface ReactiveMailboxService extends MailboxService { + + MappedReactiveResultSet<MailboxMessage> getMessagesReactive(String recipient) + throws MailboxException; +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/MailboxActivator.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/MailboxActivator.java new file mode 100644 index 00000000000..8dff11520af --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/MailboxActivator.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; +import com.datastax.dse.driver.internal.core.graph.GraphProtocol; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; +import com.datastax.oss.driver.internal.osgi.service.geo.GeoMailboxServiceImpl; +import com.datastax.oss.driver.internal.osgi.service.graph.GraphMailboxServiceImpl; +import com.datastax.oss.driver.internal.osgi.service.reactive.ReactiveMailboxServiceImpl; +import java.net.InetSocketAddress; +import java.util.Dictionary; +import java.util.Hashtable; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.osgi.framework.Bundle; +import org.osgi.framework.BundleActivator; +import org.osgi.framework.BundleContext; +import org.osgi.framework.wiring.BundleWiring; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MailboxActivator implements BundleActivator { + + private static final Logger LOGGER = LoggerFactory.getLogger(MailboxActivator.class); + + private CqlSession session; + private CqlIdentifier keyspace; + private String graphName; + + @Override + public void start(BundleContext context) { + buildSession(context); + registerService(context); + } + + private void buildSession(BundleContext context) { + + Bundle bundle = context.getBundle(); + BundleWiring bundleWiring
= bundle.adapt(BundleWiring.class); + ClassLoader classLoader = bundleWiring.getClassLoader(); + + LOGGER.info("Application class loader: {}", classLoader); + + // Use the application bundle class loader to load classes by reflection when + // they are located in the application bundle. This is not strictly required + // as the driver has a "DynamicImport-Package: *" directive which makes it capable + // of loading classes outside its bundle. + CqlSessionBuilder builder = CqlSession.builder().withClassLoader(classLoader); + + // Use the application bundle class loader to load configuration resources located + // in the application bundle. This is required, otherwise these resources will + // not be found. + ProgrammaticDriverConfigLoaderBuilder configLoaderBuilder = + DriverConfigLoader.programmaticBuilder(classLoader); + + String contactPointsStr = context.getProperty("cassandra.contactpoints"); + if (contactPointsStr == null) { + contactPointsStr = "127.0.0.1"; + } + LOGGER.info("Contact points: {}", contactPointsStr); + + String portStr = context.getProperty("cassandra.port"); + if (portStr == null) { + portStr = "9042"; + } + LOGGER.info("Port: {}", portStr); + int port = Integer.parseInt(portStr); + + List<InetSocketAddress> contactPoints = + Stream.of(contactPointsStr.split(",")) + .map((String host) -> InetSocketAddress.createUnresolved(host, port)) + .collect(Collectors.toList()); + builder.addContactPoints(contactPoints); + + String keyspaceStr = context.getProperty("cassandra.keyspace"); + if (keyspaceStr == null) { + keyspaceStr = "mailbox"; + } + LOGGER.info("Keyspace: {}", keyspaceStr); + keyspace = CqlIdentifier.fromCql(keyspaceStr); + + String lbp = context.getProperty("cassandra.lbp"); + if (lbp != null) { + LOGGER.info("Custom LBP: " + lbp); + configLoaderBuilder.withString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, lbp); + } else { + LOGGER.info("Custom LBP: NO"); + } + + String datacenter = context.getProperty("cassandra.datacenter"); + if (datacenter != null) { + LOGGER.info("Custom datacenter: " + datacenter); + configLoaderBuilder.withString( + DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, datacenter); + } else { + LOGGER.info("Custom datacenter: NO"); + } + + String compression = context.getProperty("cassandra.compression"); + if (compression != null) { + LOGGER.info("Compression: {}", compression); + configLoaderBuilder.withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compression); + } else { + LOGGER.info("Compression: NONE"); + } + + graphName = context.getProperty("cassandra.graph.name"); + if (graphName != null) { + LOGGER.info("Graph name: {}", graphName); + configLoaderBuilder.withString(DseDriverOption.GRAPH_NAME, graphName); + configLoaderBuilder.withString( + DseDriverOption.GRAPH_SUB_PROTOCOL, GraphProtocol.GRAPH_BINARY_1_0.toInternalCode()); + } else { + LOGGER.info("Graph: NONE"); + } + + builder.withConfigLoader(configLoaderBuilder.build()); + + LOGGER.info("Initializing session"); + session = builder.build(); + LOGGER.info("Session initialized"); + } + + private void registerService(BundleContext context) { + MailboxServiceImpl mailbox; + if ("true".equalsIgnoreCase(context.getProperty("cassandra.reactive"))) { + mailbox = new ReactiveMailboxServiceImpl(session, keyspace); + } else if ("true".equalsIgnoreCase(context.getProperty("cassandra.geo"))) { + mailbox = new GeoMailboxServiceImpl(session, keyspace); + } else if ("true".equalsIgnoreCase(context.getProperty("cassandra.graph"))) { + mailbox = new GraphMailboxServiceImpl(session, keyspace, graphName); }
else { + mailbox = new MailboxServiceImpl(session, keyspace); + } + mailbox.init(); + @SuppressWarnings("JdkObsolete") + Dictionary<String, String> properties = new Hashtable<>(); + context.registerService(MailboxService.class.getName(), mailbox, properties); + LOGGER.info("Mailbox Service successfully initialized"); + } + + @Override + public void stop(BundleContext context) { + if (session != null) { + LOGGER.info("Closing session"); + session.close(); + session = null; + LOGGER.info("Session closed"); + } + } +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMapper.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMapper.java new file mode 100644 index 00000000000..a67df807e2f --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMapper.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; + +@Mapper +public interface MailboxMapper { + + @DaoFactory + MailboxMessageDao mailboxMessageDao(@DaoKeyspace CqlIdentifier keyspace); +}
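MailboxActivator above only publishes the service; the lookup side is not part of this patch. A hedged sketch of what a consumer bundle would do, using only the standard OSGi API (the class name is hypothetical):

```java
import com.datastax.oss.driver.api.osgi.service.MailboxService;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;

// Hypothetical consumer: resolves the service that MailboxActivator.registerService()
// published under the MailboxService class name.
public class MailboxConsumerSketch {

  static MailboxService lookup(BundleContext context) {
    ServiceReference<MailboxService> ref = context.getServiceReference(MailboxService.class);
    return ref == null ? null : context.getService(ref);
  }
}
```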
diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMessageDao.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMessageDao.java new file mode 100644 index 00000000000..9f6363d90a4 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxMessageDao.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service; + +import com.datastax.oss.driver.api.core.PagingIterable; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; + +@Dao +@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) +public interface MailboxMessageDao { + + @Insert + void save(MailboxMessage message); + + @Select + PagingIterable<MailboxMessage> findByRecipient(String recipient); +}
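The @Mapper/@Dao pair above is wired together by the driver's annotation processor, which generates a MailboxMapperBuilder class at compile time (it is instantiated in MailboxServiceImpl.createDaos() further down). A minimal usage sketch, assuming it lives in the same package and that the keyspace already exists:

```java
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.CqlSession;

// Sketch: obtaining a DAO from the generated builder. "mailbox" is the default
// keyspace name used by MailboxActivator.
public class MapperWiringSketch {

  static MailboxMessageDao createDao(CqlSession session) {
    MailboxMapper mapper = new MailboxMapperBuilder(session).build();
    // Each DAO instance is bound to the keyspace passed to the @DaoFactory method.
    return mapper.mailboxMessageDao(CqlIdentifier.fromCql("mailbox"));
  }
}
```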
diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxServiceImpl.java new file mode 100644 index 00000000000..1da97d7d611 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/MailboxServiceImpl.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; +import static com.datastax.oss.driver.api.querybuilder.relation.Relation.column; + +import com.codahale.metrics.Timer; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.metrics.DefaultSessionMetric; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import java.util.Optional; +import net.jcip.annotations.GuardedBy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class MailboxServiceImpl implements MailboxService { + + private static final Logger LOGGER = LoggerFactory.getLogger(MailboxServiceImpl.class); + + protected final CqlSession session; + protected final CqlIdentifier keyspace; + + @GuardedBy("this") + protected boolean initialized = false; + + private PreparedStatement deleteStatement; + + protected MailboxMessageDao dao; + + public MailboxServiceImpl(CqlSession session, CqlIdentifier keyspace) { + this.session = session; + this.keyspace = keyspace; + } + + public synchronized void init() { + if (initialized) { + return; + } + createSchema(); + prepareStatements(); + createDaos(); + printMetrics(); + initialized = true; + } + + protected void createSchema() { + session.execute("DROP KEYSPACE IF EXISTS " + keyspace); + session.execute( + "CREATE KEYSPACE IF NOT EXISTS " + + keyspace + + " with replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}"); + session.execute( + "CREATE TABLE " + + keyspace + + "."
+ + MailboxMessage.MAILBOX_TABLE + + " (" + + "recipient text," + + "timestamp timestamp," + + "sender text," + + "body text," + + "PRIMARY KEY (recipient, timestamp))"); + } + + protected void prepareStatements() { + deleteStatement = + session.prepare( + deleteFrom(keyspace, MailboxMessage.MAILBOX_TABLE) + .where(column("recipient").isEqualTo(bindMarker())) + .build()); + } + + protected void createDaos() { + MailboxMapper mapper = new MailboxMapperBuilder(session).build(); + dao = mapper.mailboxMessageDao(keyspace); + } + + protected void printMetrics() { + // Exercise metrics + if (session.getMetrics().isPresent()) { + Metrics metrics = session.getMetrics().get(); + Optional<Timer> cqlRequests = metrics.getSessionMetric(DefaultSessionMetric.CQL_REQUESTS); + cqlRequests.ifPresent( + counter -> LOGGER.info("Number of CQL requests: {}", counter.getCount())); + } + } + + @Override + public Iterable<MailboxMessage> getMessages(String recipient) throws MailboxException { + try { + return dao.findByRecipient(recipient); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public void sendMessage(MailboxMessage message) throws MailboxException { + try { + dao.save(message); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public void clearMailbox(String recipient) throws MailboxException { + try { + BoundStatement statement = deleteStatement.bind(recipient); + session.execute(statement); + } catch (Exception e) { + throw new MailboxException(e); + } + } +}
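For reference, the query-builder chain in prepareStatements() renders to a plain CQL string before being prepared. A standalone sketch with the keyspace hardcoded for illustration (class name hypothetical):

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom;
import static com.datastax.oss.driver.api.querybuilder.relation.Relation.column;

import com.datastax.oss.driver.api.core.cql.SimpleStatement;

// Sketch of the statement built in MailboxServiceImpl.prepareStatements().
public class DeleteStatementSketch {

  public static void main(String[] args) {
    SimpleStatement statement =
        deleteFrom("mailbox", "messages_by_recipient")
            .where(column("recipient").isEqualTo(bindMarker()))
            .build();
    // Prints: DELETE FROM mailbox.messages_by_recipient WHERE recipient=?
    System.out.println(statement.getQuery());
  }
}
```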
diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMapper.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMapper.java new file mode 100644 index 00000000000..3beb990c1c9 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMapper.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service.geo; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; + +@Mapper +public interface GeoMailboxMapper { + + @DaoFactory + GeoMailboxMessageDao mailboxMessageDao(@DaoKeyspace CqlIdentifier keyspace); +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMessageDao.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMessageDao.java new file mode 100644 index 00000000000..1ea255fbe1d --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxMessageDao.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service.geo; + +import com.datastax.oss.driver.api.core.PagingIterable; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Insert; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage; +import com.datastax.oss.driver.internal.osgi.service.MailboxMessageDao; + +@Dao +@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) +public interface GeoMailboxMessageDao extends MailboxMessageDao { + + @Insert + void save(GeoMailboxMessage message); + + @Select + PagingIterable<GeoMailboxMessage> findGeoByRecipient(String recipient); +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxServiceImpl.java new file mode 100644 index 00000000000..415ffaa35f4 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/geo/GeoMailboxServiceImpl.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service.geo; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; +import static com.datastax.oss.driver.api.querybuilder.relation.Relation.column; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage; +import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService; +import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; + +public class GeoMailboxServiceImpl extends MailboxServiceImpl implements GeoMailboxService { + + private PreparedStatement deleteGeoStatement; + private GeoMailboxMessageDao geoDao; + + public GeoMailboxServiceImpl(CqlSession session, CqlIdentifier keyspace) { + super(session, keyspace); + } + + @Override + protected void createSchema() { + super.createSchema(); + session.execute( + "CREATE TABLE " + + keyspace + + "." + + GeoMailboxMessage.MAILBOX_TABLE + + " (" + + "recipient text," + + "location 'PointType'," + + "sender text," + + "body text," + + "PRIMARY KEY (recipient, location))"); + } + + @Override + protected void prepareStatements() { + super.prepareStatements(); + deleteGeoStatement = + session.prepare( + deleteFrom(keyspace, GeoMailboxMessage.MAILBOX_TABLE) + .where(column("recipient").isEqualTo(bindMarker())) + .build()); + } + + @Override + protected void createDaos() { + super.createDaos(); + GeoMailboxMapper mapper = new GeoMailboxMapperBuilder(session).build(); + geoDao = mapper.mailboxMessageDao(keyspace); + } + + @Override + public void sendGeoMessage(GeoMailboxMessage message) throws MailboxException { + try { + geoDao.save(message); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public Iterable<GeoMailboxMessage> getGeoMessages(String recipient) throws MailboxException { + try { + return geoDao.findGeoByRecipient(recipient); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public void clearGeoMailbox(String recipient) throws MailboxException { + try { + BoundStatement statement = deleteGeoStatement.bind(recipient); + session.execute(statement); + } catch (Exception e) { + throw new MailboxException(e); + } + } +}
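The geo tests further down drive this service through GeoServiceChecks; as a quick illustration of the geo-specific entity, here is a hedged sketch of a round trip (Point.fromCoordinates is the DSE driver's factory for point values; the class itself is hypothetical):

```java
import com.datastax.dse.driver.api.core.data.geometry.Point;
import com.datastax.oss.driver.api.osgi.service.MailboxException;
import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage;
import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService;

// Hypothetical caller of the geo service implemented above.
public class GeoMailboxSketch {

  static void roundTrip(GeoMailboxService geo) throws MailboxException {
    Point paris = Point.fromCoordinates(2.3522, 48.8566); // x = longitude, y = latitude
    geo.sendGeoMessage(new GeoMailboxMessage("alice", paris, "bob", "bonjour"));
    for (GeoMailboxMessage message : geo.getGeoMessages("alice")) {
      System.out.println(message.getLocation() + " -> " + message.getBody());
    }
    geo.clearGeoMailbox("alice");
  }
}
```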
diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/graph/GraphMailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/graph/GraphMailboxServiceImpl.java new file mode 100644 index 00000000000..b4637a27258 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/graph/GraphMailboxServiceImpl.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service.graph; + +import static com.datastax.dse.driver.api.core.graph.DseGraph.g; +import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.unfold; + +import com.datastax.dse.driver.api.core.graph.FluentGraphStatement; +import com.datastax.dse.driver.api.core.graph.GraphNode; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.graph.GraphMailboxService; +import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; +import java.time.Instant; +import java.util.stream.Collectors; + +public class GraphMailboxServiceImpl extends MailboxServiceImpl implements GraphMailboxService { + + private final String graphName; + + public GraphMailboxServiceImpl(CqlSession session, CqlIdentifier keyspace, String graphName) { + super(session, keyspace); + this.graphName = graphName; + } + + @Override + protected void createSchema() { + super.createSchema(); + session.execute( + ScriptGraphStatement.newInstance( + String.format("system.graph('%s').ifExists().drop()", graphName)) + .setSystemQuery(true), + ScriptGraphStatement.SYNC); + session.execute( + ScriptGraphStatement.newInstance( + String.format("system.graph('%s').ifNotExists().coreEngine().create()", graphName)) + .setSystemQuery(true), + ScriptGraphStatement.SYNC); + session.execute( + ScriptGraphStatement.newInstance( + "schema.vertexLabel('message')" + + ".partitionBy('recipient', Text)" + + ".clusterBy('timestamp', Timestamp)" + + ".property('sender', Text)" + + ".property('body', Text)" + + ".create();")); + } + + @Override + public Iterable<MailboxMessage> getGraphMessages(String recipient) throws MailboxException { + FluentGraphStatement statement = + FluentGraphStatement.newInstance( + g.V().hasLabel("message").has("recipient", recipient).valueMap().by(unfold())); + try { + return session.execute(statement).all().stream() + .map(GraphNode::asMap) + .map( + vertex -> { + Instant timestamp = (Instant) vertex.get("timestamp"); + String sender = (String) vertex.get("sender"); + String body = (String) vertex.get("body"); + return new MailboxMessage(recipient, timestamp, sender, body); + }) + .collect(Collectors.toList()); + } catch (Exception e) { + throw new MailboxException(e); + } + } + + @Override + public void sendGraphMessage(MailboxMessage message) throws MailboxException { + FluentGraphStatement insertVertex = + FluentGraphStatement.newInstance( + g.addV("message") + .property("recipient", message.getRecipient()) + .property("timestamp", message.getTimestamp()) + .property("sender",
message.getSender()) + .property("body", message.getBody())); + try { + session.execute(insertVertex); + } catch (Exception e) { + throw new MailboxException(e); + } + } +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMapper.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMapper.java new file mode 100644 index 00000000000..7a1678c1ac8 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMapper.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service.reactive; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.mapper.annotations.DaoFactory; +import com.datastax.oss.driver.api.mapper.annotations.DaoKeyspace; +import com.datastax.oss.driver.api.mapper.annotations.Mapper; + +@Mapper +public interface ReactiveMailboxMapper { + + @DaoFactory + ReactiveMailboxMessageDao mailboxMessageDao(@DaoKeyspace CqlIdentifier keyspace); +} diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMessageDao.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMessageDao.java new file mode 100644 index 00000000000..fe6f34a839c --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxMessageDao.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.osgi.service.reactive; + +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.mapper.annotations.Dao; +import com.datastax.oss.driver.api.mapper.annotations.DefaultNullSavingStrategy; +import com.datastax.oss.driver.api.mapper.annotations.Select; +import com.datastax.oss.driver.api.mapper.entity.saving.NullSavingStrategy; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.internal.osgi.service.MailboxMessageDao; + +@Dao +@DefaultNullSavingStrategy(NullSavingStrategy.SET_TO_NULL) +public interface ReactiveMailboxMessageDao extends MailboxMessageDao { + + @Select + MappedReactiveResultSet<MailboxMessage> findByRecipientReactive(String recipient); +}
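MappedReactiveResultSet implements org.reactivestreams.Publisher, so the query defined above only executes once it is subscribed to. A hedged sketch of draining it with RxJava 2 (any Reactive Streams implementation would work; RxJava on the classpath is an assumption, as is the class name):

```java
import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet;
import com.datastax.oss.driver.api.osgi.service.MailboxMessage;
import com.datastax.oss.driver.internal.osgi.service.reactive.ReactiveMailboxMessageDao;
import io.reactivex.Flowable;

// Hypothetical consumer of the reactive DAO defined above.
public class ReactiveMailboxSketch {

  static void printAll(ReactiveMailboxMessageDao dao) {
    MappedReactiveResultSet<MailboxMessage> results = dao.findByRecipientReactive("alice");
    Flowable.fromPublisher(results)
        .map(MailboxMessage::getBody)
        .blockingForEach(System.out::println); // blocks until the stream completes
  }
}
```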
diff --git a/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxServiceImpl.java b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxServiceImpl.java new file mode 100644 index 00000000000..5333524e884 --- /dev/null +++ b/osgi-tests/src/main/java/com/datastax/oss/driver/internal/osgi/service/reactive/ReactiveMailboxServiceImpl.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.service.reactive; + +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.reactive.ReactiveMailboxService; +import com.datastax.oss.driver.internal.osgi.service.MailboxServiceImpl; + +public class ReactiveMailboxServiceImpl extends MailboxServiceImpl + implements ReactiveMailboxService { + + private ReactiveMailboxMessageDao reactiveDao; + + public ReactiveMailboxServiceImpl(CqlSession session, CqlIdentifier keyspace) { + super(session, keyspace); + } + + @Override + protected void createDaos() { + super.createDaos(); + ReactiveMailboxMapper mapper = new ReactiveMailboxMapperBuilder(session).build(); + reactiveDao = mapper.mailboxMessageDao(keyspace); + } + + @Override + public MappedReactiveResultSet<MailboxMessage> getMessagesReactive(String recipient) + throws MailboxException { + try { + return reactiveDao.findByRecipientReactive(recipient); + } catch (Exception e) { + throw new MailboxException(e); + } + } +} diff --git a/osgi-tests/src/main/resources/application.conf b/osgi-tests/src/main/resources/application.conf new file mode 100644 index 00000000000..0c3e8e76c98 --- /dev/null +++ b/osgi-tests/src/main/resources/application.conf @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+ +# Configuration overrides for integration tests +datastax-java-driver { + basic { + load-balancing-policy.class = DcInferringLoadBalancingPolicy + request.timeout = 10 seconds + graph.timeout = 10 seconds + } + advanced { + retry-policy.class = com.datastax.oss.driver.api.osgi.CustomRetryPolicy + connection { + init-query-timeout = 5 seconds + set-keyspace-timeout = 5 seconds + } + heartbeat.timeout = 5 seconds + control-connection.timeout = 5 seconds + request { + trace.interval = 1 second + warn-if-set-keyspace = false + } + graph { + name = "demo" + } + continuous-paging.timeout { + first-page = 10 seconds + other-pages = 10 seconds + } + metrics { + session.enabled = [cql-requests] + // Raise histogram bounds because the tests execute DDL queries with a higher timeout + session.cql_requests.highest_latency = 30 seconds + } + // adjust quiet period to 0 seconds to speed up tests + netty { + io-group { + shutdown {quiet-period = 0, timeout = 15, unit = SECONDS} + } + admin-group { + shutdown {quiet-period = 0, timeout = 15, unit = SECONDS} + } + } + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiCustomLoadBalancingPolicyIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiCustomLoadBalancingPolicyIT.java new file mode 100644 index 00000000000..99bd7294934 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiCustomLoadBalancingPolicyIT.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +/** + * Test that uses a policy from a separate bundle from the core driver to ensure that the driver is + * able to load that policy via Reflection. To support this, the driver uses + * DynamicImport-Package: *. 
+ */ +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +public class OsgiCustomLoadBalancingPolicyIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.testBundles(), + CoreOptions.systemProperty("cassandra.lbp") + // This LBP resides in the test-infra bundle and will be loaded by the driver + // class loader, thanks to the "DynamicImport-Package: *" directive + .value(SortingLoadBalancingPolicy.class.getName())); + } + + @Test + public void test_custom_lbp() throws Exception { + DefaultServiceChecks.checkService(service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiDefaultIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiDefaultIT.java new file mode 100644 index 00000000000..a4dec25d96f --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiDefaultIT.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +public class OsgiDefaultIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + // this configuration purposely excludes bundles whose resolution should be optional: + // ESRI, Reactive Streams and Tinkerpop. This makes it possible to validate that the driver + // can still work properly in an OSGi container as long as the missing packages are not accessed.
+ return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.testBundles()); + } + + @Test + public void test_default() throws Exception { + DefaultServiceChecks.checkService(service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGeoTypesIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGeoTypesIT.java new file mode 100644 index 00000000000..c5ca962a66b --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGeoTypesIT.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.checks.GeoServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "5.0", + description = "Requires geo types") +public class OsgiGeoTypesIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.esriBundles(), + BundleOptions.testBundles()); + } + + @Test + public void test_geo_types() throws Exception { + DefaultServiceChecks.checkService(service); + assertThat(service).isInstanceOf(GeoMailboxService.class); + GeoServiceChecks.checkServiceGeo((GeoMailboxService) service); + } +} diff --git 
a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGraphIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGraphIT.java new file mode 100644 index 00000000000..be6997b9d02 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiGraphIT.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.api.osgi.service.graph.GraphMailboxService; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.checks.GraphServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +@BackendRequirement( + type = BackendType.DSE, + minInclusive = "6.8", + description = "Requires Core Graph") +public class OsgiGraphIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.tinkerpopBundles(), + BundleOptions.testBundles()); + } + + @Test + public void test_graph() throws Exception { + DefaultServiceChecks.checkService(service); + assertThat(service).isInstanceOf(GraphMailboxService.class); + GraphServiceChecks.checkGraphService((GraphMailboxService) service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiLz4IT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiLz4IT.java new file mode 100644 index 00000000000..e8f470d3fdc --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiLz4IT.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +public class OsgiLz4IT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.lz4Bundle(), + BundleOptions.testBundles()); + } + + @Test + public void test_lz4_compression() throws Exception { + DefaultServiceChecks.checkService(service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiReactiveIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiReactiveIT.java new file mode 100644 index 00000000000..1710414b67d --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiReactiveIT.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
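The integration tests above all provision `BundleOptions.applicationBundle()`, which deploys the test application and sets the `cassandra.contactpoints`, `cassandra.port` and `cassandra.keyspace` system properties, while options such as `lz4Bundle()` add `cassandra.compression`. A hedged sketch (not part of this PR) of how a service implementation might consume those properties; the driver calls are public 4.x API, but the wiring and the datacenter name are assumptions:

```java
// Hedged sketch: build a CqlSession from the system properties that the
// BundleOptions helpers (shown later in this diff) set. The wiring and the
// datacenter name are assumptions, not part of the PR.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.net.InetSocketAddress;
import java.util.Locale;

final class SessionFromSystemProperties {
  static CqlSession build() {
    String host = System.getProperty("cassandra.contactpoints", "127.0.0.1");
    int port = Integer.getInteger("cassandra.port", 9042);
    // lz4Bundle()/snappyBundle() set "LZ4"/"SNAPPY"; the documented values for
    // this driver option are lower case, and "none" disables compression.
    String compression =
        System.getProperty("cassandra.compression", "none").toLowerCase(Locale.ROOT);
    return CqlSession.builder()
        .addContactPoint(new InetSocketAddress(host, port))
        .withLocalDatacenter("datacenter1") // assumption: default ccm DC name
        .withKeyspace(System.getProperty("cassandra.keyspace", "test_osgi"))
        .withConfigLoader(
            DriverConfigLoader.programmaticBuilder()
                .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compression)
                .build())
        .build();
  }
}
```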
+ */ +package com.datastax.oss.driver.internal.osgi; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.api.osgi.service.reactive.ReactiveMailboxService; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.checks.ReactiveServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +public class OsgiReactiveIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.reactiveBundles(), + BundleOptions.testBundles()); + } + + @Test + public void test_reactive() throws Exception { + DefaultServiceChecks.checkService(service); + assertThat(service).isInstanceOf(ReactiveMailboxService.class); + ReactiveServiceChecks.checkServiceReactive((ReactiveMailboxService) service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiShadedIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiShadedIT.java new file mode 100644 index 00000000000..780ed30874d --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiShadedIT.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.osgi; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +public class OsgiShadedIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreShadedBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + // Netty and Jackson are shaded + BundleOptions.testBundles()); + } + + @Test + public void test_shaded() throws Exception { + DefaultServiceChecks.checkService(service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiSnappyIT.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiSnappyIT.java new file mode 100644 index 00000000000..37abceeed7a --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/OsgiSnappyIT.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
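`OsgiShadedIT` above swaps in `driverCoreShadedBundle()` and deliberately omits the Netty and Jackson bundles, since the shaded core relocates them (the build elsewhere in this diff references the `com.datastax.oss.driver.shaded` prefix). A hedged sketch of an extra assertion one could add there; this check is an assumption, not part of the PR:

```java
// Hedged sketch: with only the shaded core provisioned, unshaded Netty
// classes should not be resolvable from the test probe.
import static org.junit.Assert.fail;

@Test
public void shaded_bundle_should_not_expose_unshaded_netty() {
  try {
    Class.forName("io.netty.buffer.ByteBuf");
    fail("unshaded Netty should not be visible when using the shaded core");
  } catch (ClassNotFoundException expected) {
    // expected: core-shaded embeds its own relocated copy of Netty
  }
}
```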
+ */ +package com.datastax.oss.driver.internal.osgi; + +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.internal.osgi.checks.DefaultServiceChecks; +import com.datastax.oss.driver.internal.osgi.support.BundleOptions; +import com.datastax.oss.driver.internal.osgi.support.CcmExamReactorFactory; +import com.datastax.oss.driver.internal.osgi.support.CcmPaxExam; +import javax.inject.Inject; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.ops4j.pax.exam.Configuration; +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.Option; +import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy; + +@RunWith(CcmPaxExam.class) +@ExamReactorStrategy(CcmExamReactorFactory.class) +@BackendRequirement(type = BackendType.CASSANDRA, maxExclusive = "4.0.0") +public class OsgiSnappyIT { + + @Inject MailboxService service; + + @Configuration + public Option[] config() { + return CoreOptions.options( + BundleOptions.applicationBundle(), + BundleOptions.driverCoreBundle(), + BundleOptions.driverQueryBuilderBundle(), + BundleOptions.driverMapperRuntimeBundle(), + BundleOptions.commonBundles(), + BundleOptions.nettyBundles(), + BundleOptions.jacksonBundles(), + BundleOptions.snappyBundle(), + BundleOptions.testBundles()); + } + + @Test + public void test_snappy_compression() throws Exception { + DefaultServiceChecks.checkService(service); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/DefaultServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/DefaultServiceChecks.java new file mode 100644 index 00000000000..90a6a2e4c8b --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/DefaultServiceChecks.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.checks; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.MailboxService; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +public class DefaultServiceChecks { + + /** + * Exercises an OSGi service provided by an OSGi bundle that depends on the driver. Ensures that + * queries can be made through the service with the current given configuration. + */ + public static void checkService(MailboxService service) throws Exception { + // Insert some data into mailbox for a particular user. 
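+ // (Round-trip check: every message sent through the OSGi-published service
+ // must come back verbatim from getMessages, which exercises the driver's
+ // session creation, insert and select paths from inside an OSGi container.)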
+ String recipient = "user@datastax.com"; + try { + List<MailboxMessage> insertedMessages = new ArrayList<>(); + for (int i = 0; i < 30; i++) { + Instant timestamp = Instant.ofEpochMilli(i); + MailboxMessage message = new MailboxMessage(recipient, timestamp, "sender" + i, "body" + i); + insertedMessages.add(message); + service.sendMessage(message); + } + Iterable<MailboxMessage> retrievedMessages = service.getMessages(recipient); + assertThat(retrievedMessages).containsExactlyElementsOf(insertedMessages); + } finally { + service.clearMailbox(recipient); + } + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GeoServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GeoServiceChecks.java new file mode 100644 index 00000000000..a0fb35e2df5 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GeoServiceChecks.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.checks; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.data.geometry.Point; +import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxMessage; +import com.datastax.oss.driver.api.osgi.service.geo.GeoMailboxService; +import java.util.ArrayList; +import java.util.List; + +public class GeoServiceChecks { + + public static void checkServiceGeo(GeoMailboxService service) throws Exception { + // Insert some data into mailbox for a particular user. + String recipient = "user@datastax.com"; + try { + List<GeoMailboxMessage> insertedMessages = new ArrayList<>(); + for (int i = 0; i < 30; i++) { + Point location = Point.fromCoordinates(i, i); + GeoMailboxMessage message = + new GeoMailboxMessage(recipient, location, "sender" + i, "body" + i); + insertedMessages.add(message); + service.sendGeoMessage(message); + } + Iterable<GeoMailboxMessage> retrievedMessages = service.getGeoMessages(recipient); + assertThat(retrievedMessages).containsExactlyInAnyOrderElementsOf(insertedMessages); + } finally { + service.clearGeoMailbox(recipient); + } + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GraphServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GraphServiceChecks.java new file mode 100644 index 00000000000..40bda10900b --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/GraphServiceChecks.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.checks; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.graph.GraphMailboxService; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +public class GraphServiceChecks { + + public static void checkGraphService(GraphMailboxService service) throws MailboxException { + // Insert some data into mailbox for a particular user. + String recipient = "user@datastax.com"; + List<MailboxMessage> insertedMessages = new ArrayList<>(); + for (int i = 0; i < 30; i++) { + Instant timestamp = Instant.ofEpochMilli(i); + MailboxMessage message = new MailboxMessage(recipient, timestamp, "sender" + i, "body" + i); + insertedMessages.add(message); + service.sendGraphMessage(message); + } + Iterable<MailboxMessage> retrievedMessages = service.getGraphMessages(recipient); + assertThat(retrievedMessages).containsExactlyElementsOf(insertedMessages); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/ReactiveServiceChecks.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/ReactiveServiceChecks.java new file mode 100644 index 00000000000..fc4aa3448af --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/checks/ReactiveServiceChecks.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
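One asymmetry worth noting: unlike `DefaultServiceChecks`, `checkGraphService` above performs no cleanup, so graph messages persist after the test run. If symmetric cleanup were wanted, a hedged sketch (assuming a hypothetical `clearGraphMailbox` method, which this diff does not define) could mirror the other checks:

```java
// Hedged sketch only: clearGraphMailbox does not exist in this diff.
try {
  // ... send and verify the 30 graph messages as above ...
} finally {
  service.clearGraphMailbox(recipient); // hypothetical cleanup helper
}
```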
+ */ +package com.datastax.oss.driver.internal.osgi.checks; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.mapper.reactive.MappedReactiveResultSet; +import com.datastax.oss.driver.api.osgi.service.MailboxException; +import com.datastax.oss.driver.api.osgi.service.MailboxMessage; +import com.datastax.oss.driver.api.osgi.service.reactive.ReactiveMailboxService; +import io.reactivex.Flowable; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +public class ReactiveServiceChecks { + + public static void checkServiceReactive(ReactiveMailboxService service) throws MailboxException { + // Insert some data into mailbox for a particular user. + String recipient = "user@datastax.com"; + try { + List<MailboxMessage> insertedMessages = new ArrayList<>(); + for (int i = 0; i < 30; i++) { + Instant timestamp = Instant.ofEpochMilli(i); + MailboxMessage message = new MailboxMessage(recipient, timestamp, "sender" + i, "body" + i); + insertedMessages.add(message); + service.sendMessage(message); + } + MappedReactiveResultSet<MailboxMessage> retrievedMessages = + service.getMessagesReactive(recipient); + List<MailboxMessage> messageList = + Flowable.fromPublisher(retrievedMessages).toList().blockingGet(); + assertThat(messageList).containsExactlyElementsOf(insertedMessages); + } finally { + service.clearMailbox(recipient); + } + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/BundleOptions.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/BundleOptions.java new file mode 100644 index 00000000000..378b515aa65 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/BundleOptions.java @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
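`MappedReactiveResultSet` implements `org.reactivestreams.Publisher`, so `ReactiveServiceChecks` above uses RxJava only as a convenient subscriber (the `reactiveBundles()` option provisions both `reactive-streams` and `rxjava`). A hedged variant of the final assertion, counting the published rows instead of materializing them:

```java
// Hedged sketch: stream-count the rows rather than collecting the whole list.
// Flowable.count() returns a Single<Long>.
long count =
    Flowable.fromPublisher(service.getMessagesReactive(recipient))
        .count()
        .blockingGet();
assertThat(count).isEqualTo(30L);
```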
+ */ +package com.datastax.oss.driver.internal.osgi.support; + +import static org.ops4j.pax.exam.CoreOptions.bundle; +import static org.ops4j.pax.exam.CoreOptions.junitBundles; +import static org.ops4j.pax.exam.CoreOptions.mavenBundle; +import static org.ops4j.pax.exam.CoreOptions.options; +import static org.ops4j.pax.exam.CoreOptions.systemProperty; +import static org.ops4j.pax.exam.CoreOptions.systemTimeout; +import static org.ops4j.pax.exam.CoreOptions.vmOption; + +import org.ops4j.pax.exam.CoreOptions; +import org.ops4j.pax.exam.options.CompositeOption; +import org.ops4j.pax.exam.options.UrlProvisionOption; +import org.ops4j.pax.exam.options.WrappedUrlProvisionOption; + +public class BundleOptions { + + public static CompositeOption commonBundles() { + return () -> + options( + mavenBundle("org.apache.cassandra", "java-driver-guava-shaded").versionAsInProject(), + mavenBundle("io.dropwizard.metrics", "metrics-core").versionAsInProject(), + mavenBundle("org.slf4j", "slf4j-api").versionAsInProject(), + mavenBundle("org.hdrhistogram", "HdrHistogram").versionAsInProject(), + mavenBundle("com.typesafe", "config").versionAsInProject(), + mavenBundle("com.datastax.oss", "native-protocol").versionAsInProject(), + logbackBundles(), + debugOptions()); + } + + public static CompositeOption applicationBundle() { + return () -> + options( + systemProperty("cassandra.contactpoints").value("127.0.0.1"), + systemProperty("cassandra.port").value("9042"), + systemProperty("cassandra.keyspace").value("test_osgi"), + bundle("reference:file:target/classes")); + } + + public static UrlProvisionOption driverCoreBundle() { + return bundle("reference:file:../core/target/classes"); + } + + public static UrlProvisionOption driverCoreShadedBundle() { + return bundle("reference:file:../core-shaded/target/classes"); + } + + public static UrlProvisionOption driverQueryBuilderBundle() { + return bundle("reference:file:../query-builder/target/classes"); + } + + public static UrlProvisionOption driverMapperRuntimeBundle() { + return bundle("reference:file:../mapper-runtime/target/classes"); + } + + public static UrlProvisionOption driverTestInfraBundle() { + return bundle("reference:file:../test-infra/target/classes"); + } + + public static CompositeOption testBundles() { + return () -> + options( + driverTestInfraBundle(), + mavenBundle("org.apache.commons", "commons-exec").versionAsInProject(), + mavenBundle("org.assertj", "assertj-core").versionAsInProject(), + mavenBundle("org.awaitility", "awaitility").versionAsInProject(), + mavenBundle("org.hamcrest", "hamcrest").versionAsInProject(), + junitBundles()); + } + + public static CompositeOption nettyBundles() { + return () -> + options( + mavenBundle("io.netty", "netty-handler").versionAsInProject(), + mavenBundle("io.netty", "netty-buffer").versionAsInProject(), + mavenBundle("io.netty", "netty-codec").versionAsInProject(), + mavenBundle("io.netty", "netty-common").versionAsInProject(), + mavenBundle("io.netty", "netty-transport").versionAsInProject(), + mavenBundle("io.netty", "netty-transport-native-unix-common").versionAsInProject(), + mavenBundle("io.netty", "netty-resolver").versionAsInProject()); + } + + public static CompositeOption logbackBundles() { + return () -> + options( + mavenBundle("ch.qos.logback", "logback-classic").versionAsInProject(), + mavenBundle("ch.qos.logback", "logback-core").versionAsInProject(), + systemProperty("logback.configurationFile") + .value("file:src/test/resources/logback-test.xml")); + } + + public static CompositeOption 
jacksonBundles() { + return () -> + options( + mavenBundle("com.fasterxml.jackson.core", "jackson-databind").versionAsInProject(), + mavenBundle("com.fasterxml.jackson.core", "jackson-core").versionAsInProject(), + mavenBundle("com.fasterxml.jackson.core", "jackson-annotations").versionAsInProject()); + } + + public static CompositeOption lz4Bundle() { + return () -> + options( + mavenBundle("at.yawk.lz4", "lz4-java").versionAsInProject(), + systemProperty("cassandra.compression").value("LZ4")); + } + + public static CompositeOption snappyBundle() { + return () -> + options( + mavenBundle("org.xerial.snappy", "snappy-java").versionAsInProject(), + systemProperty("cassandra.compression").value("SNAPPY")); + } + + public static CompositeOption tinkerpopBundles() { + return () -> + options( + CoreOptions.wrappedBundle( + mavenBundle("org.apache.tinkerpop", "gremlin-core").versionAsInProject()) + .exports( + // avoid exporting 'org.apache.tinkerpop.gremlin.*' as other Tinkerpop jars have + // this root package as well + "org.apache.tinkerpop.gremlin.jsr223.*", + "org.apache.tinkerpop.gremlin.process.*", + "org.apache.tinkerpop.gremlin.structure.*", + "org.apache.tinkerpop.gremlin.util.*") + .bundleSymbolicName("org.apache.tinkerpop.gremlin-core") + .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), + CoreOptions.wrappedBundle( + mavenBundle("org.apache.tinkerpop", "tinkergraph-gremlin").versionAsInProject()) + .exports("org.apache.tinkerpop.gremlin.tinkergraph.*") + .bundleSymbolicName("org.apache.tinkerpop.tinkergraph-gremlin") + .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), + CoreOptions.wrappedBundle( + mavenBundle("org.apache.tinkerpop", "gremlin-shaded").versionAsInProject()) + .exports("org.apache.tinkerpop.shaded.*") + .bundleSymbolicName("org.apache.tinkerpop.gremlin-shaded") + .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), + // Note: the versions below are hard-coded because they shouldn't change very often, + // but if the tests fail because of them, we should consider parameterizing them + mavenBundle("com.sun.activation", "jakarta.activation", "2.0.1"), + mavenBundle("com.sun.mail", "mailapi", "2.0.1"), + mavenBundle("org.apache.commons", "commons-text", "1.8"), + mavenBundle("org.apache.commons", "commons-configuration2", "2.9.0"), + CoreOptions.wrappedBundle(mavenBundle("commons-logging", "commons-logging", "1.1.1")) + .exports("org.apache.commons.logging.*") + .bundleVersion("1.1.1") + .bundleSymbolicName("org.apache.commons.commons-logging") + .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), + mavenBundle("commons-collections", "commons-collections", "3.2.2"), + mavenBundle("org.apache.commons", "commons-lang3", "3.8.1"), + mavenBundle("commons-lang", "commons-lang", "2.6"), + CoreOptions.wrappedBundle(mavenBundle("org.javatuples", "javatuples", "1.2")) + .exports("org.javatuples.*") + .bundleVersion("1.2") + .bundleSymbolicName("org.javatuples") + .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), + systemProperty("cassandra.graph").value("true"), + systemProperty("cassandra.graph.name").value("test_osgi_graph")); + } + + public static CompositeOption esriBundles() { + return () -> + options( + CoreOptions.wrappedBundle( + mavenBundle("com.esri.geometry", "esri-geometry-api").versionAsInProject()) + .exports("com.esri.core.geometry.*") + .imports("org.json", "org.codehaus.jackson") + .bundleSymbolicName("com.esri.core.geometry") + 
.overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL), + mavenBundle("org.json", "json").versionAsInProject(), + mavenBundle("org.codehaus.jackson", "jackson-core-asl").versionAsInProject(), + systemProperty("cassandra.geo").value("true")); + } + + public static CompositeOption reactiveBundles() { + return () -> + options( + mavenBundle("org.reactivestreams", "reactive-streams").versionAsInProject(), + mavenBundle("io.reactivex.rxjava2", "rxjava").versionAsInProject(), + systemProperty("cassandra.reactive").value("true")); + } + + private static CompositeOption debugOptions() { + boolean debug = Boolean.getBoolean("osgi.debug"); + if (debug) { + return () -> + options( + vmOption("-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"), + systemTimeout(Long.MAX_VALUE)); + } else { + return CoreOptions::options; + } + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmExamReactorFactory.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmExamReactorFactory.java new file mode 100644 index 00000000000..eb9e71a76d9 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmExamReactorFactory.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.support; + +import java.util.List; +import org.ops4j.pax.exam.TestContainer; +import org.ops4j.pax.exam.TestProbeBuilder; +import org.ops4j.pax.exam.spi.StagedExamReactor; +import org.ops4j.pax.exam.spi.StagedExamReactorFactory; + +public class CcmExamReactorFactory implements StagedExamReactorFactory { + + @Override + public StagedExamReactor create(List containers, List mProbes) { + return new CcmStagedReactor(containers, mProbes); + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmPaxExam.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmPaxExam.java new file mode 100644 index 00000000000..d872acfa2b5 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmPaxExam.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
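`BundleOptions.tinkerpopBundles()` and `esriBundles()` above rely on Pax Exam's wrap facility (provisioned via `pax-url-wrap` in the parent POM later in this diff) to turn plain, non-OSGi jars into bundles with a generated manifest, restricting the exported packages to avoid split packages. A hedged sketch of the same recipe for some other plain jar; the coordinates, exports and symbolic name are placeholders:

```java
// Hedged sketch: wrapping a hypothetical non-OSGi jar the way
// tinkerpopBundles() wraps gremlin-core.
Option plainJarAsBundle =
    CoreOptions.wrappedBundle(
            CoreOptions.mavenBundle("com.example", "plain-jar").versionAsInProject())
        .exports("com.example.api.*") // export only what the probe needs
        .bundleSymbolicName("com.example.plain-jar")
        .overwriteManifest(WrappedUrlProvisionOption.OverwriteMode.FULL);
```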
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.osgi.support; + +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; +import org.junit.AssumptionViolatedException; +import org.junit.runner.Description; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunNotifier; +import org.junit.runners.model.InitializationError; +import org.ops4j.pax.exam.junit.PaxExam; + +public class CcmPaxExam extends PaxExam { + + public CcmPaxExam(Class klass) throws InitializationError { + super(klass); + } + + @Override + public void run(RunNotifier notifier) { + Description description = getDescription(); + if (BackendRequirementRule.meetsDescriptionRequirements(description)) { + super.run(notifier); + } else { + // requirements not met, throw reasoning assumption to skip test + AssumptionViolatedException e = + new AssumptionViolatedException(BackendRequirementRule.buildReasonString(description)); + notifier.fireTestAssumptionFailed(new Failure(description, e)); + } + } +} diff --git a/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmStagedReactor.java b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmStagedReactor.java new file mode 100644 index 00000000000..ce4d9095361 --- /dev/null +++ b/osgi-tests/src/test/java/com/datastax/oss/driver/internal/osgi/support/CcmStagedReactor.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
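`CcmPaxExam` above gates the whole Pax Exam run on `@BackendRequirement`: when the CCM backend does not qualify, the suite is reported as a skipped assumption rather than a failure. A minimal integration test wiring all of these pieces together, assembled from the tests earlier in this diff (`MyFeatureIT` and the requirement shown are hypothetical; imports match `OsgiGraphIT`):

```java
// Minimal skeleton assembled from the tests above.
@RunWith(CcmPaxExam.class)
@ExamReactorStrategy(CcmExamReactorFactory.class)
@BackendRequirement(type = BackendType.DSE, minInclusive = "6.8", description = "example")
public class MyFeatureIT {

  @Inject MailboxService service;

  @Configuration
  public Option[] config() {
    return CoreOptions.options(
        BundleOptions.applicationBundle(),
        BundleOptions.driverCoreBundle(),
        BundleOptions.driverQueryBuilderBundle(),
        BundleOptions.driverMapperRuntimeBundle(),
        BundleOptions.commonBundles(),
        BundleOptions.nettyBundles(),
        BundleOptions.jacksonBundles(),
        BundleOptions.testBundles());
  }

  @Test
  public void my_feature() throws Exception {
    DefaultServiceChecks.checkService(service);
  }
}
```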
+ */ +package com.datastax.oss.driver.internal.osgi.support; + +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import java.util.List; +import java.util.Objects; +import net.jcip.annotations.GuardedBy; +import org.ops4j.pax.exam.TestContainer; +import org.ops4j.pax.exam.TestProbeBuilder; +import org.ops4j.pax.exam.spi.reactors.AllConfinedStagedReactor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class CcmStagedReactor extends AllConfinedStagedReactor { + + private static final Logger LOGGER = LoggerFactory.getLogger(CcmStagedReactor.class); + + public static final CcmBridge CCM_BRIDGE; + + public static final Version DSE_5_0 = Objects.requireNonNull(Version.parse("5.0")); + + static { + CcmBridge.Builder builder = CcmBridge.builder().withNodes(1); + if (CcmBridge.isDistributionOf(BackendType.DSE, (dist, cass) -> dist.compareTo(DSE_5_0) >= 0)) { + builder.withDseWorkloads("graph"); + } + CCM_BRIDGE = builder.build(); + } + + @GuardedBy("this") + private boolean running = false; + + public CcmStagedReactor(List containers, List mProbes) { + super(containers, mProbes); + } + + @Override + public synchronized void beforeSuite() { + if (!running) { + LOGGER.info( + "Starting CCM, running {} version {}", + CcmBridge.DISTRIBUTION, + CcmBridge.getDistributionVersion()); + CCM_BRIDGE.create(); + CCM_BRIDGE.start(); + LOGGER.info("CCM started"); + running = true; + Runtime.getRuntime() + .addShutdownHook( + new Thread( + () -> { + try { + afterSuite(); + } catch (Exception e) { + // silently remove as may have already been removed. + } + })); + } + } + + @Override + public synchronized void afterSuite() { + if (running) { + LOGGER.info("Stopping CCM"); + CCM_BRIDGE.stop(); + CCM_BRIDGE.close(); + running = false; + LOGGER.info("CCM stopped"); + } + } +} diff --git a/osgi-tests/src/test/resources/exam.properties b/osgi-tests/src/test/resources/exam.properties new file mode 100644 index 00000000000..ad702b0672c --- /dev/null +++ b/osgi-tests/src/test/resources/exam.properties @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
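`CcmStagedReactor` above keeps a single CCM cluster alive for the whole suite (guarded by the `running` flag) even though `AllConfinedStagedReactor` stages each test in its own container, and it registers a JVM shutdown hook as a safety net in case `afterSuite` is never reached. Its static initializer enables the DSE `graph` workload only when the backend qualifies; a hedged sketch of extending that with a second workload (`solr` is a placeholder, and passing two workloads assumes `withDseWorkloads` is varargs, which this diff does not show):

```java
// Hedged sketch: conditionally enabling a second DSE workload.
static {
  CcmBridge.Builder builder = CcmBridge.builder().withNodes(1);
  if (CcmBridge.isDistributionOf(
      BackendType.DSE, (dist, cass) -> dist.compareTo(DSE_5_0) >= 0)) {
    builder.withDseWorkloads("graph", "solr"); // "solr" is hypothetical here
  }
  CCM_BRIDGE = builder.build();
}
```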
+# + +pax.exam.system=test +pax.exam.logging=none \ No newline at end of file diff --git a/osgi-tests/src/test/resources/logback-test.xml b/osgi-tests/src/test/resources/logback-test.xml new file mode 100644 index 00000000000..6c2a3f70250 --- /dev/null +++ b/osgi-tests/src/test/resources/logback-test.xml @@ -0,0 +1,36 @@ + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + + + diff --git a/performance/README.md b/performance/README.md new file mode 100644 index 00000000000..ff66a453e9b --- /dev/null +++ b/performance/README.md @@ -0,0 +1,95 @@ + + +# How to run the Driver duration tests + +Note: the procedure described in this page is currently only accessible to DataStax employees. + +## Overview + +A duration test applies a constant, pre-defined load to the cluster for an extended period of time, +typically 2 or 3 days, while also generating some chaos by randomly restarting nodes. The load is +a mix of reads, writes, and deletes. + +Duration tests are useful to detect performance regressions between 2 different driver versions. + +The Java Driver duration tests are stored in a [private +repository](https://github.com/riptano/driver-examples/tree/java-driver-4.x/java/durationTest/) +accessible only to DataStax employees. + +A duration test executes in an infinite loop the following actions: + +1. Confirm row does not exist +2. Write row +3. Confirm read of row +4. Delete row +5. Confirm row does not exist + +The actions are performed randomly via SimpleStatements, BatchStatements (except on reads), and +PreparedStatements. + +## Running the duration tests on DataStax Fallout + +DataStax internal Fallout server has modules that allow to automate running and monitoring duration +tests. + +### Step 0: Set up a Graphite server + +1. If you haven't done this yet, create a new Fallout test based on the [graphite-setup.yaml] + template. +2. Run the test and wait for its successful completion. + * Choose a `keep_alive` parameter that is large enough to run all the planned duration tests. + E.g. if you intend to run duration tests for 10 days, set this parameter to a value greater + than or equal to `10d`. The default is 15 days. +3. Obtain the IP of the Graphite server: + * Navigate to the test artifacts. The IP can be found in the `ctool-cluster-info.txt` file of + the server group: + ![ctool-cluster-info](ctool-cluster-info.png) + * Log in to the Graphite server to check that the server was correctly set up: + `http://:3000` (VPN required). + The username/password is Graphite's default: `admin/admin`. + +Two Grafana dashboards should be loaded automatically: + +* `Java Driver 4 Duration Test Metrics (aggregate)`: provides high-level information such as + the number of completed tests per minute. Useful to compare different test runs. +* `Java Driver 4 Duration Test Metrics (focus)`: provides detailed information for one specific + test run. Can be useful to drill down on issues encountered during the test, or to inspect + latencies, throughput, etc. + +If the above Grafana dashboards are not loaded for some reason, they can be found in this [private +repository](https://github.com/riptano/testeng-devtools/tree/master/duration-tests/java/grafana). + +### Steps 1 to N: Run duration tests and compare results + +1. If you haven't done this yet, create a new Fallout test based on the [duration-test.yaml] + template. +2. 
For each combination of server and driver that you wish to test, launch a distinct test run and + modify its parameters to match the desired scenario: + * Change `server_type` and`server_version` to match the exact server you plan on testing + against; + * Change `driver_rev` and `driver_label` to be whatever driver revision you are using ( + `driver_label` is merely for reporting purposes); + * Don't forget to change the `graphite_host` parameter to match the Graphite server IP obtained + in the previous step; + * Finally, choose the desired duration (default is 2 days). +3. Run the test and monitor the performance on the Graphite server. + +Once a test run is finished, the cluster and the client VMs are destroyed, but their logs are +conserved as test artifacts in Fallout. diff --git a/performance/ctool-cluster-info.png b/performance/ctool-cluster-info.png new file mode 100644 index 00000000000..550b077b7eb Binary files /dev/null and b/performance/ctool-cluster-info.png differ diff --git a/performance/duration-test.yaml b/performance/duration-test.yaml new file mode 100644 index 00000000000..6e718f2add8 --- /dev/null +++ b/performance/duration-test.yaml @@ -0,0 +1,99 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Possible values: cassandra or dse +server_type: cassandra +# Server version (e.g. 
3.11.7 or 6.8.8) +server_version: 3.11.7 +# The driver Git revision to checkout and build (can be a branch name, a tag name or a commit SHA) +driver_rev: 4.x +# A distinctive driver label to use, for reporting purposes (will appear in Graphite metric names) +driver_label: 4.10.0 +# The IP of a running Graphite server, see graphite-setup.yaml +graphite_host: 1.2.3.4 +# How long to run the duration test, default: 2 days +duration: 2d +# Cloud-specific settings +cloud_provider: nebula +cloud_tenant: drivers-automation +instance_type: m4.4xlarge + +--- + +ensemble: + server: + node.count: 3 + provisioner: + name: ctool + properties: + mark_for_reuse: false + cloud.provider: {{cloud_provider}} + cloud.tenant: {{cloud_tenant}} + cloud.instance.type: {{instance_type}} + configuration_manager: + - name: ctool + properties: + java.version: openjdk8 + product.install.type: tarball + product.type: {{server_type}} + product.version: {{server_version}} + cassandra.yaml: + hinted_handoff_enabled: false + datacenters: + datacenter1: + size: 3 + workload: cassandra + client: + node.count: 1 + provisioner: + name: ctool + properties: + mark_for_reuse: false + cloud.provider: {{cloud_provider}} + cloud.tenant: {{cloud_tenant}} + cloud.instance.type: {{instance_type}} + configuration_manager: + - name: ctool + properties: + java.version: openjdk8 + install.maven: true + - name: java_driver + properties: + oss.git.repository: git@github.com:datastax/java-driver.git + oss.git.branch: {{driver_rev}} + type: FOUR_X_OSS + - name: java_driver_duration_test + properties: + git.branch: java-driver-4.x +workload: + phases: + - run-duration-test: + module: java_driver_duration_test + properties: + is.four: true + duration: {{duration}} + graphite.host: {{graphite_host}} + graphite.prefix: duration-test-java-driver-{{driver_label}}-{{server_type}}-{{server_version}} + kill-nodes: + module: killnode_rhino + properties: + target.strategy: whitelist + target.number_of_nodes: 1 + target.selector: "*:*" + repeat.delay: 120 + repeat.iterations: 0 + graceful: true diff --git a/performance/graphite-setup.yaml b/performance/graphite-setup.yaml new file mode 100644 index 00000000000..99bb8ecc8cc --- /dev/null +++ b/performance/graphite-setup.yaml @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
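As promised in the README above, here is a hedged Java sketch of one iteration of the duration-test loop. The real test lives in the private repository and also randomizes between simple, batch and prepared statements and reports Graphite metrics; the keyspace, table and schema below are placeholders:

```java
// Hedged sketch of the duration-test loop: confirm absent, write, read back,
// delete, confirm absent again. Schema names are placeholders.
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Row;
import java.util.UUID;

final class DurationLoopSketch {
  static void loop(CqlSession session) {
    PreparedStatement insert =
        session.prepare("INSERT INTO duration.rows (id, payload) VALUES (?, ?)");
    PreparedStatement select =
        session.prepare("SELECT payload FROM duration.rows WHERE id = ?");
    PreparedStatement delete = session.prepare("DELETE FROM duration.rows WHERE id = ?");
    while (true) {
      UUID id = UUID.randomUUID();
      if (session.execute(select.bind(id)).one() != null) { // 1. must not exist
        throw new AssertionError("row already present: " + id);
      }
      session.execute(insert.bind(id, "payload")); // 2. write
      Row row = session.execute(select.bind(id)).one(); // 3. read back
      if (row == null || !"payload".equals(row.getString("payload"))) {
        throw new AssertionError("row not readable: " + id);
      }
      session.execute(delete.bind(id)); // 4. delete
      if (session.execute(select.bind(id)).one() != null) { // 5. must be gone
        throw new AssertionError("row not deleted: " + id);
      }
    }
  }
}
```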
+ +# How long should the Graphite server be kept alive, default: 15 days +keep_alive: 15d +# Cloud-specific settings +cloud_provider: nebula +cloud_tenant: drivers-automation +instance_type: m4.2xlarge + +--- + +ensemble: + server: + node.count: 1 + provisioner: + name: ctool + properties: + mark_for_reuse: true + cluster_ttl: {{keep_alive}} + cloud.provider: {{cloud_provider}} + cloud.tenant: {{cloud_tenant}} + cloud.instance.type: {{instance_type}} + configuration_manager: + - name: ctool_monitoring + properties: + graphite.create_server: true + client: + node.count: 1 + provisioner: + name: ctool + properties: + mark_for_reuse: false + cloud.provider: {{cloud_provider}} + cloud.tenant: {{cloud_tenant}} + cloud.instance.type: {{instance_type}} +workload: + phases: + - upload-dashboards-to-grafana: + module: bash + properties: + script: | + echo "Graphite server IP: ${FALLOUT_SERVER_NODE0_MONITORING_GRAPHITE_HOST}" + git clone git@github.com:riptano/testeng-devtools.git ${FALLOUT_SCRATCH_DIR}/dashboard + curl --user admin:admin -d "@${FALLOUT_SCRATCH_DIR}/dashboard/duration-tests/java/grafana/aggregate4.json" -X POST -H "Content-Type: application/json" http://${FALLOUT_SERVER_NODE0_MONITORING_GRAPHITE_HOST}:3000/api/dashboards/db/ + curl --user admin:admin -d "@${FALLOUT_SCRATCH_DIR}/dashboard/duration-tests/java/grafana/focus4.json" -X POST -H "Content-Type: application/json" http://${FALLOUT_SERVER_NODE0_MONITORING_GRAPHITE_HOST}:3000/api/dashboards/db/ + target.group: client diff --git a/pom.xml b/pom.xml index d191a3d7664..eb83459cfb4 100644 --- a/pom.xml +++ b/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + + org.apache + apache + 23 + + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT pom - - DataStax Java driver for Apache Cassandra(R) - - A driver for Apache Cassandra(R) 2.1+ that works exclusively with the Cassandra Query Language - version 3 (CQL3) and Cassandra's native protocol versions 3 and above. 
- + Apache Cassandra Java Driver https://github.com/datastax/java-driver 2017 - core core-shaded query-builder mapper-runtime mapper-processor + metrics/micrometer + metrics/microprofile + guava-shaded test-infra integration-tests + osgi-tests + distribution-source distribution + distribution-tests examples + bom - - true UTF-8 UTF-8 - 1.3.3 - 25.1-jre - 2.1.11 - 4.0.5 - 1.4.5 - 4.1.34.Final + 1.4.1 + + 2.1.12 + 4.1.18 + 4.1.130.Final + 1.2.1 + + 3.5.6 + 1.7.26 + + 1.0.3 + 20230227 + 2.13.5 + ${jackson.version} - 1.1.7.2 - 1.5.1 + 1.1.10.1 + 1.10.1 - 3.12.1 + 3.19.0 1.3 - 4.12 + 4.13.2 1.2.3 - 4.12.0 - 0.8.8 - 1.0 - 2.28 + 6.0.0 + 7.0.1 + 4.13.4 + 2.6.4 + 0.11.0 + 1.1.4 + 2.31 2.5.0 - 2.0.1 + 2.1.1 1.1.4 - 2.9.9 - 2.9.9.1 + 2.2.2 + 4.0.3 + 2.0.0-M19 + 3.0.0 + 22.0.0.2 + false + ${skipTests} - - com.datastax.oss - native-protocol - ${native-protocol.version} + org.apache.cassandra + java-driver-core + ${project.version} + test-jar io.netty @@ -86,15 +111,10 @@ ${netty.version} - com.datastax.oss - java-driver-shaded-guava - ${guava.version} - - - + com.google.guava guava - ${guava.version} + 33.3.1-jre com.typesafe @@ -111,25 +131,21 @@ logback-classic ${logback.version} - - com.github.jnr - jnr-ffi - 2.1.9 - org.xerial.snappy snappy-java ${snappy.version} - org.lz4 + at.yawk.lz4 lz4-java ${lz4.version} com.github.jnr jnr-posix - 3.0.49 + + 3.1.15 io.dropwizard.metrics @@ -141,6 +157,54 @@ HdrHistogram ${hdrhistogram.version} + + com.esri.geometry + esri-geometry-api + ${esri.version} + + + org.json + json + ${json.version} + + + org.apache.tinkerpop + gremlin-core + ${tinkerpop.version} + + + org.yaml + snakeyaml + + + com.carrotsearch + hppc + + + com.jcabi + * + + + net.objecthunter + exp4j + + + + + org.apache.tinkerpop + tinkergraph-gremlin + ${tinkerpop.version} + + + org.reactivestreams + reactive-streams + ${reactive-streams.version} + + + org.reactivestreams + reactive-streams-tck + ${reactive-streams.version} + com.github.stephenc.jcip jcip-annotations @@ -154,12 +218,7 @@ com.squareup javapoet - 1.11.1 - - - com.google.auto.service - auto-service - 1.0-rc4 + 1.13.0 junit @@ -179,7 +238,12 @@ org.mockito mockito-core - 2.25.0 + 2.28.2 + + + io.reactivex.rxjava2 + rxjava + ${rxjava.version} com.datastax.oss.simulacron @@ -191,6 +255,16 @@ commons-exec ${commons-exec.version} + + org.osgi + org.osgi.core + ${osgi.version} + + + org.apache.felix + org.apache.felix.framework + ${felix.version} + org.ops4j.pax.exam pax-exam-junit4 @@ -198,7 +272,7 @@ org.ops4j.pax.exam - pax-exam-container-native + pax-exam-container-forked ${pax-exam.version} @@ -207,9 +281,19 @@ ${pax-exam.version} - org.apache.felix - org.apache.felix.framework - 6.0.0 + org.ops4j.pax.url + pax-url-wrap + ${pax-url.version} + + + org.ops4j.pax.url + pax-url-reference + ${pax-url.version} + + + org.ops4j.pax.tinybundles + tinybundles + 3.0.0 org.glassfish @@ -259,7 +343,12 @@ javax.annotation javax.annotation-api - 1.2 + 1.3.2 + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} com.fasterxml.jackson.core @@ -269,23 +358,125 @@ com.google.testing.compile compile-testing - 0.15 + 0.19 + + + org.awaitility + awaitility + ${awaitility.version} + + + org.testng + testng + 7.3.0 + + + org.apache.directory.server + apacheds-core + ${apacheds.version} + + + org.slf4j + slf4j-log4j12 + + + + + org.apache.directory.server + apacheds-protocol-kerberos + ${apacheds.version} + + + org.apache.directory.server + apacheds-interceptor-kerberos + ${apacheds.version} + + + org.apache.directory.server + 
apacheds-protocol-ldap + ${apacheds.version} + + + org.apache.directory.server + apacheds-ldif-partition + ${apacheds.version} + + + org.apache.directory.server + apacheds-jdbm-partition + ${apacheds.version} + + + org.apache.directory.api + api-ldap-codec-standalone + 1.0.0-M26 + + + com.github.tomakehurst + wiremock + 2.25.0 + + + org.graalvm.sdk + graal-sdk + ${graalapi.version} + + + org.graalvm.nativeimage + svm + ${graalapi.version} + + + io.micrometer + micrometer-core + 1.6.5 + + + org.eclipse.microprofile.metrics + microprofile-metrics-api + 3.0 + + + io.smallrye + smallrye-metrics + 3.0.3 + + + io.projectreactor + reactor-bom + 2020.0.5 + pom + import + + + io.projectreactor.tools + blockhound + 1.0.8.RELEASE + + + io.projectreactor.tools + blockhound-junit-platform + 1.0.8.RELEASE - maven-compiler-plugin - 3.6.1 + 3.8.1 com.coveo fmt-maven-plugin 2.9 + + au.com.acegi + xml-format-maven-plugin + 3.1.1 + com.mycila license-maven-plugin @@ -293,19 +484,19 @@ maven-surefire-plugin - 2.19.1 + ${surefire.version} maven-failsafe-plugin - 2.19.1 + ${surefire.version} maven-shade-plugin - 3.1.1 + 3.2.3 maven-assembly-plugin - 3.1.0 + 3.3.0 @@ -322,15 +513,15 @@ maven-source-plugin - 3.0.1 + 3.1.0 maven-javadoc-plugin - 3.0.1 + 3.2.0 maven-jar-plugin - 3.0.2 + 3.2.0 org.sonatype.plugins @@ -339,7 +530,7 @@ maven-gpg-plugin - 1.5 + 1.6 maven-release-plugin @@ -347,39 +538,46 @@ maven-install-plugin - 2.4 + 2.5.2 maven-deploy-plugin - 2.7 + 2.8.2 maven-dependency-plugin - 3.1.1 + 3.1.2 org.jacoco jacoco-maven-plugin - 0.8.3 + 0.8.10 org.apache.felix maven-bundle-plugin - 3.5.1 + 5.1.1 org.revapi revapi-maven-plugin - 0.10.5 + 0.15.1 false \d+\.\d+\.\d+ + + + ${project.groupId}:${project.artifactId}:RELEASE + + + revapi.json + org.revapi revapi-java - 0.18.2 + 0.28.4 @@ -388,9 +586,38 @@ versions-maven-plugin 2.7 + + org.codehaus.mojo + flatten-maven-plugin + 1.2.1 + + + org.apache.maven.plugins + maven-enforcer-plugin + 3.5.0 + + + maven-enforcer-plugin + + + enforce-maven + + enforce + + + + + + [3.8.1,) + + + + + + maven-compiler-plugin @@ -400,6 +627,10 @@ 1.8 -Xep:FutureReturnValueIgnored:OFF + -Xep:PreferJavaTimeOverload:OFF + -Xep:AnnotateFormatMethod:OFF + -Xep:WildcardImport:WARN + -XepExcludedPaths:.*/target/(?:generated-sources|generated-test-sources)/.* true true @@ -409,12 +640,12 @@ org.codehaus.plexus plexus-compiler-javac-errorprone - 2.8 + 2.8.6 com.google.errorprone error_prone_core - 2.2.0 + 2.3.4 @@ -430,25 +661,46 @@ + + au.com.acegi + xml-format-maven-plugin + + + + xml-check + + + + + + .idea/** + **/target/** + **/dependency-reduced-pom.xml + **/.flattened-pom.xml + docs/** + + + com.mycila license-maven-plugin - +limitations under the License.]]> src/**/*.java src/**/*.xml @@ -456,6 +708,7 @@ limitations under the License.]]> **/pom.xml + src/**/native-image.properties **/src/main/config/ide/** @@ -495,12 +748,25 @@ limitations under the License.]]> maven-surefire-plugin + ${testing.jvm}/bin/java + + ${project.basedir}/src/test/resources/logback-test.xml + usedefaultlisteners false + ${skipUnitTests} + + + + maven-failsafe-plugin + + + ${project.basedir}/src/test/resources/logback-test.xml + @@ -509,7 +775,7 @@ limitations under the License.]]> true ossrh - https://oss.sonatype.org/ + https://repository.apache.org/ false true @@ -522,6 +788,12 @@ limitations under the License.]]> jar-no-fork + + + LICENSE_binary + NOTICE_binary.txt + + @@ -531,8 +803,13 @@ limitations under the License.]]> false true all,-missing - com.datastax.oss.driver.internal + 
com.datastax.*.driver.internal* + + apiNote + a + API note: + check-api-leaks @@ -568,6 +839,7 @@ limitations under the License.]]> -preventleak com.datastax.oss.driver.internal + com.datastax.dse.driver.internal -preventleak com.datastax.oss.driver.shaded @@ -648,15 +920,17 @@ limitations under the License.]]> check - - - - revapi.json - - + + org.apache.maven.plugins + maven-remote-resources-plugin + 1.7.0 + + true + + @@ -679,13 +953,96 @@ limitations under the License.]]> + + + fast + + true + true + true + true + true + true + true + true + + + + + test-jdk-environment + + + !testJavaHome + + + + ${env.JAVA_HOME} + + + + + test-jdk-specified + + + testJavaHome + + + + ${testJavaHome} + + + + + test-jdk-8 + + [8,) + + + + + test-jdk-11 + + [11,) + + + + + test-jdk-14 + + [14,) + + + + -XX:+AllowRedefinitionToAddDeleteMethods + + + + + test-jdk-17 + + [17,) + + + + -XX:+AllowRedefinitionToAddDeleteMethods + + --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED + + + + + test-jdk-21 + + [21,) + + + + -XX:+AllowRedefinitionToAddDeleteMethods + + --add-opens=java.base/jdk.internal.util.random=ALL-UNNAMED + + - - - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ - - Apache 2 diff --git a/pre-commit.sh b/pre-commit.sh index c87ea5bf9ca..912564ae81e 100755 --- a/pre-commit.sh +++ b/pre-commit.sh @@ -1,4 +1,20 @@ #!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. # STASH_NAME="pre-commit-$(date +%s)" # git stash save --keep-index $STASH_NAME diff --git a/query-builder/pom.xml b/query-builder/pom.xml index e2ac5398bd9..2bfe1bee8f5 100644 --- a/query-builder/pom.xml +++ b/query-builder/pom.xml @@ -1,12 +1,15 @@ + 4.0.0 - - com.datastax.oss + org.apache.cassandra java-driver-parent - 4.1.1-SNAPSHOT + 4.19.3-SNAPSHOT - java-driver-query-builder bundle - - DataStax Java driver for Apache Cassandra(R) - query builder - + Apache Cassandra Java Driver - query builder + + + + ${project.groupId} + java-driver-bom + ${project.version} + pom + import + + + - com.datastax.oss + org.apache.cassandra java-driver-core - ${project.version} - com.datastax.oss - java-driver-shaded-guava + org.apache.cassandra + java-driver-guava-shaded com.github.stephenc.jcip jcip-annotations + provided com.github.spotbugs spotbugs-annotations + provided junit @@ -62,27 +73,84 @@ assertj-core test + + org.apache.cassandra + java-driver-core + test + test-jar + - + + + src/main/resources + + + ${project.basedir}/.. 
+ + LICENSE + NOTICE_binary.txt + NOTICE.txt + + META-INF + + + + + src/test/resources + + project.properties + + true + + + src/test/resources + + project.properties + + false + + + + maven-jar-plugin + + + + com.datastax.oss.driver.querybuilder + + + + org.apache.felix maven-bundle-plugin com.datastax.oss.driver.querybuilder - - !net.jcip.annotations.*, - !edu.umd.cs.findbugs.annotations.*, - * - - - com.datastax.oss.driver.*.querybuilder.* - + !net.jcip.annotations.*, !edu.umd.cs.findbugs.annotations.*, * + com.datastax.oss.driver.*.querybuilder.*, com.datastax.dse.driver.*.querybuilder.* + + maven-dependency-plugin + + + generate-dependency-list + + list + + generate-resources + + runtime + true + com.datastax.cassandra,com.datastax.dse + ${project.build.outputDirectory}/com/datastax/dse/driver/internal/querybuilder/deps.txt + + + + diff --git a/query-builder/revapi.json b/query-builder/revapi.json index 0cf4e85f90d..ed97379332c 100644 --- a/query-builder/revapi.json +++ b/query-builder/revapi.json @@ -1,5 +1,3 @@ -// Configures Revapi (https://revapi.org/getting-started.html) to check API compatibility between -// successive driver versions. { "revapi": { "java": { @@ -7,12 +5,12 @@ "packages": { "regex": true, "exclude": [ - "com\\.datastax\\.oss\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", "org\\.assertj(\\..+)?", - // Don't re-check sibling modules that this module depends on - "com\\.datastax\\.oss\\.driver\\.api\\.core(\\..+)?" + "// Don't re-check sibling modules that this module depends on", + "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.core(\\..+)?" ] } } @@ -2772,8 +2770,21 @@ "code": "java.method.addedToInterface", "new": "method com.datastax.oss.driver.api.querybuilder.update.UpdateStart com.datastax.oss.driver.api.querybuilder.update.UpdateStart::usingTtl(int)", "justification": "JAVA-2210: Add ability to set TTL for modification queries" + }, + { + "code": "java.method.addedToInterface", + "new": "method com.datastax.oss.driver.api.querybuilder.select.Select com.datastax.oss.driver.api.querybuilder.select.Select::orderByAnnOf(java.lang.String, com.datastax.oss.driver.api.core.data.CqlVector)", + "justification": "JAVA-3118: Add support for vector data type in Schema Builder, QueryBuilder" + }, + { + "code": "java.method.addedToInterface", + "new": "method com.datastax.oss.driver.api.querybuilder.select.Select com.datastax.oss.driver.api.querybuilder.select.Select::orderByAnnOf(com.datastax.oss.driver.api.core.CqlIdentifier, com.datastax.oss.driver.api.core.data.CqlVector)", + "justification": "JAVA-3118: Add support for vector data type in Schema Builder, QueryBuilder" + }, + { + "code": "java.method.varargOverloadsOnlyDifferInVarargParameter", + "justification": "CASSJAVA-102: Suppress newly-supported varargs check" } ] } } - diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.java new file mode 100644 index 00000000000..24e606897e5 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseQueryBuilder.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder; + +import com.datastax.oss.driver.api.querybuilder.QueryBuilder; + +/** + * A DSE extension of the Cassandra driver's {@linkplain QueryBuilder query builder}. + * + *
+ * <p>Note that, at this time, this class acts as a simple pass-through: there is no DSE-specific
+ * syntax for DML queries, therefore it just inherits all of {@link QueryBuilder}'s methods, without
+ * adding any of its own.
+ *
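+ * <p>For example, a minimal sketch (the {@code "user"} table and {@code "name"} column are
+ * hypothetical):
+ *
+ * <pre>{@code
+ * // Inherited from QueryBuilder; builds "SELECT name FROM user LIMIT 10"
+ * SimpleStatement statement =
+ *     DseQueryBuilder.selectFrom("user").column("name").limit(10).build();
+ * }</pre>
+ *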
+ * <p>
          However, it is a good idea to use it as the entry point to the DSL in your DSE application, to + * avoid changing all your imports if specialized methods get added here in the future. + */ +public class DseQueryBuilder extends QueryBuilder { + // nothing to do +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.java new file mode 100644 index 00000000000..456746204b5 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/DseSchemaBuilder.java @@ -0,0 +1,328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder; + +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseKeyspaceStart; +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseKeyspaceStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTableStart; +import com.datastax.dse.driver.internal.querybuilder.schema.DefaultAlterDseKeyspace; +import com.datastax.dse.driver.internal.querybuilder.schema.DefaultAlterDseTable; +import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseAggregate; +import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseFunction; +import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseKeyspace; +import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseTable; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import com.datastax.oss.driver.api.querybuilder.schema.CreateAggregateStart; +import com.datastax.oss.driver.api.querybuilder.schema.CreateFunctionStart; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; + +/** + * An extension of {@link com.datastax.oss.driver.api.querybuilder.SchemaBuilder} for building + * schema entities that have DSE specific functionality. + */ +public class DseSchemaBuilder extends SchemaBuilder { + + /** + * Starts a CREATE AGGREGATE query with the given aggregate name. This assumes the keyspace name + * is already qualified for the Session or Statement. + */ + @NonNull + public static CreateDseAggregateStart createDseAggregate(@NonNull CqlIdentifier aggregateId) { + return new DefaultCreateDseAggregate(aggregateId); + } + + /** Starts a CREATE AGGREGATE query with the given aggregate name for the given keyspace name. 
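+ *
+ * <p>For example, a hypothetical sketch ({@code "ks"}, {@code "my_avg"} and the state/final
+ * function names are made up and assumed to exist):
+ *
+ * <pre>{@code
+ * CreateDseAggregateEnd createAgg =
+ *     createDseAggregate(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("my_avg"))
+ *         .withParameter(DataTypes.INT)
+ *         .withSFunc("avg_state")
+ *         .withSType(DataTypes.tupleOf(DataTypes.INT, DataTypes.BIGINT))
+ *         .withFinalFunc("avg_final");
+ * }</pre>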
+ */
+ @NonNull
+ public static CreateDseAggregateStart createDseAggregate(
+     @Nullable CqlIdentifier keyspaceId, @NonNull CqlIdentifier aggregateId) {
+   return new DefaultCreateDseAggregate(keyspaceId, aggregateId);
+ }
+
+ /**
+ * Shortcut for {@link #createDseAggregate(CqlIdentifier)
+ * createDseAggregate(CqlIdentifier.fromCql(aggregateName))}.
+ */
+ @NonNull
+ public static CreateDseAggregateStart createDseAggregate(@NonNull String aggregateName) {
+   return new DefaultCreateDseAggregate(CqlIdentifier.fromCql(aggregateName));
+ }
+
+ /**
+ * Shortcut for {@link #createDseAggregate(CqlIdentifier, CqlIdentifier)
+ * createDseAggregate(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(aggregateName))}.
+ */
+ @NonNull
+ public static CreateDseAggregateStart createDseAggregate(
+     @Nullable String keyspaceName, @NonNull String aggregateName) {
+   return new DefaultCreateDseAggregate(
+       keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName),
+       CqlIdentifier.fromCql(aggregateName));
+ }
+
+ /**
+ * Starts a CREATE AGGREGATE query with the given aggregate name. This assumes the keyspace name
+ * is already qualified for the Session or Statement.
+ *
+ * <p>Note that this method only covers open-source Cassandra syntax. If you want to use
+ * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link
+ * #createDseAggregate(CqlIdentifier)}.
+ */
+ @NonNull
+ public static CreateAggregateStart createAggregate(@NonNull CqlIdentifier aggregateName) {
+   return SchemaBuilder.createAggregate(aggregateName);
+ }
+
+ /**
+ * Starts a CREATE AGGREGATE query with the given aggregate name for the given keyspace name.
+ *
+ * <p>Note that this method only covers open-source Cassandra syntax. If you want to use
+ * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link
+ * #createDseAggregate(CqlIdentifier, CqlIdentifier)}.
+ */
+ @NonNull
+ public static CreateAggregateStart createAggregate(
+     @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier aggregateName) {
+   return SchemaBuilder.createAggregate(keyspace, aggregateName);
+ }
+
+ /**
+ * Shortcut for {@link #createAggregate(CqlIdentifier)
+ * createAggregate(CqlIdentifier.fromCql(aggregateName))}.
+ *
+ * <p>Note that this method only covers open-source Cassandra syntax. If you want to use
+ * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link
+ * #createDseAggregate(String)}.
+ */
+ @NonNull
+ public static CreateAggregateStart createAggregate(@NonNull String aggregateName) {
+   return SchemaBuilder.createAggregate(aggregateName);
+ }
+
+ /**
+ * Shortcut for {@link #createAggregate(CqlIdentifier, CqlIdentifier)
+ * createAggregate(CqlIdentifier.fromCql(keyspace), CqlIdentifier.fromCql(aggregateName))}.
+ *
+ * <p>
          Note that this method only covers open-source Cassandra syntax. If you want to use + * DSE-specific features, such as the {@code DETERMINISTIC} keyword, use {@link + * #createDseAggregate(String, String)}. + */ + @NonNull + public static CreateAggregateStart createAggregate( + @Nullable String keyspace, @NonNull String aggregateName) { + return SchemaBuilder.createAggregate(keyspace, aggregateName); + } + + /** + * Starts a CREATE FUNCTION query with the given function name. This assumes the keyspace name is + * already qualified for the Session or Statement. + */ + @NonNull + public static CreateDseFunctionStart createDseFunction(@NonNull CqlIdentifier functionId) { + return new DefaultCreateDseFunction(functionId); + } + + /** Starts a CREATE FUNCTION query with the given function name for the given keyspace name. */ + @NonNull + public static CreateDseFunctionStart createDseFunction( + @Nullable CqlIdentifier keyspaceId, @NonNull CqlIdentifier functionId) { + return new DefaultCreateDseFunction(keyspaceId, functionId); + } + + /** + * Shortcut for {@link #createFunction(CqlIdentifier) + * createFunction(CqlIdentifier.fromCql(functionName)} + */ + @NonNull + public static CreateDseFunctionStart createDseFunction(@NonNull String functionName) { + return new DefaultCreateDseFunction(CqlIdentifier.fromCql(functionName)); + } + + /** + * Shortcut for {@link #createFunction(CqlIdentifier, CqlIdentifier) + * createFunction(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(functionName)} + */ + @NonNull + public static CreateDseFunctionStart createDseFunction( + @Nullable String keyspaceName, @NonNull String functionName) { + return new DefaultCreateDseFunction( + keyspaceName == null ? null : CqlIdentifier.fromCql(keyspaceName), + CqlIdentifier.fromCql(functionName)); + } + + /** + * Starts a CREATE FUNCTION query with the given function name. This assumes the keyspace name is + * already qualified for the Session or Statement. + * + *
+ * <p>Note that this method only covers open-source Cassandra syntax. If you want to use
+ * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use
+ * {@link #createDseFunction(CqlIdentifier)}.
+ */
+ @NonNull
+ public static CreateFunctionStart createFunction(@NonNull CqlIdentifier functionName) {
+   return SchemaBuilder.createFunction(functionName);
+ }
+
+ /**
+ * Starts a CREATE FUNCTION query with the given function name for the given keyspace name.
+ *
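+ * <p>For DSE-specific features such as {@code DETERMINISTIC} (see the note below), a
+ * hypothetical sketch with the {@code createDseFunction} variant (all names made up):
+ *
+ * <pre>{@code
+ * CreateDseFunctionEnd createFn =
+ *     createDseFunction("ks", "plus_one")
+ *         .withParameter("input", DataTypes.INT)
+ *         .returnsNullOnNull()
+ *         .returnsType(DataTypes.INT)
+ *         .deterministic()
+ *         .withJavaLanguage()
+ *         .asQuoted("return input + 1;");
+ * }</pre>
+ *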
+ * <p>Note that this method only covers open-source Cassandra syntax. If you want to use
+ * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use
+ * {@link #createDseFunction(CqlIdentifier,CqlIdentifier)}.
+ */
+ @NonNull
+ public static CreateFunctionStart createFunction(
+     @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) {
+   return SchemaBuilder.createFunction(keyspace, functionName);
+ }
+
+ /**
+ * Shortcut for {@link #createFunction(CqlIdentifier)
+ * createFunction(CqlIdentifier.fromCql(functionName))}.
+ *
+ * <p>Note that this method only covers open-source Cassandra syntax. If you want to use
+ * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use
+ * {@link #createDseFunction(String)}.
+ */
+ @NonNull
+ public static CreateFunctionStart createFunction(@NonNull String functionName) {
+   return SchemaBuilder.createFunction(functionName);
+ }
+
+ /**
+ * Shortcut for {@link #createFunction(CqlIdentifier, CqlIdentifier)
+ * createFunction(CqlIdentifier.fromCql(keyspaceName), CqlIdentifier.fromCql(functionName))}.
+ *
+ * <p>
          Note that this method only covers open-source Cassandra syntax. If you want to use + * DSE-specific features, such as the {@code MONOTONIC} or {@code DETERMINISTIC} keywords, use + * {@link #createDseFunction(String, String)}. + */ + @NonNull + public static CreateFunctionStart createFunction( + @Nullable String keyspace, @NonNull String functionName) { + return SchemaBuilder.createFunction(keyspace, functionName); + } + + /** Starts a CREATE KEYSPACE query. */ + @NonNull + public static CreateDseKeyspaceStart createDseKeyspace(@NonNull CqlIdentifier keyspaceName) { + return new DefaultCreateDseKeyspace(keyspaceName); + } + + /** + * Shortcut for {@link #createDseKeyspace(CqlIdentifier) + * createKeyspace(CqlIdentifier.fromCql(keyspaceName))} + */ + @NonNull + public static CreateDseKeyspaceStart createDseKeyspace(@NonNull String keyspaceName) { + return createDseKeyspace(CqlIdentifier.fromCql(keyspaceName)); + } + + /** Starts an ALTER KEYSPACE query. */ + @NonNull + public static AlterDseKeyspaceStart alterDseKeyspace(@NonNull CqlIdentifier keyspaceName) { + return new DefaultAlterDseKeyspace(keyspaceName); + } + + /** + * Shortcut for {@link #alterDseKeyspace(CqlIdentifier) + * alterKeyspace(CqlIdentifier.fromCql(keyspaceName)}. + */ + @NonNull + public static AlterDseKeyspaceStart alterDseKeyspace(@NonNull String keyspaceName) { + return DseSchemaBuilder.alterDseKeyspace(CqlIdentifier.fromCql(keyspaceName)); + } + + /** + * Starts a CREATE TABLE query with the given table name. This assumes the keyspace name is + * already qualified for the Session or Statement. + */ + @NonNull + public static CreateDseTableStart createDseTable(@NonNull CqlIdentifier tableName) { + return createDseTable(null, tableName); + } + + /** Starts a CREATE TABLE query with the given table name for the given keyspace name. */ + @NonNull + public static CreateDseTableStart createDseTable( + @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { + return new DefaultCreateDseTable(keyspace, tableName); + } + + /** + * Shortcut for {@link #createDseTable(CqlIdentifier) + * createDseTable(CqlIdentifier.fromCql(tableName)} + */ + @NonNull + public static CreateDseTableStart createDseTable(@NonNull String tableName) { + return createDseTable(CqlIdentifier.fromCql(tableName)); + } + + /** + * Shortcut for {@link #createDseTable(CqlIdentifier,CqlIdentifier) + * createDseTable(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(tableName)} + */ + @NonNull + public static CreateDseTableStart createDseTable( + @Nullable String keyspace, @NonNull String tableName) { + return createDseTable( + keyspace == null ? null : CqlIdentifier.fromCql(keyspace), + CqlIdentifier.fromCql(tableName)); + } + + /** + * Starts an ALTER TABLE query with the given table name. This assumes the keyspace name is + * already qualified for the Session or Statement. + */ + @NonNull + public static AlterDseTableStart alterDseTable(@NonNull CqlIdentifier tableName) { + return new DefaultAlterDseTable(tableName); + } + + /** Starts an ALTER TABLE query with the given table name for the given keyspace name. 
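+ *
+ * <p>For example, a hypothetical sketch ({@code "ks"} and {@code "users"} are made-up names):
+ *
+ * <pre>{@code
+ * // ALTER TABLE ks.users ADD email text
+ * alterDseTable(CqlIdentifier.fromCql("ks"), CqlIdentifier.fromCql("users"))
+ *     .addColumn("email", DataTypes.TEXT)
+ *     .build();
+ * }</pre>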
*/ + @NonNull + public static AlterDseTableStart alterDseTable( + @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { + return new DefaultAlterDseTable(keyspace, tableName); + } + + /** + * Shortcut for {@link #alterDseTable(CqlIdentifier) + * alterDseTable(CqlIdentifier.fromCql(tableName)} + */ + @NonNull + public static AlterDseTableStart alterDseTable(@NonNull String tableName) { + return alterDseTable(CqlIdentifier.fromCql(tableName)); + } + + /** + * Shortcut for {@link #alterDseTable(CqlIdentifier,CqlIdentifier) + * alterDseTable(CqlIdentifier.fromCql(keyspaceName),CqlIdentifier.fromCql(tableName)} + */ + @NonNull + public static AlterDseTableStart alterDseTable( + @Nullable String keyspace, @NonNull String tableName) { + return alterDseTable( + keyspace == null ? null : CqlIdentifier.fromCql(keyspace), + CqlIdentifier.fromCql(tableName)); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/package-info.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/package-info.java new file mode 100644 index 00000000000..05f9d4e6912 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * This package effectively mirrors the Cassandra OSS Query Builder package to allow DSE extended + * schema and query building for the DSE driver. In general, a class in this package should simply + * extend the equivalent class in the OSS driver and add extended functionality. + */ +package com.datastax.dse.driver.api.querybuilder; diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspace.java new file mode 100644 index 00000000000..c7aa795ae24 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspace.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; +import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceOptions; +import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceReplicationOptions; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; + +public interface AlterDseKeyspace + extends BuildableQuery, + KeyspaceOptions, + KeyspaceReplicationOptions { + + @NonNull + @Override + AlterDseKeyspace withOption(@NonNull String name, @NonNull Object value); + + /** + * Adjusts durable writes configuration for this keyspace. If set to false, data written to the + * keyspace will bypass the commit log. + */ + @NonNull + @Override + AlterDseKeyspace withDurableWrites(boolean durableWrites); + + /** Adjusts the graph engine that will be used to interpret this keyspace. */ + @NonNull + AlterDseKeyspace withGraphEngine(String graphEngine); + + /** + * Adds 'replication' options. One should only use this when they have a custom replication + * strategy, otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link + * #withNetworkTopologyStrategy(Map)}. + */ + @NonNull + @Override + AlterDseKeyspace withReplicationOptions(@NonNull Map replicationOptions); + + /** + * Adds SimpleStrategy replication options with the given replication factor. + * + *
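+ * <p>For example, a minimal sketch (hypothetical keyspace name):
+ *
+ * <pre>{@code
+ * // ALTER KEYSPACE ks WITH replication = SimpleStrategy / RF 3
+ * alterDseKeyspace("ks").withSimpleStrategy(3).build();
+ * }</pre>
+ *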
+ * <p>Note that using this will overwrite any previous use of this method or {@link
+ * #withNetworkTopologyStrategy(Map)}.
+ */
+ @NonNull
+ @Override
+ AlterDseKeyspace withSimpleStrategy(int replicationFactor);
+
+ /**
+ * Adds NetworkTopologyStrategy replication options with the given data center replication
+ * factors.
+ *
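+ * <p>For example, a sketch with hypothetical data center names (replication factors keyed by
+ * data center):
+ *
+ * <pre>{@code
+ * Map<String, Integer> replications = new HashMap<>();
+ * replications.put("dc1", 3);
+ * replications.put("dc2", 2);
+ * alterDseKeyspace("ks").withNetworkTopologyStrategy(replications).build();
+ * }</pre>
+ *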
+ * <p>
          Note that using this will overwrite any previous use of this method or {@link + * #withSimpleStrategy(int)}. + * + * @param replications Mapping of data center name to replication factor to use for that data + * center. + */ + @NonNull + @Override + AlterDseKeyspace withNetworkTopologyStrategy(@NonNull Map replications); +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceStart.java new file mode 100644 index 00000000000..6a36d4b4d46 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceStart.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceOptions; +import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceReplicationOptions; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; + +public interface AlterDseKeyspaceStart + extends KeyspaceOptions, KeyspaceReplicationOptions { + + @NonNull + @Override + AlterDseKeyspace withOption(@NonNull String name, @NonNull Object value); + + /** + * Adjusts durable writes configuration for this keyspace. If set to false, data written to the + * keyspace will bypass the commit log. + */ + @NonNull + @Override + AlterDseKeyspace withDurableWrites(boolean durableWrites); + + /** Adjusts the graph engine that will be used to interpret this keyspace. */ + @NonNull + AlterDseKeyspace withGraphEngine(String graphEngine); + + /** + * Adds 'replication' options. One should only use this when they have a custom replication + * strategy, otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link + * #withNetworkTopologyStrategy(Map)}. + */ + @NonNull + @Override + AlterDseKeyspace withReplicationOptions(@NonNull Map replicationOptions); + + /** + * Adds SimpleStrategy replication options with the given replication factor. + * + *
+ * <p>Note that using this will overwrite any previous use of this method or {@link
+ * #withNetworkTopologyStrategy(Map)}.
+ */
+ @NonNull
+ @Override
+ AlterDseKeyspace withSimpleStrategy(int replicationFactor);
+
+ /**
+ * Adds NetworkTopologyStrategy replication options with the given data center replication
+ * factors.
+ *
+ * <p>
          Note that using this will overwrite any previous use of this method or {@link + * #withSimpleStrategy(int)}. + * + * @param replications Mapping of data center name to replication factor to use for that data + * center. + */ + @NonNull + @Override + AlterDseKeyspace withNetworkTopologyStrategy(@NonNull Map replications); +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumn.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumn.java new file mode 100644 index 00000000000..c5f05a661b9 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumn.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface AlterDseTableAddColumn { + /** + * Adds a column definition in the ALTER TABLE statement. + * + *
+ * <p>To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ AlterDseTableAddColumnEnd addColumn(
+     @NonNull CqlIdentifier columnName, @NonNull DataType dataType);
+
+ /**
+ * Shortcut for {@link #addColumn(CqlIdentifier, DataType)
+ * addColumn(CqlIdentifier.fromCql(columnName), dataType)}.
+ */
+ @NonNull
+ default AlterDseTableAddColumnEnd addColumn(
+     @NonNull String columnName, @NonNull DataType dataType) {
+   return addColumn(CqlIdentifier.fromCql(columnName), dataType);
+ }
+
+ /**
+ * Adds a static column definition in the ALTER TABLE statement.
+ *
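+ * <p>For example, a minimal sketch (hypothetical names); the static column is shared by all
+ * rows of a partition:
+ *
+ * <pre>{@code
+ * DseSchemaBuilder.alterDseTable("users").addStaticColumn("group_note", DataTypes.TEXT).build();
+ * }</pre>
+ *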
+ * <p>
          To create the data type, use the constants and static methods in {@link DataTypes}, or + * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. + */ + @NonNull + AlterDseTableAddColumnEnd addStaticColumn( + @NonNull CqlIdentifier columnName, @NonNull DataType dataType); + + /** + * Shortcut for {@link #addStaticColumn(CqlIdentifier, DataType) + * addStaticColumn(CqlIdentifier.asCql(columnName), dataType)}. + */ + @NonNull + default AlterDseTableAddColumnEnd addStaticColumn( + @NonNull String columnName, @NonNull DataType dataType) { + return addStaticColumn(CqlIdentifier.fromCql(columnName), dataType); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumnEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumnEnd.java new file mode 100644 index 00000000000..80d3cc2a665 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableAddColumnEnd.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; + +public interface AlterDseTableAddColumnEnd extends AlterDseTableAddColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumn.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumn.java new file mode 100644 index 00000000000..50e672f8e6e --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumn.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface AlterDseTableDropColumn { + /** + * Adds column(s) to drop to ALTER TABLE specification. This may be repeated with successive calls + * to drop columns. + */ + @NonNull + AlterDseTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... columnNames); + + /** Shortcut for {@link #dropColumns(CqlIdentifier...)}. */ + @NonNull + default AlterDseTableDropColumnEnd dropColumns(@NonNull String... columnNames) { + CqlIdentifier ids[] = new CqlIdentifier[columnNames.length]; + for (int i = 0; i < columnNames.length; i++) { + ids[i] = CqlIdentifier.fromCql(columnNames[i]); + } + return dropColumns(ids); + } + + /** + * Adds a column to drop to ALTER TABLE specification. This may be repeated with successive calls + * to drop columns. Shortcut for {@link #dropColumns(CqlIdentifier...) #dropColumns(columnName)}. + */ + @NonNull + default AlterDseTableDropColumnEnd dropColumn(@NonNull CqlIdentifier columnName) { + return dropColumns(columnName); + } + + /** + * Shortcut for {@link #dropColumn(CqlIdentifier) dropColumn(CqlIdentifier.fromCql(columnName))}. + */ + @NonNull + default AlterDseTableDropColumnEnd dropColumn(@NonNull String columnName) { + return dropColumns(CqlIdentifier.fromCql(columnName)); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumnEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumnEnd.java new file mode 100644 index 00000000000..7e3d424eb31 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableDropColumnEnd.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; + +public interface AlterDseTableDropColumnEnd extends AlterDseTableDropColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumn.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumn.java new file mode 100644 index 00000000000..7a24a76f4ab --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumn.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface AlterDseTableRenameColumn { + + /** + * Adds a column rename to ALTER TABLE specification. This may be repeated with successive calls + * to rename columns. + */ + @NonNull + AlterDseTableRenameColumnEnd renameColumn(@NonNull CqlIdentifier from, @NonNull CqlIdentifier to); + + /** + * Shortcut for {@link #renameColumn(CqlIdentifier, CqlIdentifier) + * renameField(CqlIdentifier.fromCql(from),CqlIdentifier.fromCql(to))}. + */ + @NonNull + default AlterDseTableRenameColumnEnd renameColumn(@NonNull String from, @NonNull String to) { + return renameColumn(CqlIdentifier.fromCql(from), CqlIdentifier.fromCql(to)); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumnEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumnEnd.java new file mode 100644 index 00000000000..db2890b844b --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableRenameColumnEnd.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; + +public interface AlterDseTableRenameColumnEnd extends AlterDseTableRenameColumn, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableStart.java new file mode 100644 index 00000000000..bb34bb3fb38 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableStart.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.querybuilder.BuildableQuery;
+import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+
+public interface AlterDseTableStart
+    extends AlterDseTableWithOptions,
+        AlterDseTableAddColumn,
+        AlterDseTableDropColumn,
+        AlterDseTableRenameColumn,
+        DseTableGraphOptions {
+
+ /** Completes ALTER TABLE specifying that compact storage should be removed from the table. */
+ @NonNull
+ BuildableQuery dropCompactStorage();
+
+ /**
+ * Completes ALTER TABLE specifying that the type of a column should be changed.
+ *
+ * <p>To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType);
+
+ /**
+ * Shortcut for {@link #alterColumn(CqlIdentifier, DataType)
+ * alterColumn(CqlIdentifier.fromCql(columnName), dataType)}.
+ */
+ @NonNull
+ default BuildableQuery alterColumn(@NonNull String columnName, @NonNull DataType dataType) {
+   return alterColumn(CqlIdentifier.fromCql(columnName), dataType);
+ }
+
+ /** Removes the named vertex label from this table. */
+ @NonNull
+ BuildableQuery withoutVertexLabel(@Nullable CqlIdentifier vertexLabelId);
+
+ /**
+ * Shortcut for {@link #withoutVertexLabel(CqlIdentifier)
+ * withoutVertexLabel(CqlIdentifier.fromCql(vertexLabelName))}.
+ */
+ @NonNull
+ default BuildableQuery withoutVertexLabel(@NonNull String vertexLabelName) {
+   return withoutVertexLabel(CqlIdentifier.fromCql(vertexLabelName));
+ }
+
+ /**
+ * Removes the anonymous vertex label from this table.
+ *
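+ * <p>For example, a minimal sketch (hypothetical table name):
+ *
+ * <pre>{@code
+ * DseSchemaBuilder.alterDseTable("tbl").withoutVertexLabel().build();
+ * }</pre>
+ *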
+ * <p>This is a shortcut for {@link #withoutVertexLabel(CqlIdentifier) withoutVertexLabel(null)}.
+ */
+ @NonNull
+ default BuildableQuery withoutVertexLabel() {
+   return withoutVertexLabel((CqlIdentifier) null);
+ }
+
+ /** Removes the named edge label from this table. */
+ @NonNull
+ BuildableQuery withoutEdgeLabel(@Nullable CqlIdentifier edgeLabelId);
+
+ /**
+ * Shortcut for {@link #withoutEdgeLabel(CqlIdentifier)
+ * withoutEdgeLabel(CqlIdentifier.fromCql(edgeLabelName))}.
+ */
+ @NonNull
+ default BuildableQuery withoutEdgeLabel(@NonNull String edgeLabelName) {
+   return withoutEdgeLabel(CqlIdentifier.fromCql(edgeLabelName));
+ }
+
+ /**
+ * Removes the anonymous edge label from this table.
+ *
+ * <p>
          This is a shortcut for {@link #withoutVertexLabel(CqlIdentifier) withoutEdgeLabel(null)}. + */ + @NonNull + default BuildableQuery withoutEdgeLabel() { + return withoutEdgeLabel((CqlIdentifier) null); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptions.java new file mode 100644 index 00000000000..5713c3f25d6 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptions.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +public interface AlterDseTableWithOptions extends DseRelationOptions {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptionsEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptionsEnd.java new file mode 100644 index 00000000000..ef63881caa8 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableWithOptionsEnd.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; + +public interface AlterDseTableWithOptionsEnd + extends DseRelationOptions, BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateEnd.java new file mode 100644 index 00000000000..e28c887cd22 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateEnd.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; +import com.datastax.oss.driver.api.querybuilder.term.Term; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface CreateDseAggregateEnd extends BuildableQuery { + + /** + * Adds INITCOND to the aggregate query. Defines the initial condition, values, of the first + * parameter in the SFUNC. + */ + @NonNull + CreateDseAggregateEnd withInitCond(@NonNull Term term); + + /** + * Adds FINALFUNC to the create aggregate query. This is used to specify what type is returned + * from the state function. + */ + @NonNull + CreateDseAggregateEnd withFinalFunc(@NonNull CqlIdentifier finalFunc); + + /** + * Shortcut for {@link #withFinalFunc(CqlIdentifier) + * withFinalFunc(CqlIdentifier.fromCql(finalFuncName))}. + */ + @NonNull + default CreateDseAggregateEnd withFinalFunc(@NonNull String finalFuncName) { + return withFinalFunc(CqlIdentifier.fromCql(finalFuncName)); + } + + /** + * Adds "DETERMINISTIC" to create aggregate specification. This is used to specify that this + * aggregate always returns the same output for a given input. Requires an initial condition and + * returns a single value. + */ + @NonNull + CreateDseAggregateEnd deterministic(); +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStart.java new file mode 100644 index 00000000000..76bece6ca5f --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStart.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface CreateDseAggregateStart { + /** + * Adds IF NOT EXISTS to the create aggregate specification. This indicates that the aggregate + * should not be created if it already exists. + */ + @NonNull + CreateDseAggregateStart ifNotExists(); + + /** + * Adds OR REPLACE to the create aggregate specification. This indicates that the aggregate should + * replace an existing aggregate with the same name if it exists. + */ + @NonNull + CreateDseAggregateStart orReplace(); + + /** + * Adds a parameter definition in the CREATE AGGREGATE statement. + * + *
+ * <p>Parameter keys are added in the order of their declaration.
+ *
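+ * <p>For example, a hypothetical two-parameter aggregate:
+ *
+ * <pre>{@code
+ * // CREATE AGGREGATE my_agg(int, text) ...
+ * createDseAggregate("my_agg")
+ *     .withParameter(DataTypes.INT)
+ *     .withParameter(DataTypes.TEXT);
+ * }</pre>
+ *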
+ * <p>
          To create the data type, use the constants and static methods in {@link DataTypes}, or + * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. + */ + @NonNull + CreateDseAggregateStart withParameter(@NonNull DataType paramType); + + /** Adds SFUNC to the create aggregate specification. This is the state function for each row. */ + @NonNull + CreateDseAggregateStateFunc withSFunc(@NonNull CqlIdentifier sfuncName); + + /** Shortcut for {@link #withSFunc(CqlIdentifier) withSFunc(CqlIdentifier.fromCql(sfuncName))}. */ + @NonNull + default CreateDseAggregateStateFunc withSFunc(@NonNull String sfuncName) { + return withSFunc(CqlIdentifier.fromCql(sfuncName)); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStateFunc.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStateFunc.java new file mode 100644 index 00000000000..deb3b49a34a --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseAggregateStateFunc.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface CreateDseAggregateStateFunc { + + /** + * Adds STYPE to the create aggregate query. This is used to specify what type is returned from + * the state function. + * + *
+ * <p>
          To create the data type, use the constants and static methods in {@link DataTypes}, or + * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. + */ + @NonNull + CreateDseAggregateEnd withSType(@NonNull DataType dataType); +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionEnd.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionEnd.java new file mode 100644 index 00000000000..901eb1705ab --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionEnd.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; + +public interface CreateDseFunctionEnd extends BuildableQuery {} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionStart.java new file mode 100644 index 00000000000..64a741d62a9 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionStart.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface CreateDseFunctionStart { + + /** + * Adds IF NOT EXISTS to the create function specification. This indicates that the function + * should not be created if it already exists. + */ + @NonNull + CreateDseFunctionStart ifNotExists(); + + /** + * Adds OR REPLACE to the create function specification. 
This indicates that the function should + * replace an existing function with the same name if it exists. + */ + @NonNull + CreateDseFunctionStart orReplace(); + + /** + * Adds a parameter definition in the CREATE FUNCTION statement. + * + *
+ * <p>Parameter keys are added in the order of their declaration.
+ *
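+ * <p>For example, a hypothetical two-parameter function:
+ *
+ * <pre>{@code
+ * // CREATE FUNCTION my_fn(x int, y int) ...
+ * createDseFunction("my_fn")
+ *     .withParameter("x", DataTypes.INT)
+ *     .withParameter("y", DataTypes.INT);
+ * }</pre>
+ *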
+ * <p>
          To create the data type, use the constants and static methods in {@link DataTypes}, or + * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}. + */ + @NonNull + CreateDseFunctionStart withParameter( + @NonNull CqlIdentifier paramName, @NonNull DataType paramType); + + /** + * Shortcut for {@link #withParameter(CqlIdentifier, DataType) + * withParameter(CqlIdentifier.asCql(paramName), dataType)}. + */ + @NonNull + default CreateDseFunctionStart withParameter( + @NonNull String paramName, @NonNull DataType paramType) { + return withParameter(CqlIdentifier.fromCql(paramName), paramType); + } + + /** + * Adds RETURNS NULL ON NULL to the create function specification. This indicates that the body of + * the function should be skipped when null input is provided. + */ + @NonNull + CreateDseFunctionWithNullOption returnsNullOnNull(); + + /** + * Adds CALLED ON NULL to the create function specification. This indicates that the body of the + * function not be skipped when null input is provided. + */ + @NonNull + CreateDseFunctionWithNullOption calledOnNull(); +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithLanguage.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithLanguage.java new file mode 100644 index 00000000000..10935061404 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithLanguage.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface CreateDseFunctionWithLanguage { + + /** + * Adds AS to the create function specification. This is used to specify the body of the function. + * Note that it is expected that the provided body is properly quoted as this method does not make + * that decision for the user. For simple cases, one should wrap the input in single quotes, i.e. + * 'myBody'. If the body itself contains single quotes, one could use a + * postgres-style string literal, which is surrounded in two dollar signs, i.e. $$ myBody $$ + * . + */ + @NonNull + CreateDseFunctionEnd as(@NonNull String functionBody); + + /** + * Adds AS to the create function specification and quotes the function body. Assumes that if the + * input body contains at least one single quote, to quote the body with two dollar signs, i.e. + * $$ myBody $$, otherwise the body is quoted with single quotes, i.e. + * ' myBody '. If the function body is already quoted {@link #as(String)} should be used + * instead. 
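+ *
+ * <p>For example (hypothetical bodies):
+ *
+ * <pre>{@code
+ * asQuoted("return Math.abs(input);"); // AS 'return Math.abs(input);'
+ * asQuoted("return 'n/a';");           // AS $$ return 'n/a'; $$
+ * }</pre>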
+ */ + @NonNull + default CreateDseFunctionEnd asQuoted(@NonNull String functionBody) { + if (functionBody.contains("'")) { + return as("$$ " + functionBody + " $$"); + } else { + return as('\'' + functionBody + '\''); + } + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithNullOption.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithNullOption.java new file mode 100644 index 00000000000..2a44c002852 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithNullOption.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface CreateDseFunctionWithNullOption { + /** + * Adds RETURNS to the create function specification. This is used to specify what type is + * returned from the function. + * + *
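To make the quoting rule concrete, here is a minimal sketch of how `asQuoted` picks between the two styles. It assumes a `DseSchemaBuilder.createDseFunction` entry point (defined elsewhere in this change; treated as hypothetical here) and that `CreateDseFunctionEnd` is buildable via `asCql()` like the other terminal interfaces:

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseFunction;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class AsQuotedExample {
  public static void main(String[] args) {
    // No single quote in the body: asQuoted wraps it in single quotes.
    System.out.println(
        createDseFunction("ks", "twice")
            .withParameter("x", DataTypes.INT)
            .returnsNullOnNull()
            .returnsType(DataTypes.INT)
            .withJavaLanguage()
            .asQuoted("return x * 2;")
            .asCql());

    // The body contains a single quote: asQuoted falls back to $$ ... $$.
    System.out.println(
        createDseFunction("ks", "greet")
            .withParameter("name", DataTypes.TEXT)
            .calledOnNull()
            .returnsType(DataTypes.TEXT)
            .withJavaLanguage()
            .asQuoted("return \"it's \" + name;")
            .asCql());
  }
}
```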

To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ CreateDseFunctionWithType returnsType(@NonNull DataType dataType);
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithType.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithType.java
new file mode 100644
index 00000000000..b70facf51a1
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseFunctionWithType.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+public interface CreateDseFunctionWithType {
+ /**
+ * Adds LANGUAGE to the create function specification. This is used to specify what language is
+ * used in the function body.
+ */
+ @NonNull
+ CreateDseFunctionWithLanguage withLanguage(@NonNull String language);
+
+ /**
+ * Adds "LANGUAGE java" to the create function specification. Shortcut for {@link
+ * #withLanguage(String) withLanguage("java")}.
+ */
+ @NonNull
+ default CreateDseFunctionWithLanguage withJavaLanguage() {
+ return withLanguage("java");
+ }
+
+ /**
+ * Adds "LANGUAGE javascript" to the create function specification. Shortcut for {@link
+ * #withLanguage(String) withLanguage("javascript")}.
+ */
+ @NonNull
+ default CreateDseFunctionWithLanguage withJavaScriptLanguage() {
+ return withLanguage("javascript");
+ }
+
+ /**
+ * Adds "DETERMINISTIC" to the create function specification. This is used to specify that this
+ * function always returns the same output for a given input.
+ */
+ @NonNull
+ CreateDseFunctionWithType deterministic();
+
+ /**
+ * Adds "MONOTONIC" to the create function specification. This is used to specify that this
+ * function is either entirely non-increasing, or entirely non-decreasing.
+ */
+ @NonNull
+ CreateDseFunctionWithType monotonic();
+
+ /**
+ * Adds "MONOTONIC ON" to the create function specification. This is used to specify that this
+ * function has only a single column that is monotonic. If the function is fully monotonic, use
+ * {@link #monotonic()} instead.
+ */
+ @NonNull
+ CreateDseFunctionWithType monotonicOn(@NonNull CqlIdentifier monotonicColumn);
+
+ /**
+ * Shortcut for {@link #monotonicOn(CqlIdentifier)
+ * monotonicOn(CqlIdentifier.fromCql(monotonicColumn))}.
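The DSE-specific keywords compose naturally with the rest of the chain. A minimal sketch (again assuming the hypothetical `createDseFunction` entry point from `DseSchemaBuilder`):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseFunction;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class MonotonicFunctionExample {
  public static void main(String[] args) {
    // A bucketing function declared DETERMINISTIC and MONOTONIC ON its "ts" parameter.
    String cql =
        createDseFunction("ks", "ts_bucket")
            .withParameter("ts", DataTypes.BIGINT)
            .returnsNullOnNull()
            .returnsType(DataTypes.BIGINT)
            .deterministic()
            .monotonicOn("ts")
            .withJavaLanguage()
            .asQuoted("return ts - (ts % 3600);")
            .asCql();
    System.out.println(cql);
  }
}
```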
+ */
+ @NonNull
+ default CreateDseFunctionWithType monotonicOn(@NonNull String monotonicColumn) {
+ return monotonicOn(CqlIdentifier.fromCql(monotonicColumn));
+ }
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspace.java
new file mode 100644
index 00000000000..0fcb87bafbd
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspace.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.querybuilder.BuildableQuery;
+import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceOptions;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+public interface CreateDseKeyspace extends BuildableQuery, KeyspaceOptions<CreateDseKeyspace> {
+
+ @NonNull
+ @Override
+ CreateDseKeyspace withOption(@NonNull String name, @NonNull Object value);
+
+ /**
+ * Adjusts durable writes configuration for this keyspace. If set to false, data written to the
+ * keyspace will bypass the commit log.
+ */
+ @NonNull
+ @Override
+ default CreateDseKeyspace withDurableWrites(boolean durableWrites) {
+ return withOption("durable_writes", durableWrites);
+ }
+
+ /** Adjusts the graph engine that will be used to interpret this keyspace. */
+ @NonNull
+ default CreateDseKeyspace withGraphEngine(String graphEngine) {
+ return this.withOption("graph_engine", graphEngine);
+ }
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceStart.java
new file mode 100644
index 00000000000..c0ee240c8ff
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceStart.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.querybuilder.schema.KeyspaceReplicationOptions;
+import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Map;
+
+public interface CreateDseKeyspaceStart extends KeyspaceReplicationOptions<CreateDseKeyspace> {
+ /**
+ * Adds IF NOT EXISTS to the create keyspace specification. This indicates that the keyspace
+ * should not be created if it already exists.
+ */
+ @NonNull
+ CreateDseKeyspaceStart ifNotExists();
+
+ /**
+ * Adds 'replication' options. Use this only if you have a custom replication strategy;
+ * otherwise it is advisable to use {@link #withSimpleStrategy(int)} or {@link
+ * #withNetworkTopologyStrategy(Map)}.
+ */
+ @NonNull
+ @Override
+ CreateDseKeyspace withReplicationOptions(@NonNull Map<String, Object> replicationOptions);
+
+ /**
+ * Adds SimpleStrategy replication options with the given replication factor.
+ *
+ *

Note that using this will overwrite any previous use of this method or {@link
+ * #withNetworkTopologyStrategy(Map)}.
+ */
+ @NonNull
+ @Override
+ default CreateDseKeyspace withSimpleStrategy(int replicationFactor) {
+ ImmutableMap<String, Object> replication =
+ ImmutableMap.<String, Object>builder()
+ .put("class", "SimpleStrategy")
+ .put("replication_factor", replicationFactor)
+ .build();
+
+ return withReplicationOptions(replication);
+ }
+
+ /**
+ * Adds NetworkTopologyStrategy replication options with the given data center replication
+ * factors.
+ *
+ *

Note that using this will overwrite any previous use of this method or {@link
+ * #withSimpleStrategy(int)}.
+ *
+ * @param replications Mapping of data center name to replication factor to use for that data
+ * center.
+ */
+ @NonNull
+ @Override
+ default CreateDseKeyspace withNetworkTopologyStrategy(
+ @NonNull Map<String, Integer> replications) {
+ ImmutableMap.Builder<String, Object> replicationBuilder =
+ ImmutableMap.<String, Object>builder().put("class", "NetworkTopologyStrategy");
+
+ for (Map.Entry<String, Integer> replication : replications.entrySet()) {
+ replicationBuilder.put(replication.getKey(), replication.getValue());
+ }
+
+ return withReplicationOptions(replicationBuilder.build());
+ }
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTable.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTable.java
new file mode 100644
index 00000000000..fa6d008c114
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTable.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.querybuilder.BuildableQuery;
+import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+public interface CreateDseTable
+ extends BuildableQuery, OngoingDsePartitionKey, CreateDseTableWithOptions {
+
+ /**
+ * Adds a clustering column definition in the CREATE TABLE statement.
+ *
+ *
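Putting the keyspace pieces together, a minimal sketch (the `createDseKeyspace` entry point on `DseSchemaBuilder` is assumed here, and "Core" is just an example graph engine value):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseKeyspace;

import java.util.Collections;

public class CreateKeyspaceExample {
  public static void main(String[] args) {
    // NetworkTopologyStrategy with a replication factor of 3 in dc1,
    // plus the DSE-specific graph_engine option.
    String cql =
        createDseKeyspace("ks")
            .ifNotExists()
            .withNetworkTopologyStrategy(Collections.singletonMap("dc1", 3))
            .withGraphEngine("Core")
            .asCql();
    System.out.println(cql);
  }
}
```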

This includes the column declaration (you don't need an additional {@link
+ * #withColumn(CqlIdentifier, DataType) withColumn} call).
+ *
+ *

          Clustering key columns are added in the order of their declaration. + * + *

To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ CreateDseTable withClusteringColumn(
+ @NonNull CqlIdentifier columnName, @NonNull DataType dataType);
+
+ /**
+ * Shortcut for {@link #withClusteringColumn(CqlIdentifier, DataType)
+ * withClusteringColumn(CqlIdentifier.fromCql(columnName), dataType)}.
+ */
+ @NonNull
+ default CreateDseTable withClusteringColumn(
+ @NonNull String columnName, @NonNull DataType dataType) {
+ return withClusteringColumn(CqlIdentifier.fromCql(columnName), dataType);
+ }
+
+ /**
+ * Adds a column definition in the CREATE TABLE statement.
+ *
+ *

To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ CreateDseTable withColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType);
+
+ /**
+ * Shortcut for {@link #withColumn(CqlIdentifier, DataType)
+ * withColumn(CqlIdentifier.fromCql(columnName), dataType)}.
+ */
+ @NonNull
+ default CreateDseTable withColumn(@NonNull String columnName, @NonNull DataType dataType) {
+ return withColumn(CqlIdentifier.fromCql(columnName), dataType);
+ }
+
+ /**
+ * Adds a static column definition in the CREATE TABLE statement.
+ *
+ *

This includes the column declaration (you don't need an additional {@link
+ * #withColumn(CqlIdentifier, DataType) withColumn} call).
+ *
+ *

To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ CreateDseTable withStaticColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType);
+
+ /**
+ * Shortcut for {@link #withStaticColumn(CqlIdentifier, DataType)
+ * withStaticColumn(CqlIdentifier.fromCql(columnName), dataType)}.
+ */
+ @NonNull
+ default CreateDseTable withStaticColumn(@NonNull String columnName, @NonNull DataType dataType) {
+ return withStaticColumn(CqlIdentifier.fromCql(columnName), dataType);
+ }
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableStart.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableStart.java
new file mode 100644
index 00000000000..030668262df
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableStart.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+public interface CreateDseTableStart extends OngoingDsePartitionKey {
+
+ /**
+ * Adds IF NOT EXISTS to the create table specification. This indicates that the table should not
+ * be created if it already exists.
+ */
+ @NonNull
+ CreateDseTableStart ifNotExists();
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableWithOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableWithOptions.java
new file mode 100644
index 00000000000..3d7b44ef905
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableWithOptions.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
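The column methods above chain fluently; a minimal sketch (assuming a `createDseTable` entry point on `DseSchemaBuilder`):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseTable;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class CreateTableExample {
  public static void main(String[] args) {
    String cql =
        createDseTable("ks", "readings")
            .ifNotExists()
            .withPartitionKey("sensor", DataTypes.TEXT)
            .withClusteringColumn("ts", DataTypes.TIMESTAMP)
            .withColumn("value", DataTypes.DOUBLE)
            .withStaticColumn("unit", DataTypes.TEXT)
            .asCql();
    System.out.println(cql);
  }
}
```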
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.querybuilder.BuildableQuery;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+public interface CreateDseTableWithOptions
+ extends BuildableQuery,
+ DseRelationStructure<CreateDseTableWithOptions>,
+ DseTableGraphOptions<CreateDseTableWithOptions> {
+
+ /** Enables COMPACT STORAGE in the CREATE TABLE statement. */
+ @NonNull
+ CreateDseTableWithOptions withCompactStorage();
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseGraphEdgeSide.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseGraphEdgeSide.java
new file mode 100644
index 00000000000..7cfe8285919
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseGraphEdgeSide.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.dse.driver.internal.querybuilder.schema.DefaultDseGraphEdgeSide;
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.List;
+
+public interface DseGraphEdgeSide {
+
+ /** Starts the definition of a graph edge side by designating the from/to table. */
+ @NonNull
+ static DseGraphEdgeSide table(@NonNull CqlIdentifier tableId) {
+ return new DefaultDseGraphEdgeSide(tableId);
+ }
+
+ /** Shortcut for {@link #table(CqlIdentifier) table(CqlIdentifier.fromCql(tableName))}. */
+ @NonNull
+ static DseGraphEdgeSide table(@NonNull String tableName) {
+ return table(CqlIdentifier.fromCql(tableName));
+ }
+
+ /**
+ * Adds a partition key column to the primary key definition of this edge side.
+ *
+ *

          Call this method multiple times if the partition key is composite. + */ + @NonNull + DseGraphEdgeSide withPartitionKey(@NonNull CqlIdentifier columnId); + + /** + * Shortcut for {@link #withPartitionKey(CqlIdentifier) + * withPartitionKey(CqlIdentifier.fromCql(columnName))}. + */ + @NonNull + default DseGraphEdgeSide withPartitionKey(@NonNull String columnName) { + return withPartitionKey(CqlIdentifier.fromCql(columnName)); + } + + /** + * Adds a clustering column to the primary key definition of this edge side. + * + *

Call this method multiple times to add more than one clustering column.
+ */
+ @NonNull
+ DseGraphEdgeSide withClusteringColumn(@NonNull CqlIdentifier columnId);
+
+ /**
+ * Shortcut for {@link #withClusteringColumn(CqlIdentifier)
+ * withClusteringColumn(CqlIdentifier.fromCql(columnName))}.
+ */
+ @NonNull
+ default DseGraphEdgeSide withClusteringColumn(@NonNull String columnName) {
+ return withClusteringColumn(CqlIdentifier.fromCql(columnName));
+ }
+
+ @NonNull
+ CqlIdentifier getTableId();
+
+ @NonNull
+ List<CqlIdentifier> getPartitionKeyColumns();
+
+ @NonNull
+ List<CqlIdentifier> getClusteringColumns();
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationOptions.java
new file mode 100644
index 00000000000..170390c43d5
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationOptions.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.querybuilder.schema.RelationOptions;
+
+public interface DseRelationOptions<SelfT extends DseRelationOptions<SelfT>>
+ extends RelationOptions<SelfT> {}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationStructure.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationStructure.java
new file mode 100644
index 00000000000..f26039e45b4
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseRelationStructure.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
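For instance, one side of an edge could be described like this (a short sketch; the full `withEdgeLabel` call that consumes both sides appears further below):

```java
import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide;

public class EdgeSideExample {
  public static void main(String[] args) {
    // The "person" side of a "contrib" edge, keyed by the contributor column.
    DseGraphEdgeSide person =
        DseGraphEdgeSide.table("person").withPartitionKey("contributor");
    System.out.println(person.getTableId() + " " + person.getPartitionKeyColumns());
  }
}
```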
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder;
+import com.datastax.oss.driver.internal.core.CqlIdentifiers;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Map;
+
+public interface DseRelationStructure<SelfT extends DseRelationStructure<SelfT>>
+ extends DseRelationOptions<SelfT> {
+
+ /**
+ * Adds the provided CLUSTERING ORDER.
+ *
+ *

They will be appended in the iteration order of the provided map. If an ordering was already
+ * defined for a given identifier, it will be removed and the new ordering will appear in its
+ * position in the provided map.
+ */
+ @NonNull
+ SelfT withClusteringOrderByIds(@NonNull Map<CqlIdentifier, ClusteringOrder> orderings);
+
+ /**
+ * Shortcut for {@link #withClusteringOrderByIds(Map)} with the columns specified as
+ * case-insensitive names. They will be wrapped with {@link CqlIdentifier#fromCql(String)}.
+ *
+ *

Note that it's possible for two different case-insensitive names to resolve to the same
+ * identifier, for example "foo" and "Foo"; if this happens, a runtime exception will be thrown.
+ */
+ @NonNull
+ default SelfT withClusteringOrder(@NonNull Map<String, ClusteringOrder> orderings) {
+ return withClusteringOrderByIds(CqlIdentifiers.wrapKeys(orderings));
+ }
+
+ /**
+ * Adds the provided clustering order.
+ *
+ *
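A sketch of the single-column variant (hypothetical `createDseTable` entry point; the map-based shortcut above works the same way, with a `LinkedHashMap` to preserve ordering):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseTable;

import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder;
import com.datastax.oss.driver.api.core.type.DataTypes;

public class ClusteringOrderExample {
  public static void main(String[] args) {
    String cql =
        createDseTable("readings")
            .withPartitionKey("sensor", DataTypes.TEXT)
            .withClusteringColumn("ts", DataTypes.TIMESTAMP)
            .withColumn("value", DataTypes.DOUBLE)
            .withClusteringOrder("ts", ClusteringOrder.DESC)
            .asCql();
    System.out.println(cql);
  }
}
```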

If clustering order was already defined for this identifier, it will be removed and the new
+ * clause will be appended at the end of the current clustering order.
+ */
+ @NonNull
+ SelfT withClusteringOrder(@NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order);
+
+ /**
+ * Shortcut for {@link #withClusteringOrder(CqlIdentifier, ClusteringOrder)
+ * withClusteringOrder(CqlIdentifier.fromCql(columnName), order)}.
+ */
+ @NonNull
+ default SelfT withClusteringOrder(@NonNull String columnName, @NonNull ClusteringOrder order) {
+ return withClusteringOrder(CqlIdentifier.fromCql(columnName), order);
+ }
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseTableGraphOptions.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseTableGraphOptions.java
new file mode 100644
index 00000000000..df1008ff053
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/DseTableGraphOptions.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
+
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+
+public interface DseTableGraphOptions<NextT> {
+
+ /** Adds a vertex label to this table. */
+ @NonNull
+ NextT withVertexLabel(@Nullable CqlIdentifier vertexLabelId);
+
+ /**
+ * Shortcut for {@link #withVertexLabel(CqlIdentifier)
+ * withVertexLabel(CqlIdentifier.fromCql(vertexLabelName))}.
+ */
+ @NonNull
+ default NextT withVertexLabel(@NonNull String vertexLabelName) {
+ return withVertexLabel(CqlIdentifier.fromCql(vertexLabelName));
+ }
+
+ /**
+ * Adds an anonymous vertex label to this table.
+ *
+ *

          This is a shortcut for {@link #withVertexLabel(CqlIdentifier) withVertexLabel(null)}. + */ + @NonNull + default NextT withVertexLabel() { + return withVertexLabel((CqlIdentifier) null); + } + + /** + * Adds an edge label to this table. + * + *

          Use {@link DseGraphEdgeSide#table(CqlIdentifier)} to build the definitions of both sides, + * for example: + * + *

          {@code
          +   * withEdgeLabel("contrib",
          +   *               table("person")
          +   *                 .withPartitionKey("contributor"),
          +   *               table("soft")
+   *                 .withPartitionKey("company_name")
+   *                 .withPartitionKey("software_name")
          +   *                 .withClusteringColumn("software_version"))
          +   * }
          + */ + @NonNull + NextT withEdgeLabel( + @Nullable CqlIdentifier edgeLabelId, + @NonNull DseGraphEdgeSide from, + @NonNull DseGraphEdgeSide to); + + /** + * Shortcut for {@link #withEdgeLabel(CqlIdentifier, DseGraphEdgeSide, DseGraphEdgeSide) + * withEdgeLabel(CqlIdentifier.fromCql(edgeLabelName), from, to)}. + */ + @NonNull + default NextT withEdgeLabel( + @NonNull String edgeLabelName, @NonNull DseGraphEdgeSide from, @NonNull DseGraphEdgeSide to) { + return withEdgeLabel(CqlIdentifier.fromCql(edgeLabelName), from, to); + } + + /** + * Adds an anonymous edge label to this table. + * + *

          This is a shortcut for {@link #withEdgeLabel(CqlIdentifier, DseGraphEdgeSide, + * DseGraphEdgeSide) withEdgeLabel(null, from, to)}. + */ + @NonNull + default NextT withEdgeLabel(@NonNull DseGraphEdgeSide from, @NonNull DseGraphEdgeSide to) { + return withEdgeLabel((CqlIdentifier) null, from, to); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/OngoingDsePartitionKey.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/OngoingDsePartitionKey.java new file mode 100644 index 00000000000..d535939994b --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/OngoingDsePartitionKey.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import com.datastax.oss.driver.api.querybuilder.schema.CreateTable; +import edu.umd.cs.findbugs.annotations.NonNull; + +public interface OngoingDsePartitionKey { + + /** + * Adds a partition key column definition. + * + *
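Combining the graph options with the table builder, a sketch that mirrors the Javadoc example above (the `createDseTable` entry point is assumed, as before):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseTable;
import static com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide.table;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class GraphTableExample {
  public static void main(String[] args) {
    // An edge table carrying the "contrib" label between person and soft vertices.
    String cql =
        createDseTable("graph_ks", "contribs")
            .withPartitionKey("contributor", DataTypes.TEXT)
            .withClusteringColumn("company_name", DataTypes.TEXT)
            .withClusteringColumn("software_name", DataTypes.TEXT)
            .withClusteringColumn("software_version", DataTypes.INT)
            .withEdgeLabel(
                "contrib",
                table("person").withPartitionKey("contributor"),
                table("soft")
                    .withPartitionKey("company_name")
                    .withPartitionKey("software_name")
                    .withClusteringColumn("software_version"))
            .asCql();
    System.out.println(cql);
  }
}
```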

This includes the column declaration (you don't need an additional {@link
+ * CreateTable#withColumn(CqlIdentifier, DataType) withColumn} call).
+ *
+ *

          Partition keys are added in the order of their declaration. + * + *

To create the data type, use the constants and static methods in {@link DataTypes}, or
+ * {@link SchemaBuilder#udt(CqlIdentifier, boolean)}.
+ */
+ @NonNull
+ CreateDseTable withPartitionKey(@NonNull CqlIdentifier columnName, @NonNull DataType dataType);
+
+ /**
+ * Shortcut for {@link #withPartitionKey(CqlIdentifier, DataType)
+ * withPartitionKey(CqlIdentifier.fromCql(columnName), dataType)}.
+ */
+ @NonNull
+ default CreateDseTable withPartitionKey(@NonNull String columnName, @NonNull DataType dataType) {
+ return withPartitionKey(CqlIdentifier.fromCql(columnName), dataType);
+ }
+}
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/package-info.java b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/package-info.java
new file mode 100644
index 00000000000..5e647ee3a4d
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/api/querybuilder/schema/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This package effectively mirrors the Cassandra OSS Schema interfaces to allow extended schema and
+ * query building for the DSE driver. NOTE: Changes made to the OSS driver will need to be mirrored
+ * here if the OSS driver changes affect an extended schema build strategy for the DSE driver.
+ */
+package com.datastax.dse.driver.api.querybuilder.schema;
diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseKeyspace.java
new file mode 100644
index 00000000000..6fa2a64eaf3
--- /dev/null
+++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseKeyspace.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseKeyspace; +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseKeyspaceStart; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultAlterDseKeyspace implements AlterDseKeyspaceStart, AlterDseKeyspace { + + private final CqlIdentifier keyspaceName; + private final ImmutableMap options; + + public DefaultAlterDseKeyspace(@NonNull CqlIdentifier keyspaceName) { + this(keyspaceName, ImmutableMap.of()); + } + + public DefaultAlterDseKeyspace( + @NonNull CqlIdentifier keyspaceName, @NonNull ImmutableMap options) { + this.keyspaceName = keyspaceName; + this.options = options; + } + + @NonNull + @Override + public AlterDseKeyspace withDurableWrites(boolean durableWrites) { + return withOption("durable_writes", durableWrites); + } + + @NonNull + @Override + public AlterDseKeyspace withGraphEngine(String graphEngine) { + return this.withOption("graph_engine", graphEngine); + } + + @NonNull + @Override + public AlterDseKeyspace withReplicationOptions(@NonNull Map replicationOptions) { + return withOption("replication", replicationOptions); + } + + @NonNull + @Override + public AlterDseKeyspace withSimpleStrategy(int replicationFactor) { + ImmutableMap replication = + ImmutableMap.builder() + .put("class", "SimpleStrategy") + .put("replication_factor", replicationFactor) + .build(); + + return withReplicationOptions(replication); + } + + @NonNull + @Override + public AlterDseKeyspace withNetworkTopologyStrategy(@NonNull Map replications) { + ImmutableMap.Builder replicationBuilder = + ImmutableMap.builder().put("class", "NetworkTopologyStrategy"); + + for (Map.Entry replication : replications.entrySet()) { + replicationBuilder.put(replication.getKey(), replication.getValue()); + } + + return withReplicationOptions(replicationBuilder.build()); + } + + @NonNull + @Override + public AlterDseKeyspace withOption(@NonNull String name, @NonNull Object value) { + return new DefaultAlterDseKeyspace( + keyspaceName, ImmutableCollections.append(options, name, value)); + } + + @NonNull + @Override + public String asCql() { + return "ALTER KEYSPACE " + keyspaceName.asCql(true) + OptionsUtils.buildOptions(options, true); + } + + @NonNull + @Override + public Map getOptions() { + return options; + } + + @NonNull + public CqlIdentifier getKeyspace() { + return keyspaceName; + } + + @Override + public String toString() { + return asCql(); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseTable.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseTable.java new file mode 100644 index 00000000000..5f2ad10b7d1 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultAlterDseTable.java @@ -0,0 +1,485 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
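The implementation above renders straight to CQL; a usage sketch (assuming an `alterDseKeyspace` entry point on `DseSchemaBuilder`, with "Core" as an example graph engine value):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.alterDseKeyspace;

public class AlterKeyspaceExample {
  public static void main(String[] args) {
    // Produces an ALTER KEYSPACE statement whose WITH clause is built from the options map.
    System.out.println(alterDseKeyspace("ks").withGraphEngine("Core").asCql());
  }
}
```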
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import static com.datastax.oss.driver.internal.querybuilder.schema.Utils.appendSet; + +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableAddColumnEnd; +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableDropColumnEnd; +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableRenameColumnEnd; +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableStart; +import com.datastax.dse.driver.api.querybuilder.schema.AlterDseTableWithOptionsEnd; +import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; +import com.datastax.oss.driver.internal.querybuilder.CqlHelper; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultAlterDseTable + implements AlterDseTableStart, + AlterDseTableAddColumnEnd, + AlterDseTableDropColumnEnd, + AlterDseTableRenameColumnEnd, + AlterDseTableWithOptionsEnd, + BuildableQuery { + + private final CqlIdentifier keyspace; + private final CqlIdentifier tableName; + + private final ImmutableMap columnsToAddInOrder; + private final ImmutableSet columnsToAdd; + private final ImmutableSet columnsToAddStatic; + private final ImmutableSet columnsToDrop; + private final ImmutableMap columnsToRename; + private final CqlIdentifier columnToAlter; + private final DataType columnToAlterType; + private final DseTableVertexOperation vertexOperation; + private final DseTableEdgeOperation edgeOperation; + private final ImmutableMap options; + private final boolean dropCompactStorage; + + public DefaultAlterDseTable(@NonNull CqlIdentifier tableName) { + this(null, tableName); + } + + public DefaultAlterDseTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { + this( + keyspace, + tableName, + false, + ImmutableMap.of(), + ImmutableSet.of(), + ImmutableSet.of(), + ImmutableSet.of(), + ImmutableMap.of(), + null, + null, + null, + null, + ImmutableMap.of()); + } + + public DefaultAlterDseTable( + @Nullable CqlIdentifier keyspace, + @NonNull CqlIdentifier tableName, + boolean dropCompactStorage, + @NonNull ImmutableMap columnsToAddInOrder, + @NonNull ImmutableSet columnsToAdd, + @NonNull ImmutableSet columnsToAddStatic, + @NonNull ImmutableSet columnsToDrop, + @NonNull ImmutableMap columnsToRename, + @Nullable CqlIdentifier columnToAlter, + @Nullable 
DataType columnToAlterType, + @Nullable DseTableVertexOperation vertexOperation, + @Nullable DseTableEdgeOperation edgeOperation, + @NonNull ImmutableMap options) { + this.keyspace = keyspace; + this.tableName = tableName; + this.dropCompactStorage = dropCompactStorage; + this.columnsToAddInOrder = columnsToAddInOrder; + this.columnsToAdd = columnsToAdd; + this.columnsToAddStatic = columnsToAddStatic; + this.columnsToDrop = columnsToDrop; + this.columnsToRename = columnsToRename; + this.columnToAlter = columnToAlter; + this.columnToAlterType = columnToAlterType; + this.vertexOperation = vertexOperation; + this.edgeOperation = edgeOperation; + this.options = options; + } + + @NonNull + @Override + public AlterDseTableAddColumnEnd addColumn( + @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + ImmutableCollections.append(columnsToAddInOrder, columnName, dataType), + appendSet(columnsToAdd, columnName), + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public AlterDseTableAddColumnEnd addStaticColumn( + @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + ImmutableCollections.append(columnsToAddInOrder, columnName, dataType), + columnsToAdd, + appendSet(columnsToAddStatic, columnName), + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public BuildableQuery dropCompactStorage() { + return new DefaultAlterDseTable( + keyspace, + tableName, + true, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public AlterDseTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... 
columnNames) { + ImmutableSet.Builder builder = + ImmutableSet.builder().addAll(columnsToDrop); + for (CqlIdentifier columnName : columnNames) { + builder = builder.add(columnName); + } + + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + builder.build(), + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public AlterDseTableRenameColumnEnd renameColumn( + @NonNull CqlIdentifier from, @NonNull CqlIdentifier to) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + ImmutableCollections.append(columnsToRename, from, to), + columnToAlter, + columnToAlterType, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnName, + dataType, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public BuildableQuery withVertexLabel(@Nullable CqlIdentifier vertexLabelId) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + new DseTableVertexOperation(DseTableGraphOperationType.WITH, vertexLabelId), + edgeOperation, + options); + } + + @NonNull + @Override + public BuildableQuery withoutVertexLabel(@Nullable CqlIdentifier vertexLabelId) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + new DseTableVertexOperation(DseTableGraphOperationType.WITHOUT, vertexLabelId), + edgeOperation, + options); + } + + @NonNull + @Override + public BuildableQuery withEdgeLabel( + @Nullable CqlIdentifier edgeLabelId, + @NonNull DseGraphEdgeSide from, + @NonNull DseGraphEdgeSide to) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + new DseTableEdgeOperation(DseTableGraphOperationType.WITH, edgeLabelId, from, to), + options); + } + + @NonNull + @Override + public BuildableQuery withoutEdgeLabel(@Nullable CqlIdentifier edgeLabelId) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + new DseTableEdgeOperation(DseTableGraphOperationType.WITHOUT, edgeLabelId, null, null), + options); + } + + @NonNull + @Override + public AlterDseTableWithOptionsEnd withOption(@NonNull String name, @NonNull Object value) { + return new DefaultAlterDseTable( + keyspace, + tableName, + dropCompactStorage, + columnsToAddInOrder, + columnsToAdd, + columnsToAddStatic, + columnsToDrop, + columnsToRename, + columnToAlter, + columnToAlterType, + vertexOperation, + edgeOperation, + ImmutableCollections.append(options, name, value)); + } + + 
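Each mutator above returns a new immutable instance, so partially built statements can be shared safely. A usage sketch (assuming an `alterDseTable` entry point and the usual String overloads mirroring the CqlIdentifier methods shown here):

```java
import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.alterDseTable;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class AlterTableExample {
  public static void main(String[] args) {
    // ADD a single column; parentheses are only emitted for multi-column ADDs.
    System.out.println(
        alterDseTable("ks", "readings").addColumn("unit", DataTypes.TEXT).asCql());

    // DROP two columns in one statement.
    System.out.println(
        alterDseTable("ks", "readings").dropColumns("unit", "value").asCql());
  }
}
```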
@NonNull
+ @Override
+ public String asCql() {
+ StringBuilder builder = new StringBuilder("ALTER TABLE ");
+
+ CqlHelper.qualify(keyspace, tableName, builder);
+
+ if (columnToAlter != null) {
+ return builder
+ .append(" ALTER ")
+ .append(columnToAlter.asCql(true))
+ .append(" TYPE ")
+ .append(columnToAlterType.asCql(true, true))
+ .toString();
+ } else if (!columnsToAdd.isEmpty()) {
+ builder.append(" ADD ");
+ if (columnsToAdd.size() > 1) {
+ builder.append('(');
+ }
+ boolean first = true;
+ for (Map.Entry<CqlIdentifier, DataType> column : columnsToAddInOrder.entrySet()) {
+ if (first) {
+ first = false;
+ } else {
+ builder.append(',');
+ }
+ builder
+ .append(column.getKey().asCql(true))
+ .append(' ')
+ .append(column.getValue().asCql(true, true));
+
+ if (columnsToAddStatic.contains(column.getKey())) {
+ builder.append(" STATIC");
+ }
+ }
+ if (columnsToAdd.size() > 1) {
+ builder.append(')');
+ }
+ return builder.toString();
+ } else if (!columnsToDrop.isEmpty()) {
+ boolean moreThanOneDrop = columnsToDrop.size() > 1;
+ CqlHelper.appendIds(
+ columnsToDrop,
+ builder,
+ moreThanOneDrop ? " DROP (" : " DROP ",
+ ",",
+ moreThanOneDrop ? ")" : "");
+ return builder.toString();
+ } else if (!columnsToRename.isEmpty()) {
+ builder.append(" RENAME ");
+ boolean first = true;
+ for (Map.Entry<CqlIdentifier, CqlIdentifier> entry : columnsToRename.entrySet()) {
+ if (first) {
+ first = false;
+ } else {
+ builder.append(" AND ");
+ }
+ builder
+ .append(entry.getKey().asCql(true))
+ .append(" TO ")
+ .append(entry.getValue().asCql(true));
+ }
+ return builder.toString();
+ } else if (vertexOperation != null) {
+ builder.append(' ').append(vertexOperation.getType()).append(' ');
+ vertexOperation.append(builder);
+ } else if (edgeOperation != null) {
+ builder.append(' ').append(edgeOperation.getType()).append(' ');
+ edgeOperation.append(builder);
+ } else if (dropCompactStorage) {
+ return builder.append(" DROP COMPACT STORAGE").toString();
+ } else if (!options.isEmpty()) {
+ return builder.append(OptionsUtils.buildOptions(options, true)).toString();
+ }
+
+ // While this is incomplete, we should return the partially built query at this point for
+ // toString purposes.
+ return builder.toString(); + } + + @Override + public String toString() { + return asCql(); + } + + @NonNull + @Override + public Map getOptions() { + return options; + } + + @Nullable + public CqlIdentifier getKeyspace() { + return keyspace; + } + + @NonNull + public CqlIdentifier getTable() { + return tableName; + } + + @NonNull + public ImmutableMap getColumnsToAddInOrder() { + return columnsToAddInOrder; + } + + @NonNull + public ImmutableSet getColumnsToAddRegular() { + return columnsToAdd; + } + + @NonNull + public ImmutableSet getColumnsToAddStatic() { + return columnsToAddStatic; + } + + @NonNull + public ImmutableSet getColumnsToDrop() { + return columnsToDrop; + } + + @NonNull + public ImmutableMap getColumnsToRename() { + return columnsToRename; + } + + @Nullable + public CqlIdentifier getColumnToAlter() { + return columnToAlter; + } + + @Nullable + public DataType getColumnToAlterType() { + return columnToAlterType; + } + + @Nullable + public DseTableVertexOperation getVertexOperation() { + return vertexOperation; + } + + @Nullable + public DseTableEdgeOperation getEdgeOperation() { + return edgeOperation; + } + + public boolean isDropCompactStorage() { + return dropCompactStorage; + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseAggregate.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseAggregate.java new file mode 100644 index 00000000000..38c13f6e7d5 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseAggregate.java @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateEnd; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseAggregateStateFunc; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.querybuilder.term.Term; +import com.datastax.oss.driver.internal.querybuilder.CqlHelper; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import net.jcip.annotations.Immutable; + +/** + * Implements DSE extended interfaces for creating aggregates. 
This class provides the same + * functionality as the Cassandra OSS {@link + * com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateAggregate} implementation, with + * the additional DSE specific extended functionality (DETERMINISTIC keyword). + */ +@Immutable +public class DefaultCreateDseAggregate + implements CreateDseAggregateEnd, CreateDseAggregateStart, CreateDseAggregateStateFunc { + + private final CqlIdentifier keyspace; + private final CqlIdentifier functionName; + private boolean orReplace; + private boolean ifNotExists; + private final ImmutableList parameters; + private final CqlIdentifier sFunc; + private final DataType sType; + private final CqlIdentifier finalFunc; + private final Term term; + private final boolean deterministic; + + public DefaultCreateDseAggregate(@NonNull CqlIdentifier functionName) { + this(null, functionName); + } + + public DefaultCreateDseAggregate( + @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier functionName) { + this(keyspace, functionName, false, false, ImmutableList.of(), null, null, null, null, false); + } + + public DefaultCreateDseAggregate( + @Nullable CqlIdentifier keyspace, + @NonNull CqlIdentifier functionName, + boolean orReplace, + boolean ifNotExists, + @NonNull ImmutableList parameters, + @Nullable CqlIdentifier sFunc, + @Nullable DataType sType, + @Nullable CqlIdentifier finalFunc, + @Nullable Term term, + boolean deterministic) { + this.keyspace = keyspace; + this.functionName = functionName; + this.orReplace = orReplace; + this.ifNotExists = ifNotExists; + this.parameters = parameters; + this.sFunc = sFunc; + this.sType = sType; + this.finalFunc = finalFunc; + this.term = term; + this.deterministic = deterministic; + } + + @NonNull + @Override + public String asCql() { + StringBuilder builder = new StringBuilder(); + + builder.append("CREATE "); + if (orReplace) { + builder.append("OR REPLACE "); + } + builder.append("AGGREGATE "); + + if (ifNotExists) { + builder.append("IF NOT EXISTS "); + } + CqlHelper.qualify(keyspace, functionName, builder); + + builder.append(" ("); + boolean first = true; + for (DataType param : parameters) { + if (first) { + first = false; + } else { + builder.append(','); + } + builder.append(param.asCql(false, true)); + } + builder.append(')'); + if (sFunc != null) { + builder.append(" SFUNC "); + builder.append(sFunc.asCql(true)); + } + if (sType != null) { + builder.append(" STYPE "); + builder.append(sType.asCql(false, true)); + } + if (finalFunc != null) { + builder.append(" FINALFUNC "); + builder.append(finalFunc.asCql(true)); + } + if (term != null) { + builder.append(" INITCOND "); + term.appendTo(builder); + } + // deterministic + if (deterministic) { + builder.append(" DETERMINISTIC"); + } + return builder.toString(); + } + + @NonNull + @Override + public CreateDseAggregateEnd withInitCond(@NonNull Term term) { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + public CreateDseAggregateStart ifNotExists() { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + true, + parameters, + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + public CreateDseAggregateStart orReplace() { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + true, + ifNotExists, + parameters, + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + 
public CreateDseAggregateStart withParameter(@NonNull DataType paramType) { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + ifNotExists, + ImmutableCollections.append(parameters, paramType), + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + public CreateDseAggregateStateFunc withSFunc(@NonNull CqlIdentifier sFunc) { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + public CreateDseAggregateEnd withSType(@NonNull DataType sType) { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + public CreateDseAggregateEnd withFinalFunc(@NonNull CqlIdentifier finalFunc) { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + sFunc, + sType, + finalFunc, + term, + deterministic); + } + + @NonNull + @Override + public CreateDseAggregateEnd deterministic() { + return new DefaultCreateDseAggregate( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + sFunc, + sType, + finalFunc, + term, + true); + } + + @Override + public String toString() { + return asCql(); + } + + @Nullable + public CqlIdentifier getKeyspace() { + return keyspace; + } + + @NonNull + public CqlIdentifier getFunctionName() { + return functionName; + } + + public boolean isOrReplace() { + return orReplace; + } + + public boolean isIfNotExists() { + return ifNotExists; + } + + @NonNull + public ImmutableList<DataType> getParameters() { + return parameters; + } + + @Nullable + public CqlIdentifier getsFunc() { + return sFunc; + } + + @Nullable + public DataType getsType() { + return sType; + } + + @Nullable + public CqlIdentifier getFinalFunc() { + return finalFunc; + } + + @Nullable + public Term getTerm() { + return term; + } + + public boolean isDeterministic() { + return deterministic; + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseFunction.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseFunction.java new file mode 100644 index 00000000000..679629bf893 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseFunction.java @@ -0,0 +1,444 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
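To make the fluent contract above concrete, here is a minimal usage sketch. It drives the internal `DefaultCreateDseAggregate` directly (application code would normally go through the public DSE schema builder entry point); the aggregate and function names are hypothetical, and the stage-to-stage chaining is assumed to match the `@Override` declarations shown above.

```java
import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseAggregate;
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.type.DataTypes;

// Every with* call returns a fresh immutable copy, so intermediate stages are safe to reuse.
String cql =
    new DefaultCreateDseAggregate(CqlIdentifier.fromCql("average")) // hypothetical name
        .withParameter(DataTypes.INT)
        .withSFunc(CqlIdentifier.fromCql("avg_state"))
        .withSType(DataTypes.tupleOf(DataTypes.INT, DataTypes.BIGINT))
        .withFinalFunc(CqlIdentifier.fromCql("avg_final"))
        .deterministic()
        .asCql();
// Per asCql() above, this should render roughly as:
// CREATE AGGREGATE average (int) SFUNC avg_state STYPE tuple<int, bigint> FINALFUNC avg_final DETERMINISTIC
```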
+ */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionEnd; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionWithLanguage; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionWithNullOption; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseFunctionWithType; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.querybuilder.CqlHelper; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Map; +import net.jcip.annotations.Immutable; + +/** + * Implements DSE extended interfaces for creating functions. This class provides the same + * functionality as the Cassandra OSS {@link + * com.datastax.oss.driver.internal.querybuilder.schema.DefaultCreateFunction} implementation, with + * the additional DSE specific extended functionality (DETERMINISTIC and MONOTONIC keywords). + */ +@Immutable +public class DefaultCreateDseFunction + implements CreateDseFunctionEnd, + CreateDseFunctionStart, + CreateDseFunctionWithLanguage, + CreateDseFunctionWithNullOption, + CreateDseFunctionWithType { + + private final CqlIdentifier keyspace; + private final CqlIdentifier functionName; + private boolean orReplace; + private boolean ifNotExists; + private final ImmutableMap<CqlIdentifier, DataType> parameters; + private boolean returnsNullOnNull; + private final DataType returnType; + private final String language; + private final String functionBody; + private final boolean deterministic; + private final boolean globallyMonotonic; + private final CqlIdentifier monotonicOn; + + public DefaultCreateDseFunction(CqlIdentifier functionName) { + this(null, functionName); + } + + public DefaultCreateDseFunction(CqlIdentifier keyspace, CqlIdentifier functionName) { + this( + keyspace, + functionName, + false, + false, + ImmutableMap.of(), + false, + null, + null, + null, + false, + false, + null); + } + + public DefaultCreateDseFunction( + CqlIdentifier keyspace, + CqlIdentifier functionName, + boolean orReplace, + boolean ifNotExists, + ImmutableMap<CqlIdentifier, DataType> parameters, + boolean returnsNullOnNull, + DataType returns, + String language, + String functionBody, + boolean deterministic, + boolean globallyMonotonic, + CqlIdentifier monotonicOn) { + this.keyspace = keyspace; + this.functionName = functionName; + this.orReplace = orReplace; + this.ifNotExists = ifNotExists; + this.parameters = parameters; + this.returnsNullOnNull = returnsNullOnNull; + this.returnType = returns; + this.language = language; + this.functionBody = functionBody; + this.deterministic = deterministic; + this.globallyMonotonic = globallyMonotonic; + this.monotonicOn = monotonicOn; + } + + @NonNull + @Override + public String asCql() { + StringBuilder builder = new StringBuilder(); + + builder.append("CREATE "); + if (orReplace) { + builder.append("OR REPLACE "); + } + builder.append("FUNCTION "); + + if (ifNotExists) { + builder.append("IF NOT EXISTS "); + } + CqlHelper.qualify(keyspace, functionName, builder); + + builder.append(" ("); + + boolean first = true; + for (Map.Entry<CqlIdentifier, DataType> param : parameters.entrySet()) { + if (first) { + first = false; + } else {
+ builder.append(','); + } + builder + .append(param.getKey().asCql(true)) + .append(' ') + .append(param.getValue().asCql(false, true)); + } + builder.append(')'); + if (returnsNullOnNull) { + builder.append(" RETURNS NULL"); + } else { + builder.append(" CALLED"); + } + + builder.append(" ON NULL INPUT"); + + if (returnType == null) { + // return type has not been provided yet. + return builder.toString(); + } + + builder.append(" RETURNS "); + builder.append(returnType.asCql(false, true)); + + // deterministic + if (deterministic) { + builder.append(" DETERMINISTIC"); + } + + // monotonic + if (globallyMonotonic) { + builder.append(" MONOTONIC"); + } else if (monotonicOn != null) { + builder.append(" MONOTONIC ON ").append(monotonicOn.asCql(true)); + } + + if (language == null) { + // language has not been provided yet. + return builder.toString(); + } + + builder.append(" LANGUAGE "); + builder.append(language); + + if (functionBody == null) { + // body has not been provided yet. + return builder.toString(); + } + + builder.append(" AS "); + builder.append(functionBody); + return builder.toString(); + } + + @Override + public String toString() { + return asCql(); + } + + @NonNull + @Override + public CreateDseFunctionEnd as(@NonNull String functionBody) { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionWithLanguage withLanguage(@NonNull String language) { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionStart ifNotExists() { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + true, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionStart orReplace() { + return new DefaultCreateDseFunction( + keyspace, + functionName, + true, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionStart withParameter( + @NonNull CqlIdentifier paramName, @NonNull DataType paramType) { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + ImmutableCollections.append(parameters, paramName, paramType), + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionWithNullOption returnsNullOnNull() { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + true, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionWithNullOption calledOnNull() { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + false, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionWithType deterministic() { + return new 
DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + true, + globallyMonotonic, + monotonicOn); + } + + @NonNull + @Override + public CreateDseFunctionWithType monotonic() { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + true, + null); + } + + @NonNull + @Override + public CreateDseFunctionWithType monotonicOn(@NonNull CqlIdentifier monotonicColumn) { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + false, + monotonicColumn); + } + + @NonNull + @Override + public CreateDseFunctionWithType returnsType(@NonNull DataType returnType) { + return new DefaultCreateDseFunction( + keyspace, + functionName, + orReplace, + ifNotExists, + parameters, + returnsNullOnNull, + returnType, + language, + functionBody, + deterministic, + globallyMonotonic, + monotonicOn); + } + + @Nullable + public CqlIdentifier getKeyspace() { + return keyspace; + } + + @NonNull + public CqlIdentifier getFunction() { + return functionName; + } + + public boolean isOrReplace() { + return orReplace; + } + + public boolean isIfNotExists() { + return ifNotExists; + } + + @NonNull + public ImmutableMap<CqlIdentifier, DataType> getParameters() { + return parameters; + } + + public boolean isReturnsNullOnNull() { + return returnsNullOnNull; + } + + @Nullable + public DataType getReturnType() { + return returnType; + } + + @Nullable + public String getLanguage() { + return language; + } + + @Nullable + public String getFunctionBody() { + return functionBody; + } + + public boolean isDeterministic() { + return deterministic; + } + + public boolean isGloballyMonotonic() { + return globallyMonotonic; + } + + @Nullable + public CqlIdentifier getMonotonicOn() { + return monotonicOn; + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseKeyspace.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseKeyspace.java new file mode 100644 index 00000000000..3b542254dab --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseKeyspace.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
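The function builder follows the same immutable-copy pattern, with extra stages for the null option, return type, and language. A hedged sketch (hypothetical names; the stage-to-stage chaining is assumed from the interfaces implemented above):

```java
import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseFunction;
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.type.DataTypes;

String cql =
    new DefaultCreateDseFunction(CqlIdentifier.fromCql("plus")) // hypothetical function
        .withParameter(CqlIdentifier.fromCql("x"), DataTypes.INT)
        .withParameter(CqlIdentifier.fromCql("y"), DataTypes.INT)
        .returnsNullOnNull()
        .returnsType(DataTypes.INT)
        .deterministic()
        .monotonic()
        .withLanguage("java")
        .as("'return x + y;'") // the body is passed through verbatim, hence the inner quotes
        .asCql();
// Per asCql() above, this should render roughly as:
// CREATE FUNCTION plus (x int,y int) RETURNS NULL ON NULL INPUT RETURNS int
//   DETERMINISTIC MONOTONIC LANGUAGE java AS 'return x + y;'
```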
+ */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseKeyspace; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseKeyspaceStart; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultCreateDseKeyspace implements CreateDseKeyspace, CreateDseKeyspaceStart { + + private final CqlIdentifier keyspaceName; + private final boolean ifNotExists; + private final ImmutableMap<String, Object> options; + + public DefaultCreateDseKeyspace(@NonNull CqlIdentifier keyspaceName) { + this(keyspaceName, false, ImmutableMap.of()); + } + + public DefaultCreateDseKeyspace( + @NonNull CqlIdentifier keyspaceName, + boolean ifNotExists, + @NonNull ImmutableMap<String, Object> options) { + this.keyspaceName = keyspaceName; + this.ifNotExists = ifNotExists; + this.options = options; + } + + @NonNull + @Override + public CreateDseKeyspace withOption(@NonNull String name, @NonNull Object value) { + return new DefaultCreateDseKeyspace( + keyspaceName, ifNotExists, ImmutableCollections.append(options, name, value)); + } + + @NonNull + @Override + public CreateDseKeyspaceStart ifNotExists() { + return new DefaultCreateDseKeyspace(keyspaceName, true, options); + } + + @NonNull + @Override + public CreateDseKeyspace withReplicationOptions(@NonNull Map<String, Object> replicationOptions) { + return withOption("replication", replicationOptions); + } + + @NonNull + @Override + public String asCql() { + StringBuilder builder = new StringBuilder(); + + builder.append("CREATE KEYSPACE "); + if (ifNotExists) { + builder.append("IF NOT EXISTS "); + } + + builder.append(keyspaceName.asCql(true)); + builder.append(OptionsUtils.buildOptions(options, true)); + return builder.toString(); + } + + @Override + public String toString() { + return asCql(); + } + + @NonNull + @Override + public Map<String, Object> getOptions() { + return options; + } + + @NonNull + public CqlIdentifier getKeyspace() { + return keyspaceName; + } + + public boolean isIfNotExists() { + return ifNotExists; + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseTable.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseTable.java new file mode 100644 index 00000000000..86169cdd29b --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultCreateDseTable.java @@ -0,0 +1,479 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
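A hedged usage sketch for the keyspace builder (hypothetical keyspace name; this assumes, as with the OSS `CreateKeyspaceStart`, that `withReplicationOptions` is available right after `ifNotExists()`):

```java
import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseKeyspace;
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;

String cql =
    new DefaultCreateDseKeyspace(CqlIdentifier.fromCql("inventory")) // hypothetical keyspace
        .ifNotExists()
        .withReplicationOptions(
            ImmutableMap.of("class", "SimpleStrategy", "replication_factor", 1))
        .asCql();
// Rendered via OptionsUtils.buildOptions, roughly:
// CREATE KEYSPACE IF NOT EXISTS inventory WITH replication={'class':'SimpleStrategy','replication_factor':1}
```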
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import static com.datastax.oss.driver.internal.querybuilder.schema.Utils.appendSet; + +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTable; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTableStart; +import com.datastax.dse.driver.api.querybuilder.schema.CreateDseTableWithOptions; +import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.internal.querybuilder.CqlHelper; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.internal.querybuilder.schema.OptionsUtils; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.NonNull; +import edu.umd.cs.findbugs.annotations.Nullable; +import java.util.Map; +import net.jcip.annotations.Immutable; + +@Immutable +public class DefaultCreateDseTable + implements CreateDseTableStart, CreateDseTable, CreateDseTableWithOptions { + + private final CqlIdentifier keyspace; + private final CqlIdentifier tableName; + + private final boolean ifNotExists; + private final boolean compactStorage; + + private final ImmutableMap<String, Object> options; + + private final ImmutableMap<CqlIdentifier, DataType> columnsInOrder; + + private final ImmutableSet<CqlIdentifier> partitionKeyColumns; + private final ImmutableSet<CqlIdentifier> clusteringKeyColumns; + private final ImmutableSet<CqlIdentifier> staticColumns; + private final ImmutableSet<CqlIdentifier> regularColumns; + + private final ImmutableMap<CqlIdentifier, ClusteringOrder> orderings; + + private final DseTableVertexOperation vertexOperation; + private final DseTableEdgeOperation edgeOperation; + + public DefaultCreateDseTable(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName) { + this( + keyspace, + tableName, + false, + false, + ImmutableMap.of(), + ImmutableSet.of(), + ImmutableSet.of(), + ImmutableSet.of(), + ImmutableSet.of(), + ImmutableMap.of(), + null, + null, + ImmutableMap.of()); + } + + public DefaultCreateDseTable( + @Nullable CqlIdentifier keyspace, + @NonNull CqlIdentifier tableName, + boolean ifNotExists, + boolean compactStorage, + @NonNull ImmutableMap<CqlIdentifier, DataType> columnsInOrder, + @NonNull ImmutableSet<CqlIdentifier> partitionKeyColumns, + @NonNull ImmutableSet<CqlIdentifier> clusteringKeyColumns, + @NonNull ImmutableSet<CqlIdentifier> staticColumns, + @NonNull ImmutableSet<CqlIdentifier> regularColumns, + @NonNull ImmutableMap<CqlIdentifier, ClusteringOrder> orderings, + @Nullable DseTableVertexOperation vertexOperation, + @Nullable DseTableEdgeOperation edgeOperation, + @NonNull ImmutableMap<String, Object> options) { + this.keyspace = keyspace; + this.tableName = tableName; + this.ifNotExists = ifNotExists; + this.compactStorage = compactStorage; + this.columnsInOrder = columnsInOrder; + this.partitionKeyColumns = partitionKeyColumns; + this.clusteringKeyColumns = clusteringKeyColumns; + this.staticColumns = staticColumns; + this.regularColumns = regularColumns; + this.orderings = orderings; + this.options = options; + this.vertexOperation = vertexOperation; + this.edgeOperation = edgeOperation; + } + + @NonNull + @Override + public CreateDseTableStart ifNotExists() { + return new DefaultCreateDseTable( + keyspace, + tableName, + true, + compactStorage, + columnsInOrder, + partitionKeyColumns,
+ clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTable withPartitionKey( + @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + ImmutableCollections.append(columnsInOrder, columnName, dataType), + appendSet(partitionKeyColumns, columnName), + clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTable withClusteringColumn( + @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + ImmutableCollections.append(columnsInOrder, columnName, dataType), + partitionKeyColumns, + appendSet(clusteringKeyColumns, columnName), + staticColumns, + regularColumns, + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTable withColumn(@NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + ImmutableCollections.append(columnsInOrder, columnName, dataType), + partitionKeyColumns, + clusteringKeyColumns, + staticColumns, + appendSet(regularColumns, columnName), + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTable withStaticColumn( + @NonNull CqlIdentifier columnName, @NonNull DataType dataType) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + ImmutableCollections.append(columnsInOrder, columnName, dataType), + partitionKeyColumns, + clusteringKeyColumns, + appendSet(staticColumns, columnName), + regularColumns, + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTableWithOptions withCompactStorage() { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + true, + columnsInOrder, + partitionKeyColumns, + clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTableWithOptions withClusteringOrderByIds( + @NonNull Map<CqlIdentifier, ClusteringOrder> orderings) { + return withClusteringOrders(ImmutableCollections.concat(this.orderings, orderings)); + } + + @NonNull + @Override + public CreateDseTableWithOptions withClusteringOrder( + @NonNull CqlIdentifier columnName, @NonNull ClusteringOrder order) { + return withClusteringOrders(ImmutableCollections.append(orderings, columnName, order)); + } + + @NonNull + public CreateDseTableWithOptions withClusteringOrders( + @NonNull ImmutableMap<CqlIdentifier, ClusteringOrder> orderings) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + columnsInOrder, + partitionKeyColumns, + clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + vertexOperation, + edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTableWithOptions withVertexLabel(@Nullable CqlIdentifier vertexLabelId) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + columnsInOrder, + partitionKeyColumns, + clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + new DseTableVertexOperation(DseTableGraphOperationType.WITH, vertexLabelId), +
edgeOperation, + options); + } + + @NonNull + @Override + public CreateDseTableWithOptions withEdgeLabel( + @Nullable CqlIdentifier edgeLabelId, + @NonNull DseGraphEdgeSide from, + @NonNull DseGraphEdgeSide to) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + columnsInOrder, + partitionKeyColumns, + clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + vertexOperation, + new DseTableEdgeOperation(DseTableGraphOperationType.WITH, edgeLabelId, from, to), + options); + } + + @NonNull + @Override + public CreateDseTable withOption(@NonNull String name, @NonNull Object value) { + return new DefaultCreateDseTable( + keyspace, + tableName, + ifNotExists, + compactStorage, + columnsInOrder, + partitionKeyColumns, + clusteringKeyColumns, + staticColumns, + regularColumns, + orderings, + vertexOperation, + edgeOperation, + ImmutableCollections.append(options, name, value)); + } + + @NonNull + @Override + public String asCql() { + StringBuilder builder = new StringBuilder(); + + builder.append("CREATE TABLE "); + if (ifNotExists) { + builder.append("IF NOT EXISTS "); + } + + CqlHelper.qualify(keyspace, tableName, builder); + + if (columnsInOrder.isEmpty()) { + // no columns provided yet. + return builder.toString(); + } + + boolean singlePrimaryKey = partitionKeyColumns.size() == 1 && clusteringKeyColumns.size() == 0; + + builder.append(" ("); + + boolean first = true; + for (Map.Entry<CqlIdentifier, DataType> column : columnsInOrder.entrySet()) { + if (first) { + first = false; + } else { + builder.append(','); + } + builder + .append(column.getKey().asCql(true)) + .append(' ') + .append(column.getValue().asCql(true, true)); + + if (singlePrimaryKey && partitionKeyColumns.contains(column.getKey())) { + builder.append(" PRIMARY KEY"); + } else if (staticColumns.contains(column.getKey())) { + builder.append(" STATIC"); + } + } + + if (!singlePrimaryKey) { + builder.append(","); + CqlHelper.buildPrimaryKey(partitionKeyColumns, clusteringKeyColumns, builder); + } + + builder.append(')'); + + boolean firstOption = true; + + if (compactStorage) { + firstOption = false; + builder.append(" WITH COMPACT STORAGE"); + } + + if (!orderings.isEmpty()) { + if (firstOption) { + builder.append(" WITH "); + firstOption = false; + } else { + builder.append(" AND "); + } + builder.append("CLUSTERING ORDER BY ("); + boolean firstClustering = true; + + for (Map.Entry<CqlIdentifier, ClusteringOrder> ordering : orderings.entrySet()) { + if (firstClustering) { + firstClustering = false; + } else { + builder.append(','); + } + builder + .append(ordering.getKey().asCql(true)) + .append(' ') + .append(ordering.getValue().toString()); + } + + builder.append(')'); + } + + if (vertexOperation != null) { + if (firstOption) { + builder.append(" WITH "); + firstOption = false; + } else { + builder.append(" AND "); + } + vertexOperation.append(builder); + } else if (edgeOperation != null) { + if (firstOption) { + builder.append(" WITH "); + firstOption = false; + } else { + builder.append(" AND "); + } + edgeOperation.append(builder); + } + + builder.append(OptionsUtils.buildOptions(options, firstOption)); + + return builder.toString(); + } + + @Override + public String toString() { + return asCql(); + } + + @NonNull + @Override + public Map<String, Object> getOptions() { + return options; + } + + @Nullable + public CqlIdentifier getKeyspace() { + return keyspace; + } + + @NonNull + public CqlIdentifier getTable() { + return tableName; + } + + public boolean isIfNotExists() { + return ifNotExists; + } + + public boolean isCompactStorage()
{ + return compactStorage; + } + + @NonNull + public ImmutableMap<CqlIdentifier, DataType> getColumnsInOrder() { + return columnsInOrder; + } + + @NonNull + public ImmutableSet<CqlIdentifier> getPartitionKeyColumns() { + return partitionKeyColumns; + } + + @NonNull + public ImmutableSet<CqlIdentifier> getClusteringKeyColumns() { + return clusteringKeyColumns; + } + + @NonNull + public ImmutableSet<CqlIdentifier> getStaticColumns() { + return staticColumns; + } + + @NonNull + public ImmutableSet<CqlIdentifier> getRegularColumns() { + return regularColumns; + } + + @NonNull + public ImmutableMap<CqlIdentifier, ClusteringOrder> getOrderings() { + return orderings; + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultDseGraphEdgeSide.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultDseGraphEdgeSide.java new file mode 100644 index 00000000000..32f43ab8ff2 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DefaultDseGraphEdgeSide.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
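A hedged sketch of the table builder with the DSE graph extension (hypothetical table and label names; a null keyspace leaves the table unqualified, as in asCql() above):

```java
import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseTable;
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.type.DataTypes;

String cql =
    new DefaultCreateDseTable(null, CqlIdentifier.fromCql("person")) // hypothetical table
        .withPartitionKey(CqlIdentifier.fromCql("name"), DataTypes.TEXT)
        .withColumn(CqlIdentifier.fromCql("age"), DataTypes.INT)
        .withVertexLabel(CqlIdentifier.fromCql("person_label"))
        .asCql();
// Single partition key and no clustering columns, so PRIMARY KEY is rendered inline, roughly:
// CREATE TABLE person (name text PRIMARY KEY,age int) WITH VERTEX LABEL person_label
```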
+ */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.internal.querybuilder.ImmutableCollections; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.List; + +public class DefaultDseGraphEdgeSide implements DseGraphEdgeSide { + + private final CqlIdentifier tableId; + private final ImmutableList<CqlIdentifier> partitionKeyColumns; + private final ImmutableList<CqlIdentifier> clusteringColumns; + + public DefaultDseGraphEdgeSide(CqlIdentifier tableId) { + this(tableId, ImmutableList.of(), ImmutableList.of()); + } + + private DefaultDseGraphEdgeSide( + CqlIdentifier tableId, + ImmutableList<CqlIdentifier> partitionKeyColumns, + ImmutableList<CqlIdentifier> clusteringColumns) { + this.tableId = tableId; + this.partitionKeyColumns = partitionKeyColumns; + this.clusteringColumns = clusteringColumns; + } + + @NonNull + @Override + public DseGraphEdgeSide withPartitionKey(@NonNull CqlIdentifier columnId) { + return new DefaultDseGraphEdgeSide( + tableId, ImmutableCollections.append(partitionKeyColumns, columnId), clusteringColumns); + } + + @NonNull + @Override + public DseGraphEdgeSide withClusteringColumn(@NonNull CqlIdentifier columnId) { + return new DefaultDseGraphEdgeSide( + tableId, partitionKeyColumns, ImmutableCollections.append(clusteringColumns, columnId)); + } + + @NonNull + @Override + public CqlIdentifier getTableId() { + return tableId; + } + + @NonNull + @Override + public List<CqlIdentifier> getPartitionKeyColumns() { + return partitionKeyColumns; + } + + @NonNull + @Override + public List<CqlIdentifier> getClusteringColumns() { + return clusteringColumns; + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableEdgeOperation.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableEdgeOperation.java new file mode 100644 index 00000000000..f514158e853 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableEdgeOperation.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide; +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.internal.querybuilder.CqlHelper; +import java.util.List; + +public class DseTableEdgeOperation { + + private final DseTableGraphOperationType type; + private final CqlIdentifier label; + private final DseGraphEdgeSide from; + private final DseGraphEdgeSide to; + + public DseTableEdgeOperation( + DseTableGraphOperationType type, + CqlIdentifier label, + DseGraphEdgeSide from, + DseGraphEdgeSide to) { + this.type = type; + this.label = label; + this.from = from; + this.to = to; + } + + public DseTableGraphOperationType getType() { + return type; + } + + public CqlIdentifier getLabel() { + return label; + } + + public DseGraphEdgeSide getFrom() { + return from; + } + + public DseGraphEdgeSide getTo() { + return to; + } + + public void append(StringBuilder builder) { + builder.append("EDGE LABEL"); + if (label != null) { + builder.append(' ').append(label.asCql(true)); + } + if (type == DseTableGraphOperationType.WITH) { + builder.append(" FROM "); + append(from, builder); + builder.append(" TO "); + append(to, builder); + } + } + + private static void append(DseGraphEdgeSide side, StringBuilder builder) { + builder.append(side.getTableId().asCql(true)).append('('); + List<CqlIdentifier> pkColumns = side.getPartitionKeyColumns(); + if (pkColumns.size() == 1) { + builder.append(pkColumns.get(0).asCql(true)); + } else { + CqlHelper.appendIds(pkColumns, builder, "(", ",", ")"); + } + CqlHelper.appendIds(side.getClusteringColumns(), builder, ",", ",", null); + builder.append(')'); + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableGraphOperationType.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableGraphOperationType.java new file mode 100644 index 00000000000..35d5dd7c80b --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableGraphOperationType.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
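The edge side and edge operation classes combine as follows; a hedged sketch with hypothetical tables, where each side contributes its table name plus partition key (and optional clustering) columns to the FROM/TO clauses rendered by `DseTableEdgeOperation.append`:

```java
import com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide;
import com.datastax.dse.driver.internal.querybuilder.schema.DefaultCreateDseTable;
import com.datastax.dse.driver.internal.querybuilder.schema.DefaultDseGraphEdgeSide;
import com.datastax.oss.driver.api.core.CqlIdentifier;
import com.datastax.oss.driver.api.core.type.DataTypes;

DseGraphEdgeSide person =
    new DefaultDseGraphEdgeSide(CqlIdentifier.fromCql("person"))
        .withPartitionKey(CqlIdentifier.fromCql("name"));
DseGraphEdgeSide software =
    new DefaultDseGraphEdgeSide(CqlIdentifier.fromCql("software"))
        .withPartitionKey(CqlIdentifier.fromCql("name"));

String cql =
    new DefaultCreateDseTable(null, CqlIdentifier.fromCql("created")) // hypothetical edge table
        .withPartitionKey(CqlIdentifier.fromCql("person_name"), DataTypes.TEXT)
        .withClusteringColumn(CqlIdentifier.fromCql("software_name"), DataTypes.TEXT)
        .withEdgeLabel(CqlIdentifier.fromCql("created"), person, software)
        .asCql();
// Roughly: CREATE TABLE created (person_name text,software_name text,
//   PRIMARY KEY(person_name,software_name)) WITH EDGE LABEL created FROM person(name) TO software(name)
```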
+ */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +public enum DseTableGraphOperationType { + WITH, + WITHOUT +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableVertexOperation.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableVertexOperation.java new file mode 100644 index 00000000000..64a2d44c29a --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/DseTableVertexOperation.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import com.datastax.oss.driver.api.core.CqlIdentifier; + +public class DseTableVertexOperation { + + private final DseTableGraphOperationType type; + private final CqlIdentifier label; + + public DseTableVertexOperation(DseTableGraphOperationType type, CqlIdentifier label) { + this.type = type; + this.label = label; + } + + public DseTableGraphOperationType getType() { + return type; + } + + public CqlIdentifier getLabel() { + return label; + } + + public void append(StringBuilder builder) { + builder.append("VERTEX LABEL"); + if (label != null) { + builder.append(' ').append(label.asCql(true)); + } + } +} diff --git a/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/package-info.java b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/package-info.java new file mode 100644 index 00000000000..de137d9f952 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/dse/driver/internal/querybuilder/schema/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * This package effectively mirrors the Cassandra OSS default query and schema implementations to + * allow extended schema and query building for the DSE driver. 
In general, a class in this package + * will need to implement the DSE equivalent interfaces for any DSE specific extensions. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java index 0234d4fbb15..58ef2c88647 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BindMarker.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java index 05f874a9c6b..729b73deead 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/BuildableQuery.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java index 1518c1583a7..ba06391e628 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/CqlSnippet.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java index 0bb9f6b94a0..652e3e0de18 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Literal.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java index 6a42f1d0369..8df2b7efdd0 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/QueryBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.token.Token; import com.datastax.oss.driver.api.core.type.DataType; import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.UserDefinedType; @@ -32,6 +35,9 @@ import com.datastax.oss.driver.api.querybuilder.truncate.Truncate; import com.datastax.oss.driver.api.querybuilder.update.UpdateStart; import com.datastax.oss.driver.internal.core.metadata.schema.ShallowUserDefinedType; +import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedToken; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; +import com.datastax.oss.driver.internal.core.metadata.token.RandomToken; import com.datastax.oss.driver.internal.querybuilder.ArithmeticOperator; import com.datastax.oss.driver.internal.querybuilder.DefaultLiteral; import com.datastax.oss.driver.internal.querybuilder.DefaultRaw; @@ -402,6 +408,15 @@ public static Literal literal(@Nullable Object value) { */ @NonNull public static Literal literal(@Nullable Object value, @NonNull CodecRegistry codecRegistry) { + if (value instanceof Murmur3Token) { + value = ((Murmur3Token) value).getValue(); + } else if (value instanceof ByteOrderedToken) { + value = ((ByteOrderedToken) value).getValue(); + } else if (value instanceof RandomToken) { + value = ((RandomToken) value).getValue(); + } else if (value instanceof Token) { + throw new IllegalArgumentException("Unsupported token type: " + value.getClass().getName()); + } try { return literal(value, (value == null) ? null : codecRegistry.codecFor(value)); } catch (CodecNotFoundException e) { @@ -424,6 +439,8 @@ public static Literal literal(@Nullable Object value, @NonNull CodecRegistry cod */ @NonNull public static <T> Literal literal(@Nullable T value, @Nullable TypeCodec<T> codec) { + // Don't handle Token here, if the user calls this directly we assume they passed a codec that + // can handle the value return new DefaultLiteral<>(value, codec); } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java index 453ee2f3b14..0c551bfa557 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/Raw.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
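The `literal()` change above unwraps the driver's concrete `Token` implementations to their underlying values so that a registered codec can be found. A hedged sketch of what this enables (the token is constructed directly for brevity, where real code would obtain it from the session's `TokenMap`; the `whereToken` shortcut is assumed from the select DSL):

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;

import com.datastax.oss.driver.api.core.metadata.token.Token;
import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token;

Token token = new Murmur3Token(-9087493578437596993L); // normally from the TokenMap

// Before this change literal(token) failed with CodecNotFoundException, because no codec is
// registered for Murmur3Token itself; now the token is unwrapped to its long value.
String cql =
    selectFrom("foo")
        .all()
        .whereToken("k")
        .isGreaterThan(literal(token))
        .asCql();
// Roughly: SELECT * FROM foo WHERE token(k)>-9087493578437596993
```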
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java index e6f63fe1702..27fabf29219 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/SchemaBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java index f9f1b6ed10c..01b50166426 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/Condition.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java index 35aae1708d2..83ab849ca9d 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java index db9e7f27cb2..2822c0a2f6f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionalStatement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java index bbd9aac0058..4688cac86e0 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/Delete.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java index 8f325ced6e7..e8cf7a26855 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelection.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java index 929797c9d1b..09a29161417 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/Insert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java index 0fbc0a595b8..26bee52c377 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/InsertInto.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java index 621cc00f783..f287cd31501 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java index 285ab4c84d0..a3e6572608a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/OngoingValues.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java index 93878a2569e..f26a1c53e93 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java index 26ff62dcf4c..50e2a03f347 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ArithmeticRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java index 9d83da4b6fa..5b79dfb74d0 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnComponentRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java index fc615d6cbbd..247d61eaed5 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/ColumnRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,4 +46,28 @@ default ResultT contains(@NonNull Term term) { default ResultT containsKey(@NonNull Term term) { return build(" CONTAINS KEY ", term); } + + /** + * Builds a NOT CONTAINS relation for the column. + * + *
<p>Note that NOT CONTAINS support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more + * information. + */ + @NonNull + default ResultT notContains(@NonNull Term term) { + return build(" NOT CONTAINS ", term); + } + + /** + * Builds a NOT CONTAINS KEY relation for the column. + * + *
<p>Note that NOT CONTAINS KEY support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more + * information. + */ + @NonNull + default ResultT notContainsKey(@NonNull Term term) { + return build(" NOT CONTAINS KEY ", term); + } }
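For reviewers, here is a minimal sketch of how the new `notContains` / `notContainsKey` relations added above are expected to surface through the query builder's existing fluent path (`selectFrom(...).all().whereColumn(...)`, which yields a `ColumnRelationBuilder`). The keyspace, table, and column names (`ks`, `events`, `tags`, `attributes`) are hypothetical, and executing the generated CQL assumes a Cassandra 5.1+ cluster:

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;

import com.datastax.oss.driver.api.querybuilder.select.Select;

public class NotContainsSketch {
  public static void main(String[] args) {
    // "tags" is assumed to be a collection column (e.g. set<text>):
    // SELECT * FROM ks.events WHERE tags NOT CONTAINS 'urgent'
    Select byValue =
        selectFrom("ks", "events").all().whereColumn("tags").notContains(literal("urgent"));

    // "attributes" is assumed to be a map column (e.g. map<text, text>):
    // SELECT * FROM ks.events WHERE attributes NOT CONTAINS KEY 'env'
    Select byKey =
        selectFrom("ks", "events").all().whereColumn("attributes").notContainsKey(literal("env"));

    System.out.println(byValue.asCql());
    System.out.println(byKey.asCql());
  }
}
```

As with the existing `contains` / `containsKey`, the builder only renders the relation; whether the server accepts it is validated at execution time.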
diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java index 626ea69c5e2..afaa19ff724 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/InRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,6 +50,38 @@ default ResultT in(@NonNull Term... alternatives) { return in(Arrays.asList(alternatives)); } + /** + * Builds a NOT IN relation where the whole set of possible values is a bound variable, as in + * {@code NOT IN ?}. + * + * <p>Note that NOT IN support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more + * information. + */ + @NonNull + default ResultT notIn(@NonNull BindMarker bindMarker) { + return build(" NOT IN ", bindMarker); + } + + /** + * Builds a NOT IN relation where the arguments are the possible values, as in {@code NOT IN (term1, + * term2...)}. + * + *
<p>Note that NOT IN support is only available in Cassandra 5.1 or later. See CASSANDRA-18584 for more + * information. + */ + @NonNull + default ResultT notIn(@NonNull Iterable<Term> alternatives) { + return build(" NOT IN ", QueryBuilder.tuple(alternatives)); + } + + /** Var-arg equivalent of {@link #notIn(Iterable)}. */ + @NonNull + default ResultT notIn(@NonNull Term... alternatives) { + return notIn(Arrays.asList(alternatives)); + } + @NonNull ResultT build(@NonNull String operator, @Nullable Term rightOperand); } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java index 03d52b6a787..26bc927953b 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/MultiColumnRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java index 6f34fa0d7ec..16b8072fdff 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/OngoingWhereClause.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
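And a matching sketch for the `notIn` variants added to `InRelationBuilder` above, covering both the bind-marker and the var-arg forms (again with hypothetical names `ks` / `tbl` / `k`, and Cassandra 5.1+ assumed at execution time):

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;

import com.datastax.oss.driver.api.querybuilder.select.Select;

public class NotInSketch {
  public static void main(String[] args) {
    // Whole set bound at execution time: SELECT * FROM ks.tbl WHERE k NOT IN ?
    Select bound = selectFrom("ks", "tbl").all().whereColumn("k").notIn(bindMarker());

    // Inline alternatives: SELECT * FROM ks.tbl WHERE k NOT IN (1,2,3)
    Select inline =
        selectFrom("ks", "tbl").all().whereColumn("k").notIn(literal(1), literal(2), literal(3));

    System.out.println(bound.asCql());
    System.out.println(inline.asCql());
  }
}
```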
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java index 48d478e549b..41020332643 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/Relation.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java index 291e953c9c5..05fe10527ee 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/relation/TokenRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java index 0394fc1f0c8..82eccdd94ca 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java index b10f7fb7c1c..3112aaf5950 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java index e28001f7701..56faffc70a9 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedView.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java index 6644e790c23..5739f9ff9bf 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java index 4aae7083fdf..701cec1509b 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumn.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java index db6908122da..d2728082ad3 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableAddColumnEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java index 4e7cf6e88fc..39d6abd558f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumn.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java index cab0ba75032..662f9eb0749 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableDropColumnEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java index e2aba2bcc55..8938b11ca9a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumn.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java index 40580000927..cb7b5e8699f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableRenameColumnEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java index 7306508b295..f1537073b19 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java index ab09de5c25c..c80281a4582 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java index 4c442713b71..df1cb4293d6 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableWithOptionsEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java index 39e309c39d6..82d9667c9cc 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameField.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java index 0104b7c0445..18da1aa4c1e 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeRenameFieldEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java index 67c2c744af7..6ea197a235f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java index 3f41a3822af..75d7bf1e681 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java index bd615c43650..4b46ed18f97 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java index 4a128ba1b9c..42b774ec8fc 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateStateFunc.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java index 9316eadd3eb..de600a384b2 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionEnd.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java index 9ece2c1bb09..17ae78d4b24 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java index e4d32b4edfc..bc55fd5124a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithLanguage.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java index f08522806cc..037dd1cd522 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithNullOption.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java index 4e659bb02a5..b78780bfa7c 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionWithType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java index 7b16bb0759a..ed54fd2312f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndex.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java index 8d74dba4515..62c930d4180 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexOnTable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java index 49e8f55c626..3786b8346b6 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java index 03dd8fe0c6e..098a1596db0 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java index 96ed5c54a74..57abf5f35bc 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java index 057ffeb6bfb..e231356d4c2 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedView.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java index abe08d8647f..14b8c7583f6 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKey.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java index 683a13c1caf..c214c01f6d1 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewPrimaryKeyStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java index b1573804810..28f141f5017 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelection.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java index 3c88c0fd505..ff47c9b0c1a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewSelectionWithColumns.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java index 2d5d088d0fd..1926d6ce83c 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java index 58806f4fb64..e2d1bf9b26c 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhere.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java index f2817017dd6..e7af2c07dc7 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewWhereStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java index 78c1268b966..08a6f85c424 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java index f8bb1ec7d84..82949bceb56 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java index 0a557264299..c7bddf575fb 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableWithOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +18,11 @@ package com.datastax.oss.driver.api.querybuilder.schema; import com.datastax.oss.driver.api.querybuilder.BuildableQuery; +import com.datastax.oss.driver.internal.querybuilder.schema.RawOptionsWrapper; +import com.datastax.oss.driver.shaded.guava.common.collect.Maps; +import edu.umd.cs.findbugs.annotations.CheckReturnValue; import edu.umd.cs.findbugs.annotations.NonNull; +import java.util.Map; public interface CreateTableWithOptions extends BuildableQuery, RelationStructure { @@ -24,4 +30,11 @@ public interface CreateTableWithOptions /** Enables COMPACT STORAGE in the CREATE TABLE statement. */ @NonNull CreateTableWithOptions withCompactStorage(); + + /** Attaches custom metadata to CQL table definition. */ + @NonNull + @CheckReturnValue + default CreateTableWithOptions withExtensions(@NonNull Map extensions) { + return withOption("extensions", Maps.transformValues(extensions, RawOptionsWrapper::of)); + } } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java index 963dd99310c..9c9fc6e62fe 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
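// A hedged usage sketch of the withExtensions option added above, not taken from the
// patch itself: the keyspace, table, and extension key are invented, and the map is
// assumed to take raw byte[] values (matching the blob-typed "extensions" column of
// system_schema.tables, which RawOptionsWrapper renders as hex literals).
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
import java.nio.charset.StandardCharsets;

class ExtensionsExample {
  public static void main(String[] args) {
    String cql =
        SchemaBuilder.createTable("ks", "users")
            .withPartitionKey("id", DataTypes.INT)
            // assumption: byte[] values; the generated CQL would end with
            // ... WITH extensions={'tag':0x64656d6f}
            .withExtensions(ImmutableMap.of("tag", "demo".getBytes(StandardCharsets.UTF_8)))
            .asCql();
    System.out.println(cql);
  }
}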
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java index a033e386335..ab19bd7ad84 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java index 56e6c51c560..418f806395f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/Drop.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java index d1c4468e05d..16da0f13dd2 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java index 8df0f549576..95113cf987f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/KeyspaceReplicationOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java index dc5b7241eef..18409b349b9 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingCreateType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java index 631463859a2..bf3f70f982a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OngoingPartitionKey.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java index 1f6437fea9c..5a503ffa93a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/OptionProvider.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java index 0c7d2b6a8c7..49b342acb7f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -56,6 +58,18 @@ default SelfT withCDC(boolean enabled) { return withOption("cdc", enabled); } + /** + * Defines the crc check chance. + * + *
<p>Note that using this option with a version of Apache Cassandra less than 3.0 will raise a + * syntax error. */ + @NonNull + @CheckReturnValue + default SelfT withCRCCheckChance(double crcCheckChance) { + return withOption("crc_check_chance", crcCheckChance); + } + /** * Defines the caching criteria. * @@ -95,22 +109,32 @@ default SelfT withCompaction(@NonNull CompactionStrategy compactionStrategy) } /** - * Configures compression using the LZ4 algorithm with the given chunk length and crc check - * chance. - * - * @see #withCompression(String, int, double) + * @deprecated This method only exists for backward compatibility. Will not work with Apache + * Cassandra 5.0 or later. Use {@link #withLZ4Compression(int)} instead. */ + @Deprecated @NonNull @CheckReturnValue default SelfT withLZ4Compression(int chunkLengthKB, double crcCheckChance) { return withCompression("LZ4Compressor", chunkLengthKB, crcCheckChance); } + /** + * Configures compression using the LZ4 algorithm with the given chunk length. + * + * @see #withCompression(String, int) + */ + @NonNull + @CheckReturnValue + default SelfT withLZ4Compression(int chunkLengthKB) { + return withCompression("LZ4Compressor", chunkLengthKB); + } + /** * Configures compression using the LZ4 algorithm using the default configuration (64kb - * chunk_length, and 1.0 crc_check_chance). + * chunk_length). * - * @see #withCompression(String, int, double) + * @see #withCompression(String, int) */ @NonNull @CheckReturnValue @@ -119,22 +143,57 @@ default SelfT withLZ4Compression() { } /** - * Configures compression using the Snappy algorithm with the given chunk length and crc check - * chance. + * Configures compression using the Zstd algorithm with the given chunk length. * - * @see #withCompression(String, int, double) + * @see #withCompression(String, int) + */ + @NonNull + @CheckReturnValue + default SelfT withZstdCompression(int chunkLengthKB) { + return withCompression("ZstdCompressor", chunkLengthKB); + } + + /** + * Configures compression using the Zstd algorithm using the default configuration (64kb + * chunk_length). + * + * @see #withCompression(String, int) + */ + @NonNull + @CheckReturnValue + default SelfT withZstdCompression() { + return withCompression("ZstdCompressor"); + } + + /** + * @deprecated This method only exists for backward compatibility. Will not work with Apache + * Cassandra 5.0 or later due to removal of deprecated table properties (CASSANDRA-18742). Use + * {@link #withSnappyCompression(int)} instead. */ + @Deprecated @NonNull @CheckReturnValue default SelfT withSnappyCompression(int chunkLengthKB, double crcCheckChance) { return withCompression("SnappyCompressor", chunkLengthKB, crcCheckChance); } + /** + * Configures compression using the Snappy algorithm with the given chunk length. + * + * @see #withCompression(String, int) + */ + @NonNull + @CheckReturnValue + default SelfT withSnappyCompression(int chunkLengthKB) { + return withCompression("SnappyCompressor", chunkLengthKB); + } + /** * Configures compression using the Snappy algorithm using the default configuration (64kb - * chunk_length, and 1.0 crc_check_chance). + * chunk_length). * - * @see #withCompression(String, int, double) + * @see #withCompression(String, int) */ @NonNull @CheckReturnValue @@ -143,22 +202,34 @@ default SelfT withSnappyCompression() { } /** - * Configures compression using the Deflate algorithm with the given chunk length and crc check - * chance.
- * - * @see #withCompression(String, int, double) + * @deprecated This method only exists for backward compatibility. Will not work with Apache + * Cassandra 5.0 or later due to removal of deprecated table properties (CASSANDRA-18742). Use + * {@link #withDeflateCompression(int)} instead. */ + @Deprecated @NonNull @CheckReturnValue default SelfT withDeflateCompression(int chunkLengthKB, double crcCheckChance) { return withCompression("DeflateCompressor", chunkLengthKB, crcCheckChance); } + /** + * Configures compression using the Deflate algorithm with the given chunk length. + * + * @see #withCompression(String, int) + */ + @NonNull + @CheckReturnValue + default SelfT withDeflateCompression(int chunkLengthKB) { + return withCompression("DeflateCompressor", chunkLengthKB); + } + /** * Configures compression using the Deflate algorithm using the default configuration (64kb - * chunk_length, and 1.0 crc_check_chance). + * chunk_length). * - * @see #withCompression(String, int, double) + * @see #withCompression(String, int) */ @NonNull @CheckReturnValue @@ -168,13 +239,13 @@ default SelfT withDeflateCompression() { /** * Configures compression using the given algorithm using the default configuration (64kb - * chunk_length, and 1.0 crc_check_chance). + * chunk_length). * *
<p>Unless specifying a custom compression algorithm implementation, it is recommended to use * {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link * #withDeflateCompression()}. * - * @see #withCompression(String, int, double) + * @see #withCompression(String, int) */ @NonNull @CheckReturnValue @@ -183,7 +254,7 @@ default SelfT withCompression(@NonNull String compressionAlgorithmName) { } /** - * Configures compression using the given algorithm, chunk length and crc check chance. + * Configures compression using the given algorithm and chunk length. * *
<p>Unless specifying a custom compression algorithm implementation, it is recommended to use * {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link * #withDeflateCompression()}. * * @param compressionAlgorithmName The class name of the compression algorithm. * @param chunkLengthKB The chunk length in KB of compression blocks. Defaults to 64. - * @param crcCheckChance The probability (0.0 to 1.0) that checksum will be checked on each read. - * Defaults to 1.0. */ @NonNull @CheckReturnValue + default SelfT withCompression(@NonNull String compressionAlgorithmName, int chunkLengthKB) { + return withOption( + "compression", + ImmutableMap.of("class", compressionAlgorithmName, "chunk_length_in_kb", chunkLengthKB)); + } + + /** + * @deprecated This method only exists for backward compatibility. Will not work with Apache + * Cassandra 5.0 or later due to removal of deprecated table properties (CASSANDRA-18742). Use + * {@link #withCompression(String, int)} instead. + */ + @NonNull + @CheckReturnValue + @Deprecated default SelfT withCompression( @NonNull String compressionAlgorithmName, int chunkLengthKB, double crcCheckChance) { return withOption( diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java index d6da402bfe7..3716cd03256 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationStructure.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java index a7ff86b01a4..922f596b603 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/CompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
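// A short, hedged sketch (not from the patch) of the new chunk-length-only
// compression options and the new crc_check_chance table option above; the
// keyspace, table, and column names are invented for illustration.
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;

class CompressionOptionsExample {
  public static void main(String[] args) {
    String cql =
        SchemaBuilder.createTable("ks", "events")
            .withPartitionKey("id", DataTypes.TIMEUUID)
            .withColumn("payload", DataTypes.TEXT)
            // compression = {'class':'ZstdCompressor','chunk_length_in_kb':64}
            .withZstdCompression(64)
            // crc_check_chance = 0.9 (requires Apache Cassandra 3.0+)
            .withCRCCheckChance(0.9)
            .asCql();
    System.out.println(cql);
  }
}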
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java index c65db949b67..5839a2155a9 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/LeveledCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java index 28566555458..b33f6d73744 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/SizeTieredCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java index 0166d924792..a6a1a129da5 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/compaction/TimeWindowCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java index df58d757f1f..dcf59daf06f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/OngoingSelection.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java index 2b8f008d5b0..159657989da 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Select.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,7 @@ package com.datastax.oss.driver.api.querybuilder.select; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; import com.datastax.oss.driver.api.querybuilder.BindMarker; import com.datastax.oss.driver.api.querybuilder.BuildableQuery; @@ -144,6 +147,16 @@ default Select orderBy(@NonNull String columnName, @NonNull ClusteringOrder orde return orderBy(CqlIdentifier.fromCql(columnName), order); } + /** + * Shortcut for {@link #orderByAnnOf(CqlIdentifier, CqlVector)}, adding an ORDER BY ... ANN OF ... + * clause + */ + @NonNull + Select orderByAnnOf(@NonNull String columnName, @NonNull CqlVector ann); + + /** Adds the ORDER BY ... ANN OF ... clause, usually used for vector search */ + @NonNull + Select orderByAnnOf(@NonNull CqlIdentifier columnId, @NonNull CqlVector ann); /** * Adds a LIMIT clause to this query with a literal value. * diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java index c4abb196a04..b39ea8815c6 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/SelectFrom.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
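// A hedged sketch (not from the patch) of the new ANN ordering for vector search
// added above; the table, column, and vector values are invented, and the column
// is assumed to be of type vector<float, 3>.
import com.datastax.oss.driver.api.core.data.CqlVector;
import com.datastax.oss.driver.api.querybuilder.QueryBuilder;

class AnnOrderingExample {
  public static void main(String[] args) {
    String cql =
        QueryBuilder.selectFrom("store", "products")
            .all()
            .orderByAnnOf("embedding", CqlVector.newInstance(0.1f, 0.2f, 0.3f))
            .limit(2)
            .asCql();
    // SELECT * FROM store.products ORDER BY embedding ANN OF [0.1, 0.2, 0.3] LIMIT 2
    System.out.println(cql);
  }
}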
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java index f3654f74e6f..d82d711b052 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/select/Selector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java index 2cf33b856ca..6ff4d32b7de 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/term/Term.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java index 3936d542d46..081fc9a2c5b 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/truncate/Truncate.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java index d918c8aba42..4c763b9930b 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Assignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,12 +30,14 @@ import com.datastax.oss.driver.internal.querybuilder.update.AppendListElementAssignment; import com.datastax.oss.driver.internal.querybuilder.update.AppendMapEntryAssignment; import com.datastax.oss.driver.internal.querybuilder.update.AppendSetElementAssignment; -import com.datastax.oss.driver.internal.querybuilder.update.CounterAssignment; +import com.datastax.oss.driver.internal.querybuilder.update.DecrementAssignment; import com.datastax.oss.driver.internal.querybuilder.update.DefaultAssignment; +import com.datastax.oss.driver.internal.querybuilder.update.IncrementAssignment; import com.datastax.oss.driver.internal.querybuilder.update.PrependAssignment; import com.datastax.oss.driver.internal.querybuilder.update.PrependListElementAssignment; import com.datastax.oss.driver.internal.querybuilder.update.PrependMapEntryAssignment; import com.datastax.oss.driver.internal.querybuilder.update.PrependSetElementAssignment; +import com.datastax.oss.driver.internal.querybuilder.update.RemoveAssignment; import com.datastax.oss.driver.internal.querybuilder.update.RemoveListElementAssignment; import com.datastax.oss.driver.internal.querybuilder.update.RemoveMapEntryAssignment; import com.datastax.oss.driver.internal.querybuilder.update.RemoveSetElementAssignment; @@ -77,8 +81,8 @@ static Assignment setField( /** Assigns a value to an entry in a map column, as in {@code SET map[?]=?}. 
*/ @NonNull static Assignment setMapValue( - @NonNull CqlIdentifier columnId, @NonNull Term index, @NonNull Term value) { - return new DefaultAssignment(new ColumnComponentLeftOperand(columnId, index), "=", value); + @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { + return new DefaultAssignment(new ColumnComponentLeftOperand(columnId, key), "=", value); } /** @@ -87,14 +91,31 @@ static Assignment setMapValue( */ @NonNull static Assignment setMapValue( + @NonNull String columnName, @NonNull Term key, @NonNull Term value) { + return setMapValue(CqlIdentifier.fromCql(columnName), key, value); + } + + /** Assigns a value to an index in a list column, as in {@code SET list[?]=?}. */ + @NonNull + static Assignment setListValue( + @NonNull CqlIdentifier columnId, @NonNull Term index, @NonNull Term value) { + return new DefaultAssignment(new ColumnComponentLeftOperand(columnId, index), "=", value); + } + + /** + * Shortcut for {@link #setListValue(CqlIdentifier, Term, Term) + * setListValue(CqlIdentifier.fromCql(columnName), index, value)}. + */ + @NonNull + static Assignment setListValue( @NonNull String columnName, @NonNull Term index, @NonNull Term value) { - return setMapValue(CqlIdentifier.fromCql(columnName), index, value); + return setListValue(CqlIdentifier.fromCql(columnName), index, value); } - /** Increments a counter, as in {@code SET c+=?}. */ + /** Increments a counter, as in {@code SET c=c+?}. */ @NonNull static Assignment increment(@NonNull CqlIdentifier columnId, @NonNull Term amount) { - return new CounterAssignment(new ColumnLeftOperand(columnId), "+=", amount); + return new IncrementAssignment(columnId, amount); } /** @@ -106,7 +127,7 @@ static Assignment increment(@NonNull String columnName, @NonNull Term amount) { return increment(CqlIdentifier.fromCql(columnName), amount); } - /** Increments a counter by 1, as in {@code SET c+=1} . */ + /** Increments a counter by 1, as in {@code SET c=c+1} . */ @NonNull static Assignment increment(@NonNull CqlIdentifier columnId) { return increment(columnId, QueryBuilder.literal(1)); @@ -118,10 +139,10 @@ static Assignment increment(@NonNull String columnName) { return increment(CqlIdentifier.fromCql(columnName)); } - /** Decrements a counter, as in {@code SET c-=?}. */ + /** Decrements a counter, as in {@code SET c=c-?}. */ @NonNull static Assignment decrement(@NonNull CqlIdentifier columnId, @NonNull Term amount) { - return new CounterAssignment(new ColumnLeftOperand(columnId), "-=", amount); + return new DecrementAssignment(columnId, amount); } /** @@ -133,7 +154,7 @@ static Assignment decrement(@NonNull String columnName, @NonNull Term amount) { return decrement(CqlIdentifier.fromCql(columnName), amount); } - /** Decrements a counter by 1, as in {@code SET c-=1} . */ + /** Decrements a counter by 1, as in {@code SET c=c-1} . */ @NonNull static Assignment decrement(@NonNull CqlIdentifier columnId) { return decrement(columnId, QueryBuilder.literal(1)); @@ -146,13 +167,13 @@ static Assignment decrement(@NonNull String columnName) { } /** - * Appends to a collection column, as in {@code SET l+=?}. + * Appends to a collection column, as in {@code SET l=l+?}. * *

<p>The term must be a collection of the same type as the column. */ @NonNull static Assignment append(@NonNull CqlIdentifier columnId, @NonNull Term suffix) { - return new AppendAssignment(new ColumnLeftOperand(columnId), suffix); + return new AppendAssignment(columnId, suffix); } /** @@ -165,7 +186,7 @@ static Assignment append(@NonNull String columnName, @NonNull Term suffix) { } /** - * Appends a single element to a list column, as in {@code SET l+=[?]}. + * Appends a single element to a list column, as in {@code SET l=l+[?]}. * *

<p>The term must be of the same type as the column's elements. */ @@ -184,7 +205,7 @@ static Assignment appendListElement(@NonNull String columnName, @NonNull Term su } /** - * Appends a single element to a set column, as in {@code SET s+={?}}. + * Appends a single element to a set column, as in {@code SET s=s+{?}}. * *

<p>The term must be of the same type as the column's elements. */ @@ -203,7 +224,7 @@ static Assignment appendSetElement(@NonNull String columnName, @NonNull Term suf } /** - * Appends a single entry to a map column, as in {@code SET m+={?:?}}. + * Appends a single entry to a map column, as in {@code SET m=m+{?:?}}. * *

<p>The terms must be of the same type as the column's keys and values respectively. */ @@ -302,7 +323,7 @@ static Assignment prependMapEntry( } /** - * Removes elements from a collection, as in {@code SET l-=[1,2,3]}. + * Removes elements from a collection, as in {@code SET l=l-[1,2,3]}. * *

<p>The term must be a collection of the same type as the column. * @@ -313,7 +334,7 @@ static Assignment prependMapEntry( */ @NonNull static Assignment remove(@NonNull CqlIdentifier columnId, @NonNull Term collectionToRemove) { - return new DefaultAssignment(new ColumnLeftOperand(columnId), "-=", collectionToRemove); + return new RemoveAssignment(columnId, collectionToRemove); } /** @@ -326,7 +347,7 @@ static Assignment remove(@NonNull String columnName, @NonNull Term collectionToR } /** - * Removes a single element to a list column, as in {@code SET l-=[?]}. + * Removes a single element from a list column, as in {@code SET l=l-[?]}. * *
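The replacement of operator-string assignments (`CounterAssignment`, `DefaultAssignment` with `"-="`) by dedicated `IncrementAssignment`/`DecrementAssignment`/`RemoveAssignment` classes gives each operation its own rendering and, presumably, its own idempotence handling. An illustrative sketch of the collection append/remove surface, assuming a hypothetical `playlists` table and that the default codec registry can infer codecs for the collection literals:

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update;

import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import java.util.Arrays;
import java.util.Collections;

public class CollectionAssignmentSketch {
  public static void main(String[] args) {
    // Hypothetical table:
    // CREATE TABLE playlists (id int PRIMARY KEY, songs list<text>, tags set<text>)
    SimpleStatement stmt =
        update("playlists")
            .append("songs", literal(Arrays.asList("intro")))         // SET songs=songs+['intro']
            .remove("tags", literal(Collections.singleton("draft")))  // SET tags=tags-{'draft'}
            .whereColumn("id")
            .isEqualTo(literal(1))
            .build();
    System.out.println(stmt.getQuery());
  }
}
```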

<p>The term must be of the same type as the column's elements. */ @@ -345,7 +366,7 @@ static Assignment removeListElement(@NonNull String columnName, @NonNull Term su } /** - * Removes a single element to a set column, as in {@code SET s-={?}}. + * Removes a single element from a set column, as in {@code SET s=s-{?}}. * *

<p>The term must be of the same type as the column's elements. */ @@ -364,7 +385,7 @@ static Assignment removeSetElement(@NonNull String columnName, @NonNull Term suf } /** - * Removes a single entry to a map column, as in {@code SET m-={?:?}}. + * Removes a single entry from a map column, as in {@code SET m=m-{?:?}}. * *

<p>The terms must be of the same type as the column's keys and values respectively. */ diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java index 67af1f09e34..8264c1b4781 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/OngoingAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -106,27 +108,53 @@ default UpdateWithAssignments setField( /** * Assigns a value to an entry in a map column, as in {@code SET map[?]=?}. * - *

<p>This is a shortcut for {@link #set(Assignment) set(Assignment.setMapValue(columnId, index, + *

<p>This is a shortcut for {@link #set(Assignment) set(Assignment.setMapValue(columnId, key, * value))}. * * @see Assignment#setMapValue(CqlIdentifier, Term, Term) */ @NonNull default UpdateWithAssignments setMapValue( - @NonNull CqlIdentifier columnId, @NonNull Term index, @NonNull Term value) { - return set(Assignment.setMapValue(columnId, index, value)); + @NonNull CqlIdentifier columnId, @NonNull Term key, @NonNull Term value) { + return set(Assignment.setMapValue(columnId, key, value)); } /** * Shortcut for {@link #setMapValue(CqlIdentifier, Term, Term) - * setMapValue(CqlIdentifier.fromCql(columnName), index, value)}. + * setMapValue(CqlIdentifier.fromCql(columnName), key, value)}. * * @see Assignment#setMapValue(String, Term, Term) */ @NonNull default UpdateWithAssignments setMapValue( + @NonNull String columnName, @NonNull Term key, @NonNull Term value) { + return setMapValue(CqlIdentifier.fromCql(columnName), key, value); + } + + /** + * Assigns a value to an index in a list column, as in {@code SET list[?]=?}. + * + *

<p>This is a shortcut for {@link #set(Assignment) set(Assignment.setListValue(columnId, index, + * value))}. + * + * @see Assignment#setListValue(CqlIdentifier, Term, Term) + */ + @NonNull + default UpdateWithAssignments setListValue( + @NonNull CqlIdentifier columnId, @NonNull Term index, @NonNull Term value) { + return set(Assignment.setListValue(columnId, index, value)); + } + + /** + * Shortcut for {@link #setListValue(CqlIdentifier, Term, Term) + * setListValue(CqlIdentifier.fromCql(columnName), index, value)}. + * + * @see Assignment#setListValue(String, Term, Term) + */ + @NonNull + default UpdateWithAssignments setListValue( @NonNull String columnName, @NonNull Term index, @NonNull Term value) { - return setMapValue(CqlIdentifier.fromCql(columnName), index, value); + return setListValue(CqlIdentifier.fromCql(columnName), index, value); } /** @@ -221,7 +249,7 @@ default UpdateWithAssignments decrement(@NonNull String columnName) { } /** - * Appends to a collection column, as in {@code SET l+=?}. + * Appends to a collection column, as in {@code SET l=l+?}. * *
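As the javadoc spells out, the fluent methods are thin shortcuts over the `Assignment` factories; both forms in this sketch should produce the same `SET scores[0]=99` assignment (hypothetical `users` table):

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update;

import com.datastax.oss.driver.api.querybuilder.update.Assignment;
import com.datastax.oss.driver.api.querybuilder.update.UpdateWithAssignments;

public class ShortcutSketch {
  public static void main(String[] args) {
    // Fluent shortcut on the update DSL:
    UpdateWithAssignments viaShortcut =
        update("users").setListValue("scores", literal(0), literal(99));

    // Equivalent explicit form via the Assignment factory:
    UpdateWithAssignments viaFactory =
        update("users").set(Assignment.setListValue("scores", literal(0), literal(99)));
  }
}
```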

<p>The term must be a collection of the same type as the column. * @@ -246,7 +274,7 @@ default UpdateWithAssignments append(@NonNull String columnName, @NonNull Term s } /** - * Appends a single element to a list column, as in {@code SET l+=[?]}. + * Appends a single element to a list column, as in {@code SET l=l+[?]}. * *

<p>The term must be of the same type as the column's elements. * @@ -274,7 +302,7 @@ default UpdateWithAssignments appendListElement( } /** - * Appends a single element to a set column, as in {@code SET s+={?}}. + * Appends a single element to a set column, as in {@code SET s=s+{?}}. * *

<p>The term must be of the same type as the column's elements. * @@ -299,7 +327,7 @@ default UpdateWithAssignments appendSetElement(@NonNull String columnName, @NonN } /** - * Appends a single entry to a map column, as in {@code SET m+={?:?}}. + * Appends a single entry to a map column, as in {@code SET m=m+{?:?}}. * *

<p>The terms must be of the same type as the column's keys and values respectively. * @@ -436,7 +464,7 @@ default UpdateWithAssignments prependMapEntry( } /** - * Removes elements from a collection, as in {@code SET l-=[1,2,3]}. + * Removes elements from a collection, as in {@code SET l=l-[1,2,3]}. * *

<p>The term must be a collection of the same type as the column. * @@ -469,7 +497,7 @@ default UpdateWithAssignments remove( } /** - * Removes a single element to a list column, as in {@code SET l-=[?]}. + * Removes a single element from a list column, as in {@code SET l=l-[?]}. * *

<p>The term must be of the same type as the column's elements. * @@ -497,7 +525,7 @@ default UpdateWithAssignments removeListElement( } /** - * Removes a single element to a set column, as in {@code SET s-={?}}. + * Removes a single element from a set column, as in {@code SET s=s-{?}}. * *

<p>The term must be of the same type as the column's elements. * @@ -522,7 +550,7 @@ default UpdateWithAssignments removeSetElement(@NonNull String columnName, @NonN } /** - * Removes a single entry to a map column, as in {@code SET m-={?:?}}. + * Removes a single entry from a map column, as in {@code SET m=m-{?:?}}. * *

<p>The terms must be of the same type as the column's keys and values respectively. * diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java index 5495b910d56..eb791ad0cd1 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/Update.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java index 89d93b8b7c3..de6712c5b93 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateStart.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java index 7f8559289f0..106cb5d12eb 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/update/UpdateWithAssignments.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java index ccb6949a7c5..71a93b87b18 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ArithmeticOperator.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java index 6e4117c3856..55a923e46ad 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/CqlHelper.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java index e6c08f8e063..3d9349b5536 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultLiteral.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java index c60d85d0290..ad07b895304 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/DefaultRaw.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java index 86e7b1e239d..d60d6f737e3 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/ImmutableCollections.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java index 8fa66674b79..4b24c98a85b 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultCondition.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java index 3f3561cfe45..d80bdfc3d61 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/condition/DefaultConditionBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java index 319acf238a9..578950bcd40 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/delete/DefaultDelete.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java index 4ec50e2e918..08717584773 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/insert/DefaultInsert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java index e3bd2a641f4..060308d2ce4 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnComponentLeftOperand.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java index d4d79474bac..7d4a87f1c0a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/ColumnLeftOperand.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java index 76818b737f5..ccfca2a50dc 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/FieldLeftOperand.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java index fb5cdce0824..2eae6ee8382 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/LeftOperand.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java index 986f701fddf..7f0b2d3c9a2 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TokenLeftOperand.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java index 82ce28ffcc5..35e60c3a33c 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/lhs/TupleLeftOperand.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java index 0ed15a5f805..d96ffbe5201 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/CustomIndexRelation.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java index 63a97a831db..139827250ef 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnComponentRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java index 65ac9f1fba2..5f085083bc2 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultColumnRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java index 5f66441313f..a0670c47140 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultMultiColumnRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java index 3807986e611..25786cbfe7f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultRelation.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -50,7 +52,7 @@ public void appendTo(@NonNull StringBuilder builder) { @Override public boolean isIdempotent() { - return rightOperand.isIdempotent(); + return rightOperand == null || rightOperand.isIdempotent(); } @NonNull diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java index fb841d39a73..192eb340bef 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/relation/DefaultTokenRelationBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java index 3e35311cfaf..4dbd876da50 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
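Amid the license-header churn, note the one behavioral change in `DefaultRelation` above: `isIdempotent()` now guards against a null right operand. A relation with no right-hand term, such as `IS NOT NULL` (used e.g. in materialized-view where clauses), previously triggered a `NullPointerException` when the builder computed a statement's idempotence; it is now simply treated as idempotent. A sketch of a relation that exercises the guard, assuming the 4.x `isNotNull()` builder method:

```java
import com.datastax.oss.driver.api.querybuilder.relation.Relation;

public class RelationSketch {
  public static void main(String[] args) {
    // "email IS NOT NULL" carries no right-hand term; with the guard,
    // idempotence computation no longer dereferences a null operand.
    Relation notNull = Relation.column("email").isNotNull();
  }
}
```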
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java index d32f38ba4ce..250e0c37026 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterMaterializedView.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java index d575ced177b..63e7076e717 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterTable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,8 +48,8 @@ public class DefaultAlterTable private final CqlIdentifier keyspace; private final CqlIdentifier tableName; - private final ImmutableMap columnsToAddInOrder; - private final ImmutableSet columnsToAdd; + private final ImmutableMap allColumnsToAddInOrder; + private final ImmutableSet columnsToAddRegular; private final ImmutableSet columnsToAddStatic; private final ImmutableSet columnsToDrop; private final ImmutableMap columnsToRename; @@ -79,8 +81,8 @@ public DefaultAlterTable( @Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier tableName, boolean dropCompactStorage, - @NonNull ImmutableMap columnsToAddInOrder, - @NonNull ImmutableSet columnsToAdd, + @NonNull ImmutableMap allColumnsToAddInOrder, + @NonNull ImmutableSet columnsToAddRegular, @NonNull ImmutableSet columnsToAddStatic, @NonNull ImmutableSet columnsToDrop, @NonNull ImmutableMap columnsToRename, @@ -90,8 +92,8 @@ public DefaultAlterTable( this.keyspace = keyspace; this.tableName = tableName; this.dropCompactStorage = dropCompactStorage; - this.columnsToAddInOrder = columnsToAddInOrder; - this.columnsToAdd = columnsToAdd; + this.allColumnsToAddInOrder = allColumnsToAddInOrder; + this.columnsToAddRegular = columnsToAddRegular; this.columnsToAddStatic = columnsToAddStatic; this.columnsToDrop = columnsToDrop; this.columnsToRename = columnsToRename; @@ -108,8 +110,8 @@ public AlterTableAddColumnEnd addColumn( keyspace, tableName, dropCompactStorage, - ImmutableCollections.append(columnsToAddInOrder, columnName, dataType), - appendSet(columnsToAdd, columnName), + ImmutableCollections.append(allColumnsToAddInOrder, columnName, dataType), + appendSet(columnsToAddRegular, columnName), columnsToAddStatic, columnsToDrop, columnsToRename, @@ -126,8 +128,8 @@ public AlterTableAddColumnEnd addStaticColumn( keyspace, tableName, dropCompactStorage, - ImmutableCollections.append(columnsToAddInOrder, columnName, dataType), - columnsToAdd, + ImmutableCollections.append(allColumnsToAddInOrder, columnName, dataType), + columnsToAddRegular, appendSet(columnsToAddStatic, columnName), columnsToDrop, columnsToRename, @@ -143,8 +145,8 @@ public BuildableQuery dropCompactStorage() { keyspace, tableName, true, - columnsToAddInOrder, - columnsToAdd, + allColumnsToAddInOrder, + columnsToAddRegular, columnsToAddStatic, columnsToDrop, columnsToRename, @@ -166,8 +168,8 @@ public AlterTableDropColumnEnd dropColumns(@NonNull CqlIdentifier... 
columnNames keyspace, tableName, dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, + allColumnsToAddInOrder, + columnsToAddRegular, columnsToAddStatic, builder.build(), columnsToRename, @@ -184,8 +186,8 @@ public AlterTableRenameColumnEnd renameColumn( keyspace, tableName, dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, + allColumnsToAddInOrder, + columnsToAddRegular, columnsToAddStatic, columnsToDrop, ImmutableCollections.append(columnsToRename, from, to), @@ -201,8 +203,8 @@ public BuildableQuery alterColumn(@NonNull CqlIdentifier columnName, @NonNull Da keyspace, tableName, dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, + allColumnsToAddInOrder, + columnsToAddRegular, columnsToAddStatic, columnsToDrop, columnsToRename, @@ -218,8 +220,8 @@ public AlterTableWithOptionsEnd withOption(@NonNull String name, @NonNull Object keyspace, tableName, dropCompactStorage, - columnsToAddInOrder, - columnsToAdd, + allColumnsToAddInOrder, + columnsToAddRegular, columnsToAddStatic, columnsToDrop, columnsToRename, @@ -242,13 +244,13 @@ public String asCql() { .append(" TYPE ") .append(columnToAlterType.asCql(true, true)) .toString(); - } else if (!columnsToAdd.isEmpty()) { + } else if (!allColumnsToAddInOrder.isEmpty()) { builder.append(" ADD "); - if (columnsToAdd.size() > 1) { + if (allColumnsToAddInOrder.size() > 1) { builder.append('('); } boolean first = true; - for (Map.Entry column : columnsToAddInOrder.entrySet()) { + for (Map.Entry column : allColumnsToAddInOrder.entrySet()) { if (first) { first = false; } else { @@ -263,7 +265,7 @@ public String asCql() { builder.append(" STATIC"); } } - if (columnsToAdd.size() > 1) { + if (allColumnsToAddInOrder.size() > 1) { builder.append(')'); } return builder.toString(); @@ -324,13 +326,13 @@ public CqlIdentifier getTable() { } @NonNull - public ImmutableMap getColumnsToAddInOrder() { - return columnsToAddInOrder; + public ImmutableMap getAllColumnsToAddInOrder() { + return allColumnsToAddInOrder; } @NonNull public ImmutableSet getColumnsToAddRegular() { - return columnsToAdd; + return columnsToAddRegular; } @NonNull diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java index 7a75adf0414..85b96265270 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultAlterType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
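The `DefaultAlterTable` renames above (`columnsToAddInOrder` to `allColumnsToAddInOrder`, `columnsToAdd` to `columnsToAddRegular`) are not purely cosmetic: `asCql()` now keys off the map holding both regular and static additions, so the `ADD` clause and its surrounding parentheses render correctly when static columns are involved. An illustrative sketch with a hypothetical keyspace and table (the exact whitespace of the output may differ):

```java
import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.alterTable;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class AlterTableSketch {
  public static void main(String[] args) {
    // One regular and one static column in a single ALTER TABLE ... ADD (...)
    String cql =
        alterTable("inventory", "product")
            .addColumn("vendor", DataTypes.TEXT)
            .addStaticColumn("category", DataTypes.TEXT)
            .asCql();
    System.out.println(cql);
    // Roughly: ALTER TABLE inventory.product ADD (vendor text,category text STATIC)
  }
}
```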
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java index 3dea78d82dd..1e0b4892277 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateAggregate.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java index 77786850ab2..85035cde915 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateFunction.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java index c307bbba178..309beaa4afa 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateIndex.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java index c3908b6e72b..b8cb237d5ff 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java index 710dbfb02df..bfd8fba51eb 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateMaterializedView.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java index 1de5651c2ec..058aeccdd24 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateTable.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java index de5d1841bfe..9f304ced084 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultCreateType.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java index 49a692a1f84..b5d164e77db 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDrop.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java index 2ded8b95e20..905dfa16871 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/DefaultDropKeyspace.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java index 83ff28503ae..4e80f72f1e3 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/OptionsUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/RawOptionsWrapper.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/RawOptionsWrapper.java new file mode 100644 index 00000000000..64cdb50f887 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/RawOptionsWrapper.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.querybuilder.schema; + +import com.datastax.oss.driver.api.core.data.ByteUtils; + +/** + * Wrapper class to indicate that the contained String value should be understood to represent a CQL + * literal that can be included directly in a CQL statement (i.e. without escaping). + */ +public class RawOptionsWrapper { + private final String val; + + private RawOptionsWrapper(String val) { + this.val = val; + } + + public static RawOptionsWrapper of(String val) { + return new RawOptionsWrapper(val); + } + + public static RawOptionsWrapper of(byte[] val) { + return new RawOptionsWrapper(ByteUtils.toHexString(val)); + } + + @Override + public String toString() { + return this.val; + } +} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java index 2c8ccdd6e6a..166c0b29290 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/Utils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java index 42b93c27d50..11d5341fa0a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
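The new RawOptionsWrapper class above is internal query-builder API. A minimal sketch of how it behaves, assuming only the class as shown in the patch (the caching option value below is a hypothetical example):

```java
import com.datastax.oss.driver.internal.querybuilder.schema.RawOptionsWrapper;

public class RawOptionsWrapperSketch {
  public static void main(String[] args) {
    // The wrapped string is emitted verbatim by toString(), so it can be spliced
    // into generated CQL without quoting or escaping.
    RawOptionsWrapper caching = RawOptionsWrapper.of("{'keys': 'ALL'}");
    System.out.println("WITH caching = " + caching); // WITH caching = {'keys': 'ALL'}

    // The byte[] overload converts the bytes to a CQL blob-style hex literal via ByteUtils.
    RawOptionsWrapper blob = RawOptionsWrapper.of(new byte[] {0x01, 0x02});
    System.out.println(blob); // 0x0102 (assuming lowercase hex rendering)
  }
}
```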
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java index d0ddd4a7420..a6933a9d1b5 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultLeveledCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java index 91c339fd8a4..2c3710452a6 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultSizeTieredCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java index e69ca8c93a3..4a2d28bb87e 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/schema/compaction/DefaultTimeWindowCompactionStrategy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java index ed0c2977420..3d87adf4f0d 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/AllSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java index 61998499cd5..6af0ecfee87 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ArithmeticSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java index c1cdf8cd766..d4e3b652dba 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/BinaryArithmeticSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java index b397cdcaf16..d256407421e 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CastSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java index ccec72cebee..dc9929a0f18 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CollectionSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java index 6404bcc15e7..43dcd46042d 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ColumnSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java index 07357e7c773..4efbaae5924 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/CountAllSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java index 231a812a98f..328c51328ba 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultBindMarker.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java index 6ab7c8a4065..5daf252a9eb 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/DefaultSelect.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +20,10 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.SimpleStatementBuilder; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; import com.datastax.oss.driver.api.querybuilder.BindMarker; +import com.datastax.oss.driver.api.querybuilder.QueryBuilder; import com.datastax.oss.driver.api.querybuilder.relation.Relation; import com.datastax.oss.driver.api.querybuilder.select.Select; import com.datastax.oss.driver.api.querybuilder.select.SelectFrom; @@ -47,6 +51,7 @@ public class DefaultSelect implements SelectFrom, Select { private final ImmutableList<Relation> relations; private final ImmutableList<Selector> groupByClauses; private final ImmutableMap<CqlIdentifier, ClusteringOrder> orderings; + private final Ann ann; private final Object limit; private final Object perPartitionLimit; private final boolean allowsFiltering; @@ -63,6 +68,7 @@ public DefaultSelect(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier ta ImmutableMap.of(), null, null, + null, false); } @@ -72,6 +78,8 @@ public DefaultSelect(@Nullable CqlIdentifier keyspace, @NonNull CqlIdentifier ta * @param selectors if it contains {@link AllSelector#INSTANCE}, that must be the only element. * This isn't re-checked because methods that call this constructor internally already do it, * make sure you do it yourself. + * @param ann Approximate nearest neighbor. ANN ordering does not support secondary ordering or + * ASC order.
*/ public DefaultSelect( @Nullable CqlIdentifier keyspace, @@ -82,6 +90,7 @@ public DefaultSelect( @NonNull ImmutableList<Relation> relations, @NonNull ImmutableList<Selector> groupByClauses, @NonNull ImmutableMap<CqlIdentifier, ClusteringOrder> orderings, + @Nullable Ann ann, @Nullable Object limit, @Nullable Object perPartitionLimit, boolean allowsFiltering) { @@ -92,6 +101,9 @@ public DefaultSelect( || (limit instanceof Integer && (Integer) limit > 0) || limit instanceof BindMarker, "limit must be a strictly positive integer or a bind marker"); + Preconditions.checkArgument( + orderings.isEmpty() || ann == null, "ANN ordering does not support secondary ordering"); + this.ann = ann; this.keyspace = keyspace; this.table = table; this.isJson = isJson; @@ -115,6 +127,7 @@ public SelectFrom json() { relations, groupByClauses, orderings, + ann, limit, perPartitionLimit, allowsFiltering); @@ -132,6 +145,7 @@ public SelectFrom distinct() { relations, groupByClauses, orderings, + ann, limit, perPartitionLimit, allowsFiltering); @@ -191,6 +205,7 @@ public Select withSelectors(@NonNull ImmutableList<Selector> newSelectors) { relations, groupByClauses, orderings, + ann, limit, perPartitionLimit, allowsFiltering); @@ -219,6 +234,7 @@ public Select withRelations(@NonNull ImmutableList<Relation> newRelations) { newRelations, groupByClauses, orderings, + ann, limit, perPartitionLimit, allowsFiltering); @@ -247,6 +263,7 @@ public Select withGroupByClauses(@NonNull ImmutableList<Selector> newGroupByClau relations, newGroupByClauses, orderings, + ann, limit, perPartitionLimit, allowsFiltering); @@ -258,6 +275,18 @@ public Select orderBy(@NonNull CqlIdentifier columnId, @NonNull ClusteringOrder return withOrderings(ImmutableCollections.append(orderings, columnId, order)); } + @NonNull + @Override + public Select orderByAnnOf(@NonNull String columnName, @NonNull CqlVector<?> ann) { + return withAnn(new Ann(CqlIdentifier.fromCql(columnName), ann)); + } + + @NonNull + @Override + public Select orderByAnnOf(@NonNull CqlIdentifier columnId, @NonNull CqlVector<?> ann) { + return withAnn(new Ann(columnId, ann)); + } + @NonNull @Override public Select orderByIds(@NonNull Map<CqlIdentifier, ClusteringOrder> newOrderings) { @@ -275,6 +304,24 @@ public Select withOrderings(@NonNull ImmutableMap entry : orderings.entrySet()) { - if (first) { - builder.append(" ORDER BY "); - first = false; - } else { - builder.append(","); + if (ann != null) { + builder.append(" ORDER BY ").append(this.ann.columnId.asCql(true)).append(" ANN OF "); + QueryBuilder.literal(ann.vector).appendTo(builder); + } else { + boolean first = true; + for (Map.Entry<CqlIdentifier, ClusteringOrder> entry : orderings.entrySet()) { + if (first) { + builder.append(" ORDER BY "); + first = false; + } else { + builder.append(","); + } + builder.append(entry.getKey().asCql(true)).append(" ").append(entry.getValue().name()); } - builder.append(entry.getKey().asCql(true)).append(" ").append(entry.getValue().name()); } if (limit != null) { @@ -497,6 +554,11 @@ public Object getLimit() { return limit; } + @Nullable + public Ann getAnn() { + return ann; + } + @Nullable public Object getPerPartitionLimit() { return perPartitionLimit; @@ -510,4 +572,14 @@ public boolean allowsFiltering() { public String toString() { return asCql(); } + + public static class Ann { + private final CqlVector<?> vector; + private final CqlIdentifier columnId; + + private Ann(CqlIdentifier columnId, CqlVector<?> vector) { + this.vector = vector; + this.columnId = columnId; + } + } } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java
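The DefaultSelect changes above add CQL vector-search support: orderByAnnOf renders an ORDER BY <column> ANN OF <vector> clause, and the new precondition rejects combining ANN with regular orderings. A minimal usage sketch through the public builder API; the keyspace, table, and column names are hypothetical:

```java
import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;

import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.data.CqlVector;

public class AnnOrderingSketch {
  public static void main(String[] args) {
    // Builds roughly: SELECT * FROM ks.docs ORDER BY embedding ANN OF [0.1, 0.2, 0.3] LIMIT 3
    SimpleStatement stmt =
        selectFrom("ks", "docs")
            .all()
            .orderByAnnOf("embedding", CqlVector.newInstance(0.1f, 0.2f, 0.3f))
            .limit(3)
            .build();
    System.out.println(stmt.getQuery());
  }
}
```

Note that ANN ordering is mutually exclusive with orderBy clauses; attempting both throws an IllegalArgumentException from the constructor precondition shown in the diff.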
b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java index 1577fb30b9f..d2ac61e8aee 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ElementSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java index 6147903f633..a39a270d9f9 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FieldSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java index 3642008405f..98a0bb07c41 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/FunctionSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java index 2d62fe38f59..2a8ea73e474 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/ListSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java index f7fde80d5bd..27f28d3e0e2 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/MapSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java index b6a18cf51f5..05d27421cb8 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/OppositeSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java index a21a8d0c59e..e63eef0da50 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/RangeSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java index a291247d546..4e2d4221a31 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/SetSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java index 2058313eabb..3d3a351f7c8 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TupleSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java index 3f8f15ba729..491ffe16adc 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/select/TypeHintSelector.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java index fdb1c2210ae..2d6f2094b07 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/ArithmeticTerm.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java index a29c6cbfc53..05e829af9f8 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/BinaryArithmeticTerm.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java index e91ed04b775..0980925288e 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/FunctionTerm.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java index 750bea39167..28010befc44 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/OppositeTerm.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java index c9c14dab3ec..eb6bd94c6e3 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TupleTerm.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java index 3ba9b53eb9f..9ed45f852a5 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/term/TypeHintTerm.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java index 75b0eb2dec4..f3aa0006756 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/truncate/DefaultTruncate.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java index 271c0bcca16..7d2c653cee6 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +17,15 @@ */ package com.datastax.oss.driver.internal.querybuilder.update; +import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.LeftOperand; import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.Immutable; @Immutable -public class AppendAssignment extends DefaultAssignment { - - public AppendAssignment(@NonNull LeftOperand leftOperand, @NonNull Term rightOperand) { - super(leftOperand, "+=", rightOperand); - } +public class AppendAssignment extends CollectionAssignment { - @Override - public boolean isIdempotent() { - // Not idempotent for lists, be pessimistic - return false; + public AppendAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { + super(columnId, Operator.APPEND, value); } } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java index 0005efaf7e2..717e07f9026 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendListElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java index c04c6e23b7e..1001fa919e1 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendMapEntryAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java index 6edf7108d76..6d3a11afc8a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/AppendSetElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionAssignment.java new file mode 100644 index 00000000000..3dddd21d143 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionAssignment.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.querybuilder.update; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.querybuilder.term.Term; +import com.datastax.oss.driver.api.querybuilder.update.Assignment; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.Immutable; + +@Immutable +public abstract class CollectionAssignment implements Assignment { + + public enum Operator { + APPEND("%1$s=%1$s+%2$s"), + PREPEND("%1$s=%2$s+%1$s"), + REMOVE("%1$s=%1$s-%2$s"), + ; + + public final String pattern; + + Operator(String pattern) { + this.pattern = pattern; + } + } + + private final CqlIdentifier columnId; + private final Operator operator; + private final Term value; + + protected CollectionAssignment( + @NonNull CqlIdentifier columnId, @NonNull Operator operator, @NonNull Term value) { + Preconditions.checkNotNull(columnId); + Preconditions.checkNotNull(value); + this.columnId = columnId; + this.operator = operator; + this.value = value; + } + + @Override + public void appendTo(@NonNull StringBuilder builder) { + builder.append(String.format(operator.pattern, columnId.asCql(true), buildRightOperand())); + } + + private String buildRightOperand() { + StringBuilder builder = new StringBuilder(); + value.appendTo(builder); + return builder.toString(); + } + + @Override + public boolean isIdempotent() { + // REMOVE is idempotent as long as the value term being removed is itself idempotent; APPEND + // and PREPEND are not idempotent for lists (a retry appends again), so be pessimistic + return operator == Operator.REMOVE && value.isIdempotent(); + } + + @NonNull + public CqlIdentifier getColumnId() { + return columnId; + } + + @NonNull + public Term getValue() { + return value; + } +} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java
b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java index 35da1ca0f6e..d4b2f532155 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CollectionElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,9 +29,9 @@ public abstract class CollectionElementAssignment implements Assignment { public enum Operator { - APPEND("%s+=%s"), + APPEND("%1$s=%1$s+%2$s"), PREPEND("%1$s=%2$s+%1$s"), - REMOVE("%s-=%s"), + REMOVE("%1$s=%1$s-%2$s"), ; public final String pattern; diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java index ff1280de5dd..3751255eef9 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/CounterAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +17,64 @@ */ package com.datastax.oss.driver.internal.querybuilder.update; +import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.internal.querybuilder.lhs.LeftOperand; +import com.datastax.oss.driver.api.querybuilder.update.Assignment; +import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.Immutable; @Immutable -public class CounterAssignment extends DefaultAssignment { +public abstract class CounterAssignment implements Assignment { + + public enum Operator { + INCREMENT("%1$s=%1$s+%2$s"), + DECREMENT("%1$s=%1$s-%2$s"), + ; + + public final String pattern; + + Operator(String pattern) { + this.pattern = pattern; + } + } + + private final CqlIdentifier columnId; + private final Operator operator; + private final Term value; - public CounterAssignment( - @NonNull LeftOperand leftOperand, @NonNull String operator, @NonNull Term rightOperand) { - super(leftOperand, operator, rightOperand); + protected CounterAssignment( + @NonNull CqlIdentifier columnId, @NonNull Operator operator, @NonNull Term value) { + Preconditions.checkNotNull(columnId); + Preconditions.checkNotNull(value); + this.columnId = columnId; + this.operator = operator; + this.value = value; + } + + @Override + public void appendTo(@NonNull StringBuilder builder) { + builder.append(String.format(operator.pattern, columnId.asCql(true), buildRightOperand())); + } + + private String buildRightOperand() { + StringBuilder builder = new StringBuilder(); + value.appendTo(builder); + return builder.toString(); } @Override public boolean isIdempotent() { return false; } + + @NonNull + public CqlIdentifier getColumnId() { + return columnId; + } + + @NonNull + public Term getValue() { + return value; + } } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DecrementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DecrementAssignment.java new file mode 100644 index 00000000000..aabf59019c6 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DecrementAssignment.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.querybuilder.update; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.querybuilder.term.Term; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.Immutable; + +@Immutable +public class DecrementAssignment extends CounterAssignment { + + public DecrementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { + super(columnId, Operator.DECREMENT, value); + } +} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java index b889831d5fd..7f138c21d43 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java index 0e864f28e3b..4d9d18f3aa4 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/DefaultUpdate.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/IncrementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/IncrementAssignment.java new file mode 100644 index 00000000000..4aba6b983f6 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/IncrementAssignment.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.internal.querybuilder.update; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.querybuilder.term.Term; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.Immutable; + +@Immutable +public class IncrementAssignment extends CounterAssignment { + + public IncrementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { + super(columnId, Operator.INCREMENT, value); + } +} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java index d58a6ffa18e..4094f3272a8 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,42 +19,13 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.querybuilder.term.Term; -import com.datastax.oss.driver.api.querybuilder.update.Assignment; import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.Immutable; @Immutable -public class PrependAssignment implements Assignment { - - private final CqlIdentifier columnId; - private final Term prefix; +public class PrependAssignment extends CollectionAssignment { public PrependAssignment(@NonNull CqlIdentifier columnId, @NonNull Term prefix) { - this.columnId = columnId; - this.prefix = prefix; - } - - @Override - public void appendTo(@NonNull StringBuilder builder) { - String column = columnId.asCql(true); - builder.append(column).append('='); - prefix.appendTo(builder); - builder.append('+').append(column); - } - - @Override - public boolean isIdempotent() { - // Not idempotent for lists, be pessimistic - return false; - } - - @NonNull - public CqlIdentifier getColumnId() { - return columnId; - } - - @NonNull - public Term getPrefix() { - return prefix; + super(columnId, Operator.PREPEND, prefix); } } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java index a9bc032c432..de73a9d0840 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependListElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java index 691ab6461be..093d1e58613 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependMapEntryAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java index 7924a0d6afe..00083648aa4 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/PrependSetElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveAssignment.java new file mode 100644 index 00000000000..618ccdbdc89 --- /dev/null +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveAssignment.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.internal.querybuilder.update; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.querybuilder.term.Term; +import edu.umd.cs.findbugs.annotations.NonNull; +import net.jcip.annotations.Immutable; + +@Immutable +public class RemoveAssignment extends CollectionAssignment { + + public RemoveAssignment(@NonNull CqlIdentifier columnId, @NonNull Term value) { + super(columnId, Operator.REMOVE, value); + } +} diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java index 985a871fe5e..ce60c6e1d9a 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveListElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,9 +28,4 @@ public class RemoveListElementAssignment extends CollectionElementAssignment { public RemoveListElementAssignment(@NonNull CqlIdentifier columnId, @NonNull Term element) { super(columnId, Operator.REMOVE, null, element, '[', ']'); } - - @Override - public boolean isIdempotent() { - return false; - } } diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java index c87e30250fc..598dd215e5f 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveMapEntryAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java index f863dd7f67f..7a8e73da1fd 100644 --- a/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java +++ b/query-builder/src/main/java/com/datastax/oss/driver/internal/querybuilder/update/RemoveSetElementAssignment.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/Assertions.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/Assertions.java new file mode 100644 index 00000000000..b1a463378e0 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/Assertions.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.querybuilder; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; +import com.datastax.oss.driver.api.querybuilder.CqlSnippet; + +public class Assertions extends org.assertj.core.api.Assertions { + + public static BuildableQueryAssert assertThat(BuildableQuery actual) { + return new BuildableQueryAssert(actual); + } + + public static CqlSnippetAssert assertThat(CqlSnippet actual) { + return new CqlSnippetAssert(actual); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/BuildableQueryAssert.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/BuildableQueryAssert.java new file mode 100644 index 00000000000..3173723353a --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/BuildableQueryAssert.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.querybuilder.BuildableQuery; +import org.assertj.core.api.AbstractAssert; + +public class BuildableQueryAssert extends AbstractAssert<BuildableQueryAssert, BuildableQuery> { + + public BuildableQueryAssert(BuildableQuery actual) { + super(actual, BuildableQueryAssert.class); + } + + public BuildableQueryAssert hasCql(String expected) { + assertThat(actual.asCql()).isEqualTo(expected); + return this; + } + + public BuildableQueryAssert isIdempotent() { + assertThat(actual.build().isIdempotent()).isTrue(); + return this; + } + + public BuildableQueryAssert isNotIdempotent() { + assertThat(actual.build().isIdempotent()).isFalse(); + return this; + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/CqlSnippetAssert.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/CqlSnippetAssert.java new file mode 100644 index 00000000000..1aa165d1319 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/CqlSnippetAssert.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.querybuilder.CqlSnippet; +import org.assertj.core.api.AbstractAssert; + +public class CqlSnippetAssert extends AbstractAssert<CqlSnippetAssert, CqlSnippet> { + + public CqlSnippetAssert(CqlSnippet actual) { + super(actual, CqlSnippetAssert.class); + } + + public CqlSnippetAssert hasCql(String expected) { + StringBuilder builder = new StringBuilder(); + actual.appendTo(builder); + assertThat(builder.toString()).isEqualTo(expected); + return this; + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceTest.java new file mode 100644 index 00000000000..a3e2c44cfac --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseKeyspaceTest.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.alterDseKeyspace; + +import org.junit.Test; + +public class AlterDseKeyspaceTest { + + @Test + public void should_not_throw_on_toString_for_AlterKeyspaceStart() { + assertThat(alterDseKeyspace("foo").toString()).isEqualTo("ALTER KEYSPACE foo"); + } + + @Test + public void should_generate_alter_keyspace_with_replication() { + assertThat(alterDseKeyspace("foo").withSimpleStrategy(3)) + .hasCql( + "ALTER KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':3}"); + } + + @Test + public void should_generate_alter_keyspace_with_graph_engine() { + assertThat(alterDseKeyspace("foo").withSimpleStrategy(3).withGraphEngine("Core")) + .hasCql( + "ALTER KEYSPACE foo " + + "WITH replication={'class':'SimpleStrategy','replication_factor':3} " + + "AND graph_engine='Core'"); + } + + @Test + public void should_generate_alter_keyspace_with_durable_writes_and_options() { + assertThat(alterDseKeyspace("foo").withDurableWrites(true).withOption("hello", "world")) + .hasCql("ALTER KEYSPACE foo WITH durable_writes=true AND hello='world'"); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableTest.java new file mode 100644 index 00000000000..b2b5965fbc7 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/AlterDseTableTest.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.alterDseTable; +import static com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide.table; + +import com.datastax.oss.driver.api.core.type.DataTypes; +import org.junit.Test; + +public class AlterDseTableTest { + + @Test + public void should_not_throw_on_toString_for_AlterTableStart() { + assertThat(alterDseTable("foo").toString()).isEqualTo("ALTER TABLE foo"); + } + + @Test + public void should_generate_alter_table_with_alter_column_type() { + assertThat(alterDseTable("foo", "bar").alterColumn("x", DataTypes.TEXT)) + .hasCql("ALTER TABLE foo.bar ALTER x TYPE text"); + } + + @Test + public void should_generate_alter_table_with_add_single_column() { + assertThat(alterDseTable("foo", "bar").addColumn("x", DataTypes.TEXT)) + .hasCql("ALTER TABLE foo.bar ADD x text"); + } + + @Test + public void should_generate_alter_table_with_add_three_columns() { + assertThat( + alterDseTable("foo", "bar") + .addColumn("x", DataTypes.TEXT) + .addStaticColumn("y", DataTypes.FLOAT) + .addColumn("z", DataTypes.DOUBLE)) + .hasCql("ALTER TABLE foo.bar ADD (x text,y float STATIC,z double)"); + } + + @Test + public void should_generate_alter_table_with_drop_single_column() { + assertThat(alterDseTable("foo", "bar").dropColumn("x")).hasCql("ALTER TABLE foo.bar DROP x"); + } + + @Test + public void should_generate_alter_table_with_drop_two_columns() { + assertThat(alterDseTable("foo", "bar").dropColumn("x").dropColumn("y")) + .hasCql("ALTER TABLE foo.bar DROP (x,y)"); + } + + @Test + public void should_generate_alter_table_with_drop_two_columns_at_once() { + assertThat(alterDseTable("foo", "bar").dropColumns("x", "y")) + .hasCql("ALTER TABLE foo.bar DROP (x,y)"); + } + + @Test + public void should_generate_alter_table_with_rename_single_column() { + assertThat(alterDseTable("foo", "bar").renameColumn("x", "y")) + .hasCql("ALTER TABLE foo.bar RENAME x TO y"); + } + + @Test + public void should_generate_alter_table_with_rename_three_columns() { + assertThat( + alterDseTable("foo", "bar") + .renameColumn("x", "y") + .renameColumn("u", "v") + .renameColumn("b", "a")) + .hasCql("ALTER TABLE foo.bar RENAME x TO y AND u TO v AND b TO a"); + } + + @Test + public void should_generate_alter_table_with_drop_compact_storage() { + assertThat(alterDseTable("bar").dropCompactStorage()) + .hasCql("ALTER TABLE bar DROP COMPACT STORAGE"); + } + + @Test + public void should_generate_alter_table_with_options() { + assertThat(alterDseTable("bar").withComment("Hello").withCDC(true)) + .hasCql("ALTER TABLE bar WITH comment='Hello' AND cdc=true"); + } + + @Test + public void should_generate_alter_table_with_no_compression() { + assertThat(alterDseTable("bar").withNoCompression()) + .hasCql("ALTER TABLE bar WITH compression={'sstable_compression':''}"); + } + + @Test + public void should_generate_alter_table_to_add_anonymous_vertex_label() { + assertThat(alterDseTable("bar").withVertexLabel()).hasCql("ALTER TABLE bar WITH VERTEX LABEL"); + } + + @Test + public void should_generate_alter_table_to_add_named_vertex_label() { + assertThat(alterDseTable("bar").withVertexLabel("baz")) + .hasCql("ALTER TABLE bar WITH VERTEX LABEL baz"); + } + + @Test + public void should_generate_alter_table_to_remove_anonymous_vertex_label() { + assertThat(alterDseTable("bar").withoutVertexLabel()) + .hasCql("ALTER TABLE bar WITHOUT VERTEX 
LABEL"); + } + + @Test + public void should_generate_alter_table_to_remove_named_vertex_label() { + assertThat(alterDseTable("bar").withoutVertexLabel("baz")) + .hasCql("ALTER TABLE bar WITHOUT VERTEX LABEL baz"); + } + + @Test + public void should_generate_alter_table_to_add_anonymous_edge_label() { + assertThat( + alterDseTable("bar") + .withEdgeLabel( + table("source").withPartitionKey("pk"), + table("dest") + .withPartitionKey("pk1") + .withPartitionKey("pk2") + .withClusteringColumn("cc"))) + .hasCql("ALTER TABLE bar WITH EDGE LABEL FROM source(pk) TO dest((pk1,pk2),cc)"); + } + + @Test + public void should_generate_alter_table_to_add_named_edge_label() { + assertThat( + alterDseTable("bar") + .withEdgeLabel( + "e", + table("source").withPartitionKey("pk"), + table("dest") + .withPartitionKey("pk1") + .withPartitionKey("pk2") + .withClusteringColumn("cc"))) + .hasCql("ALTER TABLE bar WITH EDGE LABEL e FROM source(pk) TO dest((pk1,pk2),cc)"); + } + + @Test + public void should_generate_alter_table_to_remove_anonymous_edge_label() { + assertThat(alterDseTable("bar").withoutEdgeLabel()) + .hasCql("ALTER TABLE bar WITHOUT EDGE LABEL"); + } + + @Test + public void should_generate_alter_table_to_remove_named_edge_label() { + assertThat(alterDseTable("bar").withoutEdgeLabel("baz")) + .hasCql("ALTER TABLE bar WITHOUT EDGE LABEL baz"); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceTest.java new file mode 100644 index 00000000000..d92659b2d1c --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseKeyspaceTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseKeyspace; + +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import org.junit.Test; + +public class CreateDseKeyspaceTest { + + @Test + public void should_not_throw_on_toString_for_CreateKeyspaceStart() { + assertThat(createDseKeyspace("foo").toString()).isEqualTo("CREATE KEYSPACE foo"); + } + + @Test + public void should_generate_create_keyspace_simple_strategy() { + assertThat(createDseKeyspace("foo").withSimpleStrategy(5)) + .hasCql( + "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':5}"); + } + + @Test + public void should_generate_create_keyspace_simple_strategy_and_durable_writes() { + assertThat(createDseKeyspace("foo").withSimpleStrategy(5).withDurableWrites(true)) + .hasCql( + "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':5} AND durable_writes=true"); + } + + @Test + public void should_generate_create_keyspace_if_not_exists() { + assertThat(createDseKeyspace("foo").ifNotExists().withSimpleStrategy(2)) + .hasCql( + "CREATE KEYSPACE IF NOT EXISTS foo WITH replication={'class':'SimpleStrategy','replication_factor':2}"); + } + + @Test + public void should_generate_create_keyspace_network_topology_strategy() { + assertThat( + createDseKeyspace("foo") + .withNetworkTopologyStrategy(ImmutableMap.of("dc1", 3, "dc2", 4))) + .hasCql( + "CREATE KEYSPACE foo WITH replication={'class':'NetworkTopologyStrategy','dc1':3,'dc2':4}"); + } + + @Test + public void should_generate_create_keyspace_with_graph_engine() { + assertThat( + createDseKeyspace("foo") + .ifNotExists() + .withNetworkTopologyStrategy(ImmutableMap.of("dc1", 3, "dc2", 4)) + .withDurableWrites(true) + .withGraphEngine("Core")) + .hasCql( + "CREATE KEYSPACE IF NOT EXISTS foo " + + "WITH replication={'class':'NetworkTopologyStrategy','dc1':3,'dc2':4} " + + "AND durable_writes=true " + + "AND graph_engine='Core'"); + } + + @Test + public void should_generate_create_keyspace_with_custom_properties() { + assertThat( + createDseKeyspace("foo") + .withSimpleStrategy(3) + .withOption("awesome_feature", true) + .withOption("wow_factor", 11) + .withOption("random_string", "hi")) + .hasCql( + "CREATE KEYSPACE foo WITH replication={'class':'SimpleStrategy','replication_factor':3} AND awesome_feature=true AND wow_factor=11 AND random_string='hi'"); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java new file mode 100644 index 00000000000..d8ee1c4e380 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java @@ -0,0 +1,457 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.api.querybuilder.schema; + +import static com.datastax.dse.driver.api.querybuilder.Assertions.assertThat; +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseTable; +import static com.datastax.dse.driver.api.querybuilder.schema.DseGraphEdgeSide.table; +import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; + +import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; +import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder; +import com.datastax.oss.driver.api.querybuilder.SchemaBuilder.RowsPerPartition; +import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.CompactionWindowUnit; +import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.TimestampResolution; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import org.junit.Test; + +public class CreateDseTableTest { + + @Test + public void should_not_throw_on_toString_for_CreateTableStart() { + assertThat(createDseTable("foo").toString()).isEqualTo("CREATE TABLE foo"); + } + + @Test + public void should_generate_create_table_if_not_exists() { + assertThat(createDseTable("bar").ifNotExists().withPartitionKey("k", DataTypes.INT)) + .hasCql("CREATE TABLE IF NOT EXISTS bar (k int PRIMARY KEY)"); + } + + @Test + public void should_generate_create_table_with_single_partition_key() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT)) + .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text)"); + } + + @Test + public void should_generate_create_table_with_compound_partition_key() { + assertThat( + createDseTable("bar") + .withPartitionKey("kc", DataTypes.INT) + .withPartitionKey("ka", DataTypes.TIMESTAMP) + .withColumn("v", DataTypes.TEXT)) + .hasCql("CREATE TABLE bar (kc int,ka timestamp,v text,PRIMARY KEY((kc,ka)))"); + } + + @Test + public void should_generate_create_table_with_single_partition_key_and_clustering_column() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withClusteringColumn("c", DataTypes.TEXT) + .withColumn("v", udt("val", true))) + .hasCql("CREATE TABLE bar (k int,c text,v frozen<val>,PRIMARY KEY(k,c))"); + } + + @Test + public void should_generate_create_table_with_static_column() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withClusteringColumn("c", DataTypes.TEXT) + .withStaticColumn("s", DataTypes.TIMEUUID) + .withColumn("v", udt("val", true))) + .hasCql("CREATE TABLE bar (k int,c text,s timeuuid STATIC,v frozen<val>,PRIMARY KEY(k,c))"); + } + + @Test + public void should_generate_create_table_with_compound_partition_key_and_clustering_columns() { + assertThat( + createDseTable("bar") + .withPartitionKey("kc", DataTypes.INT) + .withPartitionKey("ka", DataTypes.TIMESTAMP) + .withClusteringColumn("c", DataTypes.FLOAT) + .withClusteringColumn("a", DataTypes.UUID) + .withColumn("v", DataTypes.TEXT)) + .hasCql( + "CREATE TABLE bar
(kc int,ka timestamp,c float,a uuid,v text,PRIMARY KEY((kc,ka),c,a))"); + } + + @Test + public void should_generate_create_table_with_compact_storage() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCompactStorage()) + .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text) WITH COMPACT STORAGE"); + } + + @Test + public void should_generate_create_table_with_clustering_single() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withClusteringColumn("c", DataTypes.TEXT) + .withColumn("v", DataTypes.TEXT) + .withClusteringOrder("c", ClusteringOrder.ASC)) + .hasCql( + "CREATE TABLE bar (k int,c text,v text,PRIMARY KEY(k,c)) WITH CLUSTERING ORDER BY (c ASC)"); + } + + @Test + public void should_generate_create_table_with_clustering_three() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withClusteringColumn("c0", DataTypes.TEXT) + .withClusteringColumn("c1", DataTypes.TEXT) + .withClusteringColumn("c2", DataTypes.TEXT) + .withColumn("v", DataTypes.TEXT) + .withClusteringOrder("c0", ClusteringOrder.DESC) + .withClusteringOrder( + ImmutableMap.of("c1", ClusteringOrder.ASC, "c2", ClusteringOrder.DESC))) + .hasCql( + "CREATE TABLE bar (k int,c0 text,c1 text,c2 text,v text,PRIMARY KEY(k,c0,c1,c2)) WITH CLUSTERING ORDER BY (c0 DESC,c1 ASC,c2 DESC)"); + } + + @Test + public void should_generate_create_table_with_compact_storage_and_default_ttl() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCompactStorage() + .withDefaultTimeToLiveSeconds(86400)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH COMPACT STORAGE AND default_time_to_live=86400"); + } + + @Test + public void should_generate_create_table_with_clustering_compact_storage_and_default_ttl() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withClusteringColumn("c", DataTypes.TEXT) + .withColumn("v", DataTypes.TEXT) + .withCompactStorage() + .withClusteringOrder("c", ClusteringOrder.DESC) + .withDefaultTimeToLiveSeconds(86400)) + .hasCql( + "CREATE TABLE bar (k int,c text,v text,PRIMARY KEY(k,c)) WITH COMPACT STORAGE AND CLUSTERING ORDER BY (c DESC) AND default_time_to_live=86400"); + } + + @Test + public void should_generate_create_table_with_options() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withBloomFilterFpChance(0.42) + .withCDC(false) + .withComment("Hello world") + .withDcLocalReadRepairChance(0.54) + .withDefaultTimeToLiveSeconds(86400) + .withGcGraceSeconds(864000) + .withMemtableFlushPeriodInMs(10000) + .withMinIndexInterval(1024) + .withMaxIndexInterval(4096) + .withReadRepairChance(0.55) + .withSpeculativeRetry("99percentile")) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH bloom_filter_fp_chance=0.42 AND cdc=false AND comment='Hello world' AND dclocal_read_repair_chance=0.54 AND default_time_to_live=86400 AND gc_grace_seconds=864000 AND memtable_flush_period_in_ms=10000 AND min_index_interval=1024 AND max_index_interval=4096 AND read_repair_chance=0.55 AND speculative_retry='99percentile'"); + } + + @Test + public void should_generate_create_table_lz4_compression() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withLZ4Compression()) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH 
compression={'class':'LZ4Compressor'}"); + } + + @Test + public void should_generate_create_table_lz4_compression_options() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withLZ4Compression(1024)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024}"); + } + + @Test + public void should_generate_create_table_lz4_compression_options_crc() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withLZ4Compression(1024, .5)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}"); + } + + @Test + public void should_generate_create_table_zstd_compression() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withZstdCompression()) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor'}"); + } + + @Test + public void should_generate_create_table_zstd_compression_options() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withZstdCompression(1024)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor','chunk_length_in_kb':1024}"); + } + + @Test + public void should_generate_create_table_snappy_compression() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withSnappyCompression()) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor'}"); + } + + @Test + public void should_generate_create_table_snappy_compression_options() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withSnappyCompression(2048)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}"); + } + + @Test + public void should_generate_create_table_snappy_compression_options_crc() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withSnappyCompression(2048, .25)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_kb':2048,'crc_check_chance':0.25}"); + } + + @Test + public void should_generate_create_table_deflate_compression() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withDeflateCompression()) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor'}"); + } + + @Test + public void should_generate_create_table_deflate_compression_options() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withDeflateCompression(4096)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}"); + } + + @Test + public void should_generate_create_table_deflate_compression_options_crc() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withDeflateCompression(4096, .1)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v 
text) WITH compression={'class':'DeflateCompressor','chunk_length_kb':4096,'crc_check_chance':0.1}"); + } + + @Test + public void should_generate_create_table_caching_options() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCaching(true, RowsPerPartition.rows(10))) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH caching={'keys':'ALL','rows_per_partition':'10'}"); + } + + @Test + public void should_generate_create_table_size_tiered_compaction() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCompaction( + SchemaBuilder.sizeTieredCompactionStrategy() + .withBucketHigh(1.6) + .withBucketLow(0.6) + .withColdReadsToOmit(0.1) + .withMaxThreshold(33) + .withMinThreshold(5) + .withMinSSTableSizeInBytes(50000) + .withOnlyPurgeRepairedTombstones(true) + .withEnabled(false) + .withTombstoneCompactionIntervalInSeconds(86400) + .withTombstoneThreshold(0.22) + .withUncheckedTombstoneCompaction(true))) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'SizeTieredCompactionStrategy','bucket_high':1.6,'bucket_low':0.6,'cold_reads_to_omit':0.1,'max_threshold':33,'min_threshold':5,'min_sstable_size':50000,'only_purge_repaired_tombstones':true,'enabled':false,'tombstone_compaction_interval':86400,'tombstone_threshold':0.22,'unchecked_tombstone_compaction':true}"); + } + + @Test + public void should_generate_create_table_leveled_compaction() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCompaction( + SchemaBuilder.leveledCompactionStrategy() + .withSSTableSizeInMB(110) + .withTombstoneCompactionIntervalInSeconds(3600))) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'LeveledCompactionStrategy','sstable_size_in_mb':110,'tombstone_compaction_interval':3600}"); + } + + @Test + public void should_generate_create_table_time_window_compaction() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCompaction( + SchemaBuilder.timeWindowCompactionStrategy() + .withCompactionWindow(10, CompactionWindowUnit.DAYS) + .withTimestampResolution(TimestampResolution.MICROSECONDS) + .withUnsafeAggressiveSSTableExpiration(false))) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'TimeWindowCompactionStrategy','compaction_window_size':10,'compaction_window_unit':'DAYS','timestamp_resolution':'MICROSECONDS','unsafe_aggressive_sstable_expiration':false}"); + } + + @Test + public void should_generate_create_table_with_anonymous_vertex() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withComment("test") + .withVertexLabel() + .withCaching(true, RowsPerPartition.rows(10))) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) " + + "WITH VERTEX LABEL " + + "AND comment='test' " + + "AND caching={'keys':'ALL','rows_per_partition':'10'}"); + } + + @Test + public void should_generate_create_table_with_named_vertex() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withComment("test") + .withVertexLabel("b") + .withCaching(true, RowsPerPartition.rows(10))) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) " + + "WITH VERTEX LABEL b " + + "AND comment='test' " + + "AND 
caching={'keys':'ALL','rows_per_partition':'10'}"); + } + + @Test + public void should_generate_create_table_with_anonymous_edge() { + assertThat( + createDseTable("contributors") + .withPartitionKey("contributor", DataTypes.TEXT) + .withClusteringColumn("company_name", DataTypes.TEXT) + .withClusteringColumn("software_name", DataTypes.TEXT) + .withClusteringColumn("software_version", DataTypes.INT) + .withEdgeLabel( + table("person").withPartitionKey("contributor"), + table("soft") + .withPartitionKey("company_name") + .withPartitionKey("software_name") + .withClusteringColumn("software_version"))) + .hasCql( + "CREATE TABLE contributors (contributor text,company_name text,software_name text,software_version int," + + "PRIMARY KEY(contributor,company_name,software_name,software_version)) " + + "WITH EDGE LABEL " + + "FROM person(contributor) " + + "TO soft((company_name,software_name),software_version)"); + } + + @Test + public void should_generate_create_table_with_named_edge() { + assertThat( + createDseTable("contributors") + .withPartitionKey("contributor", DataTypes.TEXT) + .withClusteringColumn("company_name", DataTypes.TEXT) + .withClusteringColumn("software_name", DataTypes.TEXT) + .withClusteringColumn("software_version", DataTypes.INT) + .withClusteringOrder("company_name", ClusteringOrder.ASC) + .withEdgeLabel( + "contrib", + table("person").withPartitionKey("contributor"), + table("soft") + .withPartitionKey("company_name") + .withPartitionKey("software_name") + .withClusteringColumn("software_version"))) + .hasCql( + "CREATE TABLE contributors (contributor text,company_name text,software_name text,software_version int," + + "PRIMARY KEY(contributor,company_name,software_name,software_version)) " + + "WITH CLUSTERING ORDER BY (company_name ASC) " + + "AND EDGE LABEL contrib " + + "FROM person(contributor) " + + "TO soft((company_name,software_name),software_version)"); + } + + @Test + public void should_generate_create_table_crc_check_chance() { + assertThat( + createDseTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withCRCCheckChance(0.8)) + .hasCql("CREATE TABLE bar (k int PRIMARY KEY,v text) WITH crc_check_chance=0.8"); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/DependencyCheckTest.java b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/DependencyCheckTest.java new file mode 100644 index 00000000000..ff5dd1e66a4 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/DependencyCheckTest.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.dse.driver.internal.querybuilder; + +import com.datastax.dse.driver.internal.DependencyCheckTestBase; +import java.nio.file.Path; +import java.nio.file.Paths; + +public class DependencyCheckTest extends DependencyCheckTestBase { + + @Override + protected Path getDepsTxtPath() { + return Paths.get( + getBaseResourcePathString(), + "target", + "classes", + "com", + "datastax", + "dse", + "driver", + "internal", + "querybuilder", + "deps.txt"); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseAggregateTest.java b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseAggregateTest.java new file mode 100644 index 00000000000..22ba0d8814d --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseAggregateTest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseAggregate; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.DataTypes; +import org.junit.Test; + +/** + * Tests for creating DSE extended aggregates. Most of these tests are copied from the OSS {@code + * com.datastax.oss.driver.internal.querybuilder.schema.CreateAggregateTest} class to ensure DSE + * extended behavior does not break OSS functionality, with additional tests to verify the DSE + * specific functionality (i.e. the DETERMINISTIC keyword). 
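+ * + * <p>For a quick sense of the DSE-specific part, here is a minimal illustrative sketch (the + * keyspace, aggregate and state-function names are made up for this comment, not taken from the + * tests below): + * + * <pre>{@code + * createDseAggregate("ks", "avg") + *     .withParameter(DataTypes.INT) + *     .withSFunc("avgState") + *     .withSType(DataTypes.INT) + *     .deterministic() + *     .asCql(); + * // "CREATE AGGREGATE ks.avg (int) SFUNC avgState STYPE int DETERMINISTIC" + * }</pre>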
+ */ +public class CreateDseAggregateTest { + + @Test + public void should_create_aggregate_with_simple_param() { + + assertThat( + createDseAggregate("keyspace1", "agg1") + .withParameter(DataTypes.INT) + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE AGGREGATE keyspace1.agg1 (int) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_create_aggregate_with_many_params() { + + assertThat( + createDseAggregate("keyspace1", "agg2") + .withParameter(DataTypes.INT) + .withParameter(DataTypes.TEXT) + .withParameter(DataTypes.BOOLEAN) + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE AGGREGATE keyspace1.agg2 (int,text,boolean) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_create_aggregate_with_param_without_frozen() { + + assertThat( + createDseAggregate("keyspace1", "agg9") + .withParameter(DataTypes.tupleOf(DataTypes.TEXT)) + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE AGGREGATE keyspace1.agg9 (tuple<text>) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_create_aggregate_with_no_params() { + + assertThat( + createDseAggregate("keyspace1", "agg3") + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE AGGREGATE keyspace1.agg3 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_create_aggregate_with_no_keyspace() { + + assertThat( + createDseAggregate("agg4") + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE AGGREGATE agg4 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_create_aggregate_with_if_not_exists() { + + assertThat( + createDseAggregate("agg6") + .ifNotExists() + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE AGGREGATE IF NOT EXISTS agg6 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_create_aggregate_with_no_final_func() { + + assertThat( + createDseAggregate("cycling", "sum") + .withParameter(DataTypes.INT) + .withSFunc("dsum") + .withSType(DataTypes.INT) + .asCql()) + .isEqualTo("CREATE AGGREGATE cycling.sum (int) SFUNC dsum STYPE int"); + } + + @Test + public void should_create_or_replace() { + assertThat( + createDseAggregate("keyspace1", "agg7") + .orReplace() + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .asCql()) + .isEqualTo( + "CREATE OR REPLACE AGGREGATE keyspace1.agg7 () SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0)"); + } + + @Test + public void should_not_throw_on_toString_for_CreateAggregateStart() { + assertThat(createDseAggregate("agg1").toString()).isEqualTo("CREATE AGGREGATE agg1 ()"); + } + + @Test + 
public void should_not_throw_on_toString_for_CreateAggregateWithParam() { + assertThat(createDseAggregate("func1").withParameter(DataTypes.INT).toString()) + .isEqualTo("CREATE AGGREGATE func1 (int)"); + } + + @Test + public void should_not_throw_on_toString_for_NotExists_OrReplace() { + assertThat(createDseAggregate("func1").ifNotExists().orReplace().toString()) + .isEqualTo("CREATE OR REPLACE AGGREGATE IF NOT EXISTS func1 ()"); + } + + @Test + public void should_create_aggregate_with_deterministic() { + + assertThat( + createDseAggregate("keyspace1", "agg1") + .withParameter(DataTypes.INT) + .withSFunc("sfunction") + .withSType(DataTypes.ASCII) + .withFinalFunc("finalfunction") + .withInitCond(tuple(literal(0), literal(0))) + .deterministic() + .asCql()) + .isEqualTo( + "CREATE AGGREGATE keyspace1.agg1 (int) SFUNC sfunction STYPE ascii FINALFUNC finalfunction INITCOND (0,0) DETERMINISTIC"); + } +} diff --git a/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseFunctionTest.java b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseFunctionTest.java new file mode 100644 index 00000000000..b795b4c5251 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/dse/driver/internal/querybuilder/schema/CreateDseFunctionTest.java @@ -0,0 +1,454 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.dse.driver.internal.querybuilder.schema; + +import static com.datastax.dse.driver.api.querybuilder.DseSchemaBuilder.createDseFunction; +import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.udt; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.type.DataTypes; +import org.junit.Test; + +/** + * Tests for creating DSE extended functions. Most of these tests are copied from the OSS {@code + * com.datastax.oss.driver.internal.querybuilder.schema.CreateFunctionTest} class to ensure DSE + * extended behavior does not break OSS functionality, with additional tests to verify the DSE + * specific functionality (i.e. the DETERMINISTIC and MONOTONIC keywords). 
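+ * + * <p>As a minimal illustrative sketch of those keywords (the function and parameter names are + * made up for this comment, not taken from the tests below): + * + * <pre>{@code + * createDseFunction("f") + *     .withParameter("x", DataTypes.INT) + *     .returnsNullOnNull() + *     .returnsType(DataTypes.INT) + *     .deterministic() + *     .monotonic() + *     .toString(); + * // "CREATE FUNCTION f (x int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC" + * }</pre>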
+ */ +public class CreateDseFunctionTest { + + @Test + public void should_not_throw_on_toString_for_CreateFunctionStart() { + String funcStr = createDseFunction("func1").toString(); + assertThat(funcStr).isEqualTo("CREATE FUNCTION func1 () CALLED ON NULL INPUT"); + } + + @Test + public void should_not_throw_on_toString_for_CreateFunctionWithType() { + assertThat( + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .toString()) + .isEqualTo("CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int"); + } + + @Test + public void should_not_throw_on_toString_for_CreateFunctionWithLanguage() { + assertThat( + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .withJavaLanguage() + .toString()) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int LANGUAGE java"); + } + + @Test + public void should_create_function_with_simple_params() { + assertThat( + createDseFunction("keyspace1", "func1") + .withParameter("param1", DataTypes.INT) + .calledOnNull() + .returnsType(DataTypes.TEXT) + .withJavaLanguage() + .asQuoted("return Integer.toString(param1);") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func1 (param1 int) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void should_create_function_with_param_and_return_type_not_frozen() { + assertThat( + createDseFunction("keyspace1", "func6") + .withParameter("param1", DataTypes.tupleOf(DataTypes.INT, DataTypes.INT)) + .returnsNullOnNull() + .returnsType(udt("person", true)) + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func6 (param1 tuple<int, int>) RETURNS NULL ON NULL INPUT RETURNS person LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void should_honor_returns_null() { + assertThat( + createDseFunction("keyspace1", "func2") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withJavaLanguage() + .asQuoted("return Integer.toString(param1);") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func2 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void should_create_function_with_many_params() { + assertThat( + createDseFunction("keyspace1", "func3") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.TEXT) + .withParameter("param3", DataTypes.BOOLEAN) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withJavaLanguage() + .asQuoted("return Integer.toString(param1);") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func3 (param1 int,param2 text,param3 boolean) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void should_create_function_with_no_params() { + + assertThat( + createDseFunction("keyspace1", "func4") + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withLanguage("java") + .asQuoted("return \"hello world\";") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func4 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); + } + + @Test + public void should_create_function_with_no_keyspace() { + assertThat( + createDseFunction("func5") + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + 
.withJavaLanguage() + .asQuoted("return \"hello world\";") + .asCql()) + .isEqualTo( + "CREATE FUNCTION func5 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); + } + + @Test + public void should_create_function_with_if_not_exists() { + assertThat( + createDseFunction("keyspace1", "func6") + .ifNotExists() + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withJavaLanguage() + .asQuoted("return \"hello world\";") + .asCql()) + .isEqualTo( + "CREATE FUNCTION IF NOT EXISTS keyspace1.func6 () RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return \"hello world\";'"); + } + + @Test + public void should_create_or_replace() { + assertThat( + createDseFunction("keyspace1", "func6") + .orReplace() + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withJavaLanguage() + .asQuoted("return Integer.toString(param1);") + .asCql()) + .isEqualTo( + "CREATE OR REPLACE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void should_not_quote_body_using_as() { + assertThat( + createDseFunction("keyspace1", "func6") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote() { + assertThat( + createDseFunction("keyspace1", "func6") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .withJavaScriptLanguage() + .asQuoted("'hello ' + param1;") + .asCql()) + .isEqualTo( + "CREATE FUNCTION keyspace1.func6 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text LANGUAGE javascript AS $$ 'hello ' + param1; $$"); + } + + @Test + public void should_not_throw_on_toString_for_create_function_with_deterministic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .deterministic() + .toString(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC"); + } + + @Test + public void should_not_quote_body_using_as_with_deterministic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .deterministic() + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void + should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_deterministic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .deterministic() + .withJavaScriptLanguage() + .asQuoted("'hello ' + param1;") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC LANGUAGE javascript AS $$ 'hello ' + param1; $$"); + } + + @Test + public void 
should_not_throw_on_toString_for_create_function_with_monotonic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .monotonic() + .toString(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int MONOTONIC"); + } + + @Test + public void should_not_quote_body_using_as_with_monotonic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .monotonic() + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void + should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_monotonic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .monotonic() + .withJavaScriptLanguage() + .asQuoted("'hello ' + param1;") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC LANGUAGE javascript AS $$ 'hello ' + param1; $$"); + } + + @Test + public void should_not_throw_on_toString_for_create_function_with_monotonic_on() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .monotonicOn("param2") + .toString(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS int MONOTONIC ON param2"); + } + + @Test + public void should_not_quote_body_using_as_with_monotonic_on() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .monotonicOn("param2") + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC ON param2 LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void + should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_monotonic_on() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .monotonicOn("param2") + .withJavaScriptLanguage() + .asQuoted("'hello ' + param1;") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text MONOTONIC ON param2 LANGUAGE javascript AS $$ 'hello ' + param1; $$"); + } + + @Test + public void should_not_throw_on_toString_for_create_function_with_deterministic_and_monotonic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .deterministic() + .monotonic() + .toString(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC"); + } + + @Test + public void 
should_not_quote_body_using_as_with_deterministic_and_monotonic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .deterministic() + .monotonic() + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void + should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_deterministic_and_monotonic() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .deterministic() + .monotonic() + .withJavaScriptLanguage() + .asQuoted("'hello ' + param1;") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC LANGUAGE javascript AS $$ 'hello ' + param1; $$"); + } + + @Test + public void + should_not_throw_on_toString_for_create_function_with_deterministic_and_monotonic_on() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.INT) + .deterministic() + .monotonicOn("param2") + .toString(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS int DETERMINISTIC MONOTONIC ON param2"); + } + + @Test + public void should_not_quote_body_using_as_with_deterministic_and_monotonic_on() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .deterministic() + .monotonicOn("param2") + .withJavaLanguage() + .as("'return Integer.toString(param1);'") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC ON param2 LANGUAGE java AS 'return Integer.toString(param1);'"); + } + + @Test + public void + should_quote_with_dollar_signs_on_asQuoted_if_body_contains_single_quote_with_deterministic_and_monotonic_on() { + final String funcStr = + createDseFunction("func1") + .withParameter("param1", DataTypes.INT) + .withParameter("param2", DataTypes.INT) + .returnsNullOnNull() + .returnsType(DataTypes.TEXT) + .deterministic() + .monotonicOn("param2") + .withJavaScriptLanguage() + .asQuoted("'hello ' + param1;") + .asCql(); + assertThat(funcStr) + .isEqualTo( + "CREATE FUNCTION func1 (param1 int,param2 int) RETURNS NULL ON NULL INPUT RETURNS text DETERMINISTIC MONOTONIC ON param2 LANGUAGE javascript AS $$ 'hello ' + param1; $$"); + } +} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java index b931357fd92..21f1922f6ca 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/Assertions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java index 38cc5c9d43a..8652fc7e3c8 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java index a9ba444072b..875f957b2fb 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/BuildableQueryTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java index 5b16cc80f9b..5620bcc2fe9 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CharsetCodec.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import com.datastax.oss.driver.internal.core.type.codec.registry.DefaultCodecRegistry; import com.datastax.oss.driver.internal.querybuilder.DefaultLiteral; @@ -31,8 +34,13 @@ public class CharsetCodec implements TypeCodec<Charset> { /** A registry that contains an instance of this codec. */ - public static final CodecRegistry TEST_REGISTRY = - new DefaultCodecRegistry("test", new CharsetCodec()); + public static final CodecRegistry TEST_REGISTRY; + + static { + MutableCodecRegistry registry = new DefaultCodecRegistry("test"); + registry.register(new CharsetCodec()); + TEST_REGISTRY = registry; + } + @NonNull @Override diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java index ee8d41d467d..0d76bbea3c6 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/CqlSnippetAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/TokenLiteralTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/TokenLiteralTest.java new file mode 100644 index 00000000000..49584ea0c18 --- /dev/null +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/TokenLiteralTest.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.querybuilder; + +import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; + +import com.datastax.oss.driver.internal.core.metadata.token.ByteOrderedTokenFactory; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory; +import com.datastax.oss.driver.internal.core.metadata.token.RandomTokenFactory; +import org.junit.Test; + +public class TokenLiteralTest { + + @Test + public void should_inline_murmur3_token_literal() { + assertThat( + selectFrom("test") + .all() + .whereToken("pk") + .isEqualTo(literal(Murmur3TokenFactory.MIN_TOKEN))) + .hasCql("SELECT * FROM test WHERE token(pk)=-9223372036854775808"); + } + + @Test + public void should_inline_byte_ordered_token_literal() { + assertThat( + selectFrom("test") + .all() + .whereToken("pk") + .isEqualTo(literal(ByteOrderedTokenFactory.MIN_TOKEN))) + .hasCql("SELECT * FROM test WHERE token(pk)=0x"); + } + + @Test + public void should_inline_random_token_literal() { + assertThat( + selectFrom("test") + .all() + .whereToken("pk") + .isEqualTo(literal(RandomTokenFactory.MIN_TOKEN))) + .hasCql("SELECT * FROM test WHERE token(pk)=-1"); + } +} diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java index 06296960a69..08e4a42a568 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/condition/ConditionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java index 2a75bbaa8d7..6d967f87a33 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentConditionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java index fa8afdf5a67..e42038029a3 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteFluentRelationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java index 5d9cb05a914..9b0dead3845 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteIdempotenceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java index 7c3f7685bb5..cce4cf51a10 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteSelectorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; +import com.datastax.oss.driver.api.core.data.CqlVector; import org.junit.Test; public class DeleteSelectorTest { @@ -32,6 +35,16 @@ public void should_generate_column_deletion() { .hasCql("DELETE v FROM ks.foo WHERE k=?"); } + @Test + public void should_generate_vector_deletion() { + assertThat( + deleteFrom("foo") + .column("v") + .whereColumn("k") + .isEqualTo(literal(CqlVector.newInstance(0.1, 0.2)))) + .hasCql("DELETE v FROM foo WHERE k=[0.1, 0.2]"); + } + @Test public void should_generate_field_deletion() { assertThat( diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java index 27c343694c7..daa4ece66e6 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/delete/DeleteTimestampTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java index 57fd40152fa..37baf2f91ed 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/InsertIdempotenceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java index 5bfd7eea59a..8fa9dcddc33 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/JsonInsertTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java index 30e39c4836a..89c833ff1c6 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/insert/RegularInsertTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,19 +21,17 @@ import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; +import static org.assertj.core.api.Assertions.catchThrowable; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.querybuilder.term.Term; import com.datastax.oss.driver.internal.querybuilder.insert.DefaultInsert; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import java.util.Map; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class RegularInsertTest { - @Rule public ExpectedException thrown = ExpectedException.none(); - @Test public void should_generate_column_assignments() { assertThat(insertInto("foo").value("a", literal(1)).value("b", literal(2))) @@ -42,6 +42,12 @@ public void should_generate_column_assignments() { .hasCql("INSERT INTO foo (a,b) VALUES (?,?)"); } + @Test + public void should_generate_vector_literals() { + assertThat(insertInto("foo").value("a", literal(CqlVector.newInstance(0.1, 0.2, 0.3)))) + .hasCql("INSERT INTO foo (a) VALUES ([0.1, 0.2, 0.3])"); + } + @Test public void should_keep_last_assignment_if_column_listed_twice() { assertThat( @@ -123,18 +129,22 @@ public void should_throw_exception_with_invalid_ttl() { DefaultInsert defaultInsert = (DefaultInsert) insertInto("foo").value("a", bindMarker()).usingTtl(10); - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("TTL value must be a BindMarker or an Integer"); - - new DefaultInsert( - defaultInsert.getKeyspace(), - defaultInsert.getTable(), - (Term) defaultInsert.getJson(), - defaultInsert.getMissingJsonBehavior(), - defaultInsert.getAssignments(), - defaultInsert.getTimestamp(), - new Object(), // invalid TTL object - defaultInsert.isIfNotExists()); + Throwable t = + catchThrowable( + () -> + new DefaultInsert( + defaultInsert.getKeyspace(), + defaultInsert.getTable(), + (Term) defaultInsert.getJson(), + defaultInsert.getMissingJsonBehavior(), + defaultInsert.getAssignments(), + defaultInsert.getTimestamp(), + new Object(), // invalid TTL object + defaultInsert.isIfNotExists())); + + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("TTL value must be a BindMarker or an Integer"); } @Test @@ -142,17 +152,20 @@ public void should_throw_exception_with_invalid_timestamp() { DefaultInsert defaultInsert = (DefaultInsert) insertInto("foo").value("a", bindMarker()).usingTimestamp(1); - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("TIMESTAMP value must be a BindMarker or a Long"); - - new DefaultInsert( - defaultInsert.getKeyspace(), - defaultInsert.getTable(), - (Term) defaultInsert.getJson(), - defaultInsert.getMissingJsonBehavior(), - defaultInsert.getAssignments(), - new Object(), // invalid timestamp object) - defaultInsert.getTtlInSeconds(), - defaultInsert.isIfNotExists()); + Throwable t = + catchThrowable( + () -> + new DefaultInsert( + defaultInsert.getKeyspace(), + defaultInsert.getTable(), + (Term) defaultInsert.getJson(), + defaultInsert.getMissingJsonBehavior(), + 
defaultInsert.getAssignments(), + new Object(), // invalid timestamp object) + defaultInsert.getTtlInSeconds(), + defaultInsert.isIfNotExists())); + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("TIMESTAMP value must be a BindMarker or a Long"); } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java index f3ff81ed188..ec121eaa050 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/RelationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +19,12 @@ import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.tuple; +import org.assertj.core.util.Lists; import org.junit.Test; public class RelationTest { @@ -40,13 +44,78 @@ public void should_generate_is_not_null_relation() { } @Test - public void should_generate_in_relation() { + public void should_generate_contains_relation() { + assertThat(selectFrom("foo").all().where(Relation.column("k").contains(literal(1)))) + .hasCql("SELECT * FROM foo WHERE k CONTAINS 1"); + } + + @Test + public void should_generate_contains_key_relation() { + assertThat(selectFrom("foo").all().where(Relation.column("k").containsKey(literal(1)))) + .hasCql("SELECT * FROM foo WHERE k CONTAINS KEY 1"); + } + + @Test + public void should_generate_not_contains_relation() { + assertThat(selectFrom("foo").all().where(Relation.column("k").notContains(literal(1)))) + .hasCql("SELECT * FROM foo WHERE k NOT CONTAINS 1"); + } + + @Test + public void should_generate_not_contains_key_relation() { + assertThat(selectFrom("foo").all().where(Relation.column("k").notContainsKey(literal(1)))) + .hasCql("SELECT * FROM foo WHERE k NOT CONTAINS KEY 1"); + } + + @Test + public void should_generate_in_relation_bind_markers() { assertThat(selectFrom("foo").all().where(Relation.column("k").in(bindMarker()))) .hasCql("SELECT * FROM foo WHERE k IN ?"); assertThat(selectFrom("foo").all().where(Relation.column("k").in(bindMarker(), bindMarker()))) .hasCql("SELECT * FROM foo WHERE k IN (?,?)"); } + @Test + public void 
should_generate_in_relation_terms() { + assertThat( + selectFrom("foo") + .all() + .where( + Relation.column("k") + .in(Lists.newArrayList(literal(1), literal(2), literal(3))))) + .hasCql("SELECT * FROM foo WHERE k IN (1,2,3)"); + assertThat( + selectFrom("foo") + .all() + .where(Relation.column("k").in(literal(1), literal(2), literal(3)))) + .hasCql("SELECT * FROM foo WHERE k IN (1,2,3)"); + } + + @Test + public void should_generate_not_in_relation_bind_markers() { + assertThat(selectFrom("foo").all().where(Relation.column("k").notIn(bindMarker()))) + .hasCql("SELECT * FROM foo WHERE k NOT IN ?"); + assertThat( + selectFrom("foo").all().where(Relation.column("k").notIn(bindMarker(), bindMarker()))) + .hasCql("SELECT * FROM foo WHERE k NOT IN (?,?)"); + } + + @Test + public void should_generate_not_in_relation_terms() { + assertThat( + selectFrom("foo") + .all() + .where( + Relation.column("k") + .notIn(Lists.newArrayList(literal(1), literal(2), literal(3))))) + .hasCql("SELECT * FROM foo WHERE k NOT IN (1,2,3)"); + assertThat( + selectFrom("foo") + .all() + .where(Relation.column("k").notIn(literal(1), literal(2), literal(3)))) + .hasCql("SELECT * FROM foo WHERE k NOT IN (1,2,3)"); + } + @Test public void should_generate_token_relation() { assertThat(selectFrom("foo").all().where(Relation.token("k1", "k2").isEqualTo(bindMarker("t")))) diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java index 320b7c827b8..61d09ecea7b 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/relation/TermTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -116,7 +118,7 @@ public void should_generate_literal_terms() { @Test public void should_fail_when_no_codec_for_literal() { - assertThatThrownBy(() -> literal(new Date(2018, 10, 10))) + assertThatThrownBy(() -> literal(new Date(1234))) .isInstanceOf(IllegalArgumentException.class) .hasMessage( "Could not inline literal of type java.util.Date. " diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java index 6d56be76c29..3c1b8ca7af1 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java index 360b6f35183..ef131a255a7 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterMaterializedViewTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java index 16db985ba9c..2c99b154b38 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTableTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,6 +42,12 @@ public void should_generate_alter_table_with_add_single_column() { .hasCql("ALTER TABLE foo.bar ADD x text"); } + @Test + public void should_generate_alter_table_with_add_single_column_static() { + assertThat(alterTable("foo", "bar").addStaticColumn("x", DataTypes.TEXT)) + .hasCql("ALTER TABLE foo.bar ADD x text STATIC"); + } + @Test public void should_generate_alter_table_with_add_three_columns() { assertThat( @@ -100,4 +108,10 @@ public void should_generate_alter_table_with_no_compression() { assertThat(alterTable("bar").withNoCompression()) .hasCql("ALTER TABLE bar WITH compression={'sstable_compression':''}"); } + + @Test + public void should_generate_alter_table_with_vector() { + assertThat(alterTable("bar").alterColumn("v", DataTypes.vectorOf(DataTypes.FLOAT, 3))) + .hasCql("ALTER TABLE bar ALTER v TYPE vector<float, 3>"); + } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java index 6ae49c8533c..14bec0a6ce3 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/AlterTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,4 +53,10 @@ public void should_generate_alter_table_with_rename_three_columns() { assertThat(alterType("bar").renameField("x", "y").renameField("u", "v").renameField("b", "a")) .hasCql("ALTER TYPE bar RENAME x TO y AND u TO v AND b TO a"); } + + @Test + public void should_generate_alter_type_with_vector() { + assertThat(alterType("foo", "bar").alterField("vec", DataTypes.vectorOf(DataTypes.FLOAT, 3))) + .hasCql("ALTER TYPE foo.bar ALTER vec TYPE vector<float, 3>"); + } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java index f9dcf41d41a..00e41dd87c0 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateAggregateTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java index 02a91fd627d..18c9813a5cf 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateFunctionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java index d654219f23c..03d3bfd4223 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateIndexTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java index 3c067aa9e3c..a11f9df94a1 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java index 88ff1dafd8f..9c5180429b3 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateMaterializedViewTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java index 16f4c2e0d10..31efc278472 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTableTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,6 +28,7 @@ import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.CompactionWindowUnit; import com.datastax.oss.driver.api.querybuilder.schema.compaction.TimeWindowCompactionStrategy.TimestampResolution; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; +import java.nio.charset.StandardCharsets; import org.junit.Test; public class CreateTableTest { @@ -167,6 +170,12 @@ public void should_generate_create_table_with_options() { .withComment("Hello world") .withDcLocalReadRepairChance(0.54) .withDefaultTimeToLiveSeconds(86400) + .withExtensions( + ImmutableMap.of( + "key1", + "apache".getBytes(StandardCharsets.UTF_8), + "key2", + "cassandra".getBytes(StandardCharsets.UTF_8))) .withGcGraceSeconds(864000) .withMemtableFlushPeriodInMs(10000) .withMinIndexInterval(1024) @@ -174,7 +183,7 @@ public void should_generate_create_table_with_options() { .withReadRepairChance(0.55) .withSpeculativeRetry("99percentile")) .hasCql( - "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH bloom_filter_fp_chance=0.42 AND cdc=false AND comment='Hello world' AND dclocal_read_repair_chance=0.54 AND default_time_to_live=86400 AND gc_grace_seconds=864000 AND memtable_flush_period_in_ms=10000 AND min_index_interval=1024 AND max_index_interval=4096 AND read_repair_chance=0.55 AND speculative_retry='99percentile'"); + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH bloom_filter_fp_chance=0.42 AND cdc=false AND comment='Hello world' AND dclocal_read_repair_chance=0.54 AND default_time_to_live=86400 AND extensions={'key1':0x617061636865,'key2':0x63617373616e647261} AND gc_grace_seconds=864000 AND memtable_flush_period_in_ms=10000 AND min_index_interval=1024 AND max_index_interval=4096 AND read_repair_chance=0.55 AND speculative_retry='99percentile'"); } @Test @@ -190,6 +199,17 @@ public void should_generate_create_table_lz4_compression() { @Test public void should_generate_create_table_lz4_compression_options() { + assertThat( + createTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withLZ4Compression(1024)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024}"); + } + + @Test + public void should_generate_create_table_lz4_compression_options_crc() { assertThat( createTable("bar") .withPartitionKey("k", DataTypes.INT) @@ -199,6 +219,28 @@ public void should_generate_create_table_lz4_compression_options() { "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}"); } + @Test + public void 
should_generate_create_table_zstd_compression() { + assertThat( + createTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withZstdCompression()) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor'}"); + } + + @Test + public void should_generate_create_table_zstd_compression_options() { + assertThat( + createTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withZstdCompression(1024)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'ZstdCompressor','chunk_length_in_kb':1024}"); + } + @Test public void should_generate_create_table_snappy_compression() { assertThat( @@ -212,6 +254,17 @@ public void should_generate_create_table_snappy_compression() { @Test public void should_generate_create_table_snappy_compression_options() { + assertThat( + createTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withSnappyCompression(2048)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}"); + } + + @Test + public void should_generate_create_table_snappy_compression_options_crc() { assertThat( createTable("bar") .withPartitionKey("k", DataTypes.INT) @@ -234,6 +287,17 @@ public void should_generate_create_table_deflate_compression() { @Test public void should_generate_create_table_deflate_compression_options() { + assertThat( + createTable("bar") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.TEXT) + .withDeflateCompression(4096)) + .hasCql( + "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}"); + } + + @Test + public void should_generate_create_table_deflate_compression_options_crc() { assertThat( createTable("bar") .withPartitionKey("k", DataTypes.INT) @@ -305,4 +369,13 @@ public void should_generate_create_table_time_window_compaction() { .hasCql( "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compaction={'class':'TimeWindowCompactionStrategy','compaction_window_size':10,'compaction_window_unit':'DAYS','timestamp_resolution':'MICROSECONDS','unsafe_aggressive_sstable_expiration':false}"); } + + @Test + public void should_generate_vector_column() { + assertThat( + createTable("foo") + .withPartitionKey("k", DataTypes.INT) + .withColumn("v", DataTypes.vectorOf(DataTypes.FLOAT, 3))) + .hasCql("CREATE TABLE foo (k int PRIMARY KEY,v vector<float, 3>)"); + } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java index 7015d49067c..f7c15788a0f 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/CreateTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -81,4 +83,13 @@ public void should_create_type_with_collections() { .withField("map", DataTypes.mapOf(DataTypes.INT, DataTypes.TEXT))) .hasCql("CREATE TYPE ks1.type (map map<int, text>)"); } + + @Test + public void should_create_type_with_vector() { + assertThat( + createType("ks1", "type") + .withField("c1", DataTypes.INT) + .withField("vec", DataTypes.vectorOf(DataTypes.FLOAT, 3))) + .hasCql("CREATE TYPE ks1.type (c1 int,vec vector<float, 3>)"); + } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java index 5af944238ff..875ed7d7432 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropAggregateTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java index 6a50f126805..3157212a271 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropFunctionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java index 46efe7c5f37..150b52c86e3 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropIndexTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java index fa84c4e6783..4f124f0bc04 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropKeyspaceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java index a323de03c4c..054b0b2e3f7 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropMaterializedViewTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java index 0a986077f1c..79f655346c5 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTableTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java index cb9613f94b4..a2c5e35054a 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/schema/DropTypeTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java index 79c89feeb30..73220cfe0d7 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectAllowFilteringTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java index 884454d91f9..497cd7876c0 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectFluentRelationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java index 12d6d2e3223..368b9dfc480 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectGroupByTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java index cba2e83d04d..d617aa5673f 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectLimitTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java index 348c735764c..a9c618e9559 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectOrderingTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,6 +23,7 @@ import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.querybuilder.relation.Relation; import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap; import org.junit.Test; @@ -72,4 +75,23 @@ public void should_replace_previous_ordering() { .orderBy(ImmutableMap.of("c1", DESC, "c2", ASC))) .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c3 ASC,c1 DESC,c2 ASC"); } + + @Test + public void should_generate_ann_clause() { + assertThat( + selectFrom("foo") + .all() + .where(Relation.column("k").isEqualTo(literal(1))) + .orderByAnnOf("c1", CqlVector.newInstance(0.1, 0.2, 0.3))) + .hasCql("SELECT * FROM foo WHERE k=1 ORDER BY c1 ANN OF [0.1, 0.2, 0.3]"); + } + + @Test(expected = IllegalArgumentException.class) + public void should_fail_when_provided_ann_with_other_orderings() { + selectFrom("foo") + .all() + .where(Relation.column("k").isEqualTo(literal(1))) + .orderBy("c1", ASC) + .orderByAnnOf("c2", CqlVector.newInstance(0.1, 0.2, 0.3)); + } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java index 42fa35bcd86..7e03627d4b7 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/select/SelectSelectorTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +22,7 @@ import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.raw; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; +import com.datastax.oss.driver.api.core.data.CqlVector; import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException; import com.datastax.oss.driver.api.querybuilder.CharsetCodec; @@ -228,6 +231,48 @@ public void should_generate_raw_selector() { .hasCql("SELECT bar,baz FROM foo"); } + @Test + public void should_generate_similarity_functions() { + Select similarity_cosine_clause = + selectFrom("cycling", "comments_vs") + .column("comment") + .function( + "similarity_cosine", + Selector.column("comment_vector"), + literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) + .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) + .limit(1); + assertThat(similarity_cosine_clause) + .hasCql( + "SELECT comment,similarity_cosine(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1"); + + Select similarity_euclidean_clause = + selectFrom("cycling", "comments_vs") + .column("comment") + .function( + "similarity_euclidean", + Selector.column("comment_vector"), + literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) + .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) + .limit(1); + assertThat(similarity_euclidean_clause) + .hasCql( + "SELECT comment,similarity_euclidean(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1"); + + Select similarity_dot_product_clause = + selectFrom("cycling", "comments_vs") + .column("comment") + .function( + "similarity_dot_product", + Selector.column("comment_vector"), + literal(CqlVector.newInstance(0.2, 0.15, 0.3, 0.2, 0.05))) + .orderByAnnOf("comment_vector", CqlVector.newInstance(0.1, 0.15, 0.3, 0.12, 0.05)) + .limit(1); + assertThat(similarity_dot_product_clause) + .hasCql( + "SELECT comment,similarity_dot_product(comment_vector,[0.2, 0.15, 0.3, 0.2, 0.05]) FROM cycling.comments_vs ORDER BY comment_vector ANN OF [0.1, 0.15, 0.3, 0.12, 0.05] LIMIT 1"); + } + @Test public void should_alias_selectors() { assertThat(selectFrom("foo").column("bar").as("baz")).hasCql("SELECT bar AS baz FROM foo"); diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java index a556457b297..f4c8d22a294 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/truncate/TruncateTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java index 184ad2e2dbf..34f2538587e 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentAssignmentTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -60,21 +62,31 @@ public void should_generate_map_value_assignment() { .hasCql("UPDATE foo SET features['color']=? WHERE k=?"); } + @Test + public void should_generate_list_value_assignment() { + assertThat( + update("foo") + .setListValue("features", literal(1), bindMarker()) + .whereColumn("k") + .isEqualTo(bindMarker())) + .hasCql("UPDATE foo SET features[1]=? WHERE k=?"); + } + @Test public void should_generate_counter_operations() { assertThat(update("foo").increment("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c+=1 WHERE k=?"); + .hasCql("UPDATE foo SET c=c+1 WHERE k=?"); assertThat(update("foo").increment("c", literal(2)).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c+=2 WHERE k=?"); + .hasCql("UPDATE foo SET c=c+2 WHERE k=?"); assertThat(update("foo").increment("c", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c+=? WHERE k=?"); + .hasCql("UPDATE foo SET c=c+? WHERE k=?"); assertThat(update("foo").decrement("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c-=1 WHERE k=?"); + .hasCql("UPDATE foo SET c=c-1 WHERE k=?"); assertThat(update("foo").decrement("c", literal(2)).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c-=2 WHERE k=?"); + .hasCql("UPDATE foo SET c=c-2 WHERE k=?"); assertThat(update("foo").decrement("c", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c-=? WHERE k=?"); + .hasCql("UPDATE foo SET c=c-? 
WHERE k=?"); } @Test @@ -82,15 +94,15 @@ public void should_generate_list_operations() { Literal listLiteral = literal(ImmutableList.of(1, 2, 3)); assertThat(update("foo").append("l", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l+=? WHERE k=?"); + .hasCql("UPDATE foo SET l=l+? WHERE k=?"); assertThat(update("foo").append("l", listLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l+=[1,2,3] WHERE k=?"); + .hasCql("UPDATE foo SET l=l+[1,2,3] WHERE k=?"); assertThat( update("foo") .appendListElement("l", bindMarker()) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l+=[?] WHERE k=?"); + .hasCql("UPDATE foo SET l=l+[?] WHERE k=?"); assertThat(update("foo").prepend("l", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) .hasCql("UPDATE foo SET l=?+l WHERE k=?"); @@ -104,15 +116,15 @@ public void should_generate_list_operations() { .hasCql("UPDATE foo SET l=[?]+l WHERE k=?"); assertThat(update("foo").remove("l", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l-=? WHERE k=?"); + .hasCql("UPDATE foo SET l=l-? WHERE k=?"); assertThat(update("foo").remove("l", listLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l-=[1,2,3] WHERE k=?"); + .hasCql("UPDATE foo SET l=l-[1,2,3] WHERE k=?"); assertThat( update("foo") .removeListElement("l", bindMarker()) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l-=[?] WHERE k=?"); + .hasCql("UPDATE foo SET l=l-[?] WHERE k=?"); } @Test @@ -120,15 +132,15 @@ public void should_generate_set_operations() { Literal setLiteral = literal(ImmutableSet.of(1, 2, 3)); assertThat(update("foo").append("s", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s+=? WHERE k=?"); + .hasCql("UPDATE foo SET s=s+? WHERE k=?"); assertThat(update("foo").append("s", setLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s+={1,2,3} WHERE k=?"); + .hasCql("UPDATE foo SET s=s+{1,2,3} WHERE k=?"); assertThat( update("foo") .appendSetElement("s", bindMarker()) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s+={?} WHERE k=?"); + .hasCql("UPDATE foo SET s=s+{?} WHERE k=?"); assertThat(update("foo").prepend("s", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) .hasCql("UPDATE foo SET s=?+s WHERE k=?"); @@ -142,15 +154,15 @@ public void should_generate_set_operations() { .hasCql("UPDATE foo SET s={?}+s WHERE k=?"); assertThat(update("foo").remove("s", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s-=? WHERE k=?"); + .hasCql("UPDATE foo SET s=s-? WHERE k=?"); assertThat(update("foo").remove("s", setLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s-={1,2,3} WHERE k=?"); + .hasCql("UPDATE foo SET s=s-{1,2,3} WHERE k=?"); assertThat( update("foo") .removeSetElement("s", bindMarker()) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s-={?} WHERE k=?"); + .hasCql("UPDATE foo SET s=s-{?} WHERE k=?"); } @Test @@ -158,15 +170,15 @@ public void should_generate_map_operations() { Literal mapLiteral = literal(ImmutableMap.of(1, "foo", 2, "bar")); assertThat(update("foo").append("m", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m+=? WHERE k=?"); + .hasCql("UPDATE foo SET m=m+? 
WHERE k=?"); assertThat(update("foo").append("m", mapLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m+={1:'foo',2:'bar'} WHERE k=?"); + .hasCql("UPDATE foo SET m=m+{1:'foo',2:'bar'} WHERE k=?"); assertThat( update("foo") .appendMapEntry("m", literal(1), literal("foo")) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m+={1:'foo'} WHERE k=?"); + .hasCql("UPDATE foo SET m=m+{1:'foo'} WHERE k=?"); assertThat(update("foo").prepend("m", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) .hasCql("UPDATE foo SET m=?+m WHERE k=?"); @@ -180,14 +192,14 @@ public void should_generate_map_operations() { .hasCql("UPDATE foo SET m={1:'foo'}+m WHERE k=?"); assertThat(update("foo").remove("m", bindMarker()).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m-=? WHERE k=?"); + .hasCql("UPDATE foo SET m=m-? WHERE k=?"); assertThat(update("foo").remove("m", mapLiteral).whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m-={1:'foo',2:'bar'} WHERE k=?"); + .hasCql("UPDATE foo SET m=m-{1:'foo',2:'bar'} WHERE k=?"); assertThat( update("foo") .removeMapEntry("m", literal(1), literal("foo")) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m-={1:'foo'} WHERE k=?"); + .hasCql("UPDATE foo SET m=m-{1:'foo'} WHERE k=?"); } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java index 8a562431510..3f333e0ef86 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentConditionTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java index 86559ea65f1..9d67d0b9819 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateFluentRelationTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java index 1f424484ee6..6727e5856ef 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateIdempotenceTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -90,10 +92,10 @@ public void should_not_be_idempotent_if_using_non_idempotent_term_in_relation() @Test public void should_not_be_idempotent_if_updating_counter() { assertThat(update("foo").increment("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c+=1 WHERE k=?") + .hasCql("UPDATE foo SET c=c+1 WHERE k=?") .isNotIdempotent(); assertThat(update("foo").decrement("c").whereColumn("k").isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET c-=1 WHERE k=?") + .hasCql("UPDATE foo SET c=c-1 WHERE k=?") .isNotIdempotent(); } @@ -104,7 +106,7 @@ public void should_not_be_idempotent_if_adding_element_to_list() { .appendListElement("l", literal(1)) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l+=[1] WHERE k=?") + .hasCql("UPDATE foo SET l=l+[1] WHERE k=?") .isNotIdempotent(); assertThat( update("foo") @@ -120,14 +122,37 @@ public void should_not_be_idempotent_if_adding_element_to_list() { .appendSetElement("s", literal(1)) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET s+={1} WHERE k=?") + .hasCql("UPDATE foo SET s=s+{1} WHERE k=?") .isIdempotent(); assertThat( update("foo") .appendMapEntry("m", literal(1), literal("bar")) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET m+={1:'bar'} WHERE k=?") + .hasCql("UPDATE foo SET m=m+{1:'bar'} WHERE k=?") + .isIdempotent(); + + // Also, removals are always safe: + assertThat( + update("foo") + .removeListElement("l", literal(1)) + .whereColumn("k") + .isEqualTo(bindMarker())) + .hasCql("UPDATE foo SET 
l=l-[1] WHERE k=?") + .isIdempotent(); + assertThat( + update("foo") + .removeSetElement("s", literal(1)) + .whereColumn("k") + .isEqualTo(bindMarker())) + .hasCql("UPDATE foo SET s=s-{1} WHERE k=?") + .isIdempotent(); + assertThat( + update("foo") + .removeMapEntry("m", literal(1), literal("bar")) + .whereColumn("k") + .isEqualTo(bindMarker())) + .hasCql("UPDATE foo SET m=m-{1:'bar'} WHERE k=?") .isIdempotent(); } @@ -138,7 +163,7 @@ public void should_not_be_idempotent_if_concatenating_to_collection() { .append("l", literal(Arrays.asList(1, 2, 3))) .whereColumn("k") .isEqualTo(bindMarker())) - .hasCql("UPDATE foo SET l+=[1,2,3] WHERE k=?") + .hasCql("UPDATE foo SET l=l+[1,2,3] WHERE k=?") .isNotIdempotent(); assertThat( update("foo") @@ -147,5 +172,20 @@ public void should_not_be_idempotent_if_concatenating_to_collection() { .isEqualTo(bindMarker())) .hasCql("UPDATE foo SET l=[1,2,3]+l WHERE k=?") .isNotIdempotent(); + // However, removals are always safe: + assertThat( + update("foo") + .remove("l", literal(Arrays.asList(1, 2, 3))) + .whereColumn("k") + .isEqualTo(bindMarker())) + .hasCql("UPDATE foo SET l=l-[1,2,3] WHERE k=?") + .isIdempotent(); + } + + @Test + public void should_be_idempotent_if_relation_does_not_have_right_operand() { + assertThat(update("foo").setColumn("col1", literal(42)).whereColumn("col2").isNotNull()) + .hasCql("UPDATE foo SET col1=42 WHERE col2 IS NOT NULL") + .isIdempotent(); } } diff --git a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java index 591a53f2200..00006370f97 100644 --- a/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java +++ b/query-builder/src/test/java/com/datastax/oss/driver/api/querybuilder/update/UpdateUsingTest.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,16 +20,13 @@ import static com.datastax.oss.driver.api.querybuilder.Assertions.assertThat; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.update; +import static org.assertj.core.api.Assertions.catchThrowable; import com.datastax.oss.driver.internal.querybuilder.update.DefaultUpdate; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class UpdateUsingTest { - @Rule public ExpectedException thrown = ExpectedException.none(); - @Test public void should_generate_using_timestamp_clause() { assertThat( @@ -129,18 +128,22 @@ public void should_throw_exception_with_invalid_ttl() { .whereColumn("k") .isEqualTo(bindMarker()); - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("TTL value must be a BindMarker or an Integer"); + Throwable t = + catchThrowable( + () -> + new DefaultUpdate( + defaultUpdate.getKeyspace(), + defaultUpdate.getTable(), + defaultUpdate.getTimestamp(), + new Object(), // invalid TTL object + defaultUpdate.getAssignments(), + defaultUpdate.getRelations(), + defaultUpdate.isIfExists(), + defaultUpdate.getConditions())); - new DefaultUpdate( - defaultUpdate.getKeyspace(), - defaultUpdate.getTable(), - defaultUpdate.getTimestamp(), - new Object(), // invalid TTL object - defaultUpdate.getAssignments(), - defaultUpdate.getRelations(), - defaultUpdate.isIfExists(), - defaultUpdate.getConditions()); + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("TTL value must be a BindMarker or an Integer"); } @Test @@ -153,17 +156,20 @@ public void should_throw_exception_with_invalid_timestamp() { .whereColumn("k") .isEqualTo(bindMarker()); - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("TIMESTAMP value must be a BindMarker or a Long"); - - new DefaultUpdate( - defaultUpdate.getKeyspace(), - defaultUpdate.getTable(), - new Object(), // invalid timestamp object - defaultUpdate.getTtl(), - defaultUpdate.getAssignments(), - defaultUpdate.getRelations(), - defaultUpdate.isIfExists(), - defaultUpdate.getConditions()); + Throwable t = + catchThrowable( + () -> + new DefaultUpdate( + defaultUpdate.getKeyspace(), + defaultUpdate.getTable(), + new Object(), // invalid timestamp object + defaultUpdate.getTtl(), + defaultUpdate.getAssignments(), + defaultUpdate.getRelations(), + defaultUpdate.isIfExists(), + defaultUpdate.getConditions())); + assertThat(t) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("TIMESTAMP value must be a BindMarker or a Long"); } } diff --git a/query-builder/src/test/resources/project.properties b/query-builder/src/test/resources/project.properties new file mode 100644 index 00000000000..66eab90b6e4 --- /dev/null +++ b/query-builder/src/test/resources/project.properties @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +project.basedir=${basedir} \ No newline at end of file diff --git a/test-infra/pom.xml b/test-infra/pom.xml index 828f24fc144..5bf2d07f652 100644 --- a/test-infra/pom.xml +++ b/test-infra/pom.xml @@ -1,12 +1,15 @@
+<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+  agreements. See the NOTICE file distributed with this work for additional information regarding
+  copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with the License. You may obtain
+  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+  applicable law or agreed to in writing, software distributed under the License is distributed on
+  an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
+  the License for the specific language governing permissions and limitations under the License. -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>com.datastax.oss</groupId>
+    <groupId>org.apache.cassandra</groupId>
     <artifactId>java-driver-parent</artifactId>
-    <version>4.1.1-SNAPSHOT</version>
+    <version>4.19.3-SNAPSHOT</version>
   </parent>
-
   <artifactId>java-driver-test-infra</artifactId>
   <packaging>bundle</packaging>
-
-  <name>DataStax Java driver for Apache Cassandra(R) - test infrastructure tools</name>
-
+  <name>Apache Cassandra Java Driver - test infrastructure tools</name>
+
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <groupId>${project.groupId}</groupId>
+        <artifactId>java-driver-bom</artifactId>
+        <version>${project.version}</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+
   <dependencies>
     <dependency>
-      <groupId>com.datastax.oss</groupId>
+      <groupId>org.apache.cassandra</groupId>
       <artifactId>java-driver-core</artifactId>
       <version>${project.parent.version}</version>
     </dependency>
     <dependency>
       <groupId>com.github.spotbugs</groupId>
       <artifactId>spotbugs-annotations</artifactId>
-      <optional>true</optional>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
@@ -48,18 +58,49 @@
     <dependency>
       <groupId>org.assertj</groupId>
       <artifactId>assertj-core</artifactId>
     </dependency>
+    <dependency>
       <groupId>com.datastax.oss.simulacron</groupId>
       <artifactId>simulacron-native-server</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-exec</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.awaitility</groupId>
+      <artifactId>awaitility</artifactId>
     </dependency>
   </dependencies>
+  <build>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+      </resource>
+      <resource>
+        <directory>${project.basedir}/..</directory>
+        <includes>
+          <include>LICENSE</include>
+          <include>NOTICE_binary.txt</include>
+          <include>NOTICE.txt</include>
+        </includes>
+        <targetPath>META-INF</targetPath>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifestEntries>
+              <Automatic-Module-Name>com.datastax.oss.driver.tests.infrastructure</Automatic-Module-Name>
+            </manifestEntries>
+          </archive>
+        </configuration>
+      </plugin>
       <plugin>
         <groupId>org.apache.felix</groupId>
         <artifactId>maven-bundle-plugin</artifactId>
@@ -68,11 +109,7 @@
             <Bundle-SymbolicName>com.datastax.oss.driver.testinfra</Bundle-SymbolicName>
             <Import-Package>*</Import-Package>
-            <Export-Package>
-              com.datastax.oss.driver.*.testinfra.*,
-              com.datastax.oss.driver.assertions,
-              com.datastax.oss.driver.categories
-            </Export-Package>
+            <Export-Package>com.datastax.oss.driver.*.testinfra.*, com.datastax.oss.driver.assertions, com.datastax.oss.driver.categories</Export-Package>
diff --git a/test-infra/revapi.json b/test-infra/revapi.json index ebcb74d4683..293d9f4d142 100644 --- a/test-infra/revapi.json +++ b/test-infra/revapi.json @@ -1,5 +1,3 @@ -// Configures Revapi (https://revapi.org/getting-started.html) to check API compatibility between -// successive driver versions. { "revapi": { "java": { @@ -7,13 +5,13 @@ "packages": { "regex": true, "exclude": [ - "com\\.datastax\\.oss\\.protocol\\.internal(\\..+)?", - "com\\.datastax\\.oss\\.driver\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.protocol\\.internal(\\..+)?", + "com\\.datastax\\.(oss|dse)\\.driver\\.internal(\\..+)?", "com\\.datastax\\.oss\\.driver\\.shaded(\\..+)?", "com\\.datastax\\.oss\\.simulacron(\\..+)?", "org\\.assertj(\\..+)?", - // Don't re-check sibling modules that this module depends on - "com\\.datastax\\.oss\\.driver\\.api\\.core(\\..+)?" + "// Don't re-check sibling modules that this module depends on", + "com\\.datastax\\.(oss|dse)\\.driver\\.api\\.core(\\..+)?"
] } } @@ -42,6 +40,156 @@ "old": "method com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoaderBuilder com.datastax.oss.driver.api.testinfra.session.SessionUtils::configLoaderBuilder()", "new": "method com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder com.datastax.oss.driver.api.testinfra.session.SessionUtils::configLoaderBuilder()", "justification": "JAVA-2201: Expose a public API for programmatic config" + }, + { + "code": "java.annotation.removed", + "old": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", + "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "Method arguments were mistakenly annotated with @NonNull" + }, + { + "code": "java.annotation.added", + "old": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", + "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(===com.datastax.oss.driver.api.core.session.Request===, com.datastax.oss.driver.api.core.session.Session)", + "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", + "justification": "Method arguments were mistakenly annotated with @NonNull" + }, + { + "code": "java.annotation.removed", + "old": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", + "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", + "annotation": "@edu.umd.cs.findbugs.annotations.NonNull", + "justification": "Method arguments were mistakenly annotated with @NonNull" + }, + { + "code": "java.annotation.added", + "old": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", + "new": "parameter java.util.Queue com.datastax.oss.driver.api.testinfra.loadbalancing.SortingLoadBalancingPolicy::newQueryPlan(com.datastax.oss.driver.api.core.session.Request, ===com.datastax.oss.driver.api.core.session.Session===)", + "annotation": "@edu.umd.cs.findbugs.annotations.Nullable", + "justification": "Method arguments were mistakenly annotated with @NonNull" + }, + { + "code": "java.method.parameterTypeParameterChanged", + "old": "parameter com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter.QueryCounterBuilder com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter::builder(===com.datastax.oss.simulacron.server.BoundTopic===)", + "new": "parameter com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter.QueryCounterBuilder 
com.datastax.oss.driver.api.testinfra.simulacron.QueryCounter::builder(===com.datastax.oss.simulacron.server.BoundTopic===)", + "justification": "Fix usage of raw type BoundTopic" + }, + { + "code": "java.field.constantValueChanged", + "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD", + "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_KEYSTORE_PASSWORD", + "justification": "JAVA-2620: Use clearly dummy passwords in tests" + }, + { + "code": "java.field.constantValueChanged", + "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD", + "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_CLIENT_TRUSTSTORE_PASSWORD", + "justification": "JAVA-2620: Use clearly dummy passwords in tests" + }, + { + "code": "java.field.constantValueChanged", + "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_KEYSTORE_PASSWORD", + "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_KEYSTORE_PASSWORD", + "justification": "JAVA-2620: Use clearly dummy passwords in tests" + }, + { + "code": "java.field.constantValueChanged", + "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_TRUSTSTORE_PASSWORD", + "new": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DEFAULT_SERVER_TRUSTSTORE_PASSWORD", + "justification": "JAVA-2620: Use clearly dummy passwords in tests" + }, + { + "code": "java.missing.newClass", + "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec", + "justification":"Dependency was made optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec.Builder", + "justification":"Dependency was made optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class com.datastax.oss.simulacron.common.cluster.QueryLog", + "justification":"Dependency was made optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class com.datastax.oss.simulacron.server.BoundCluster", + "justification":"Dependency was made optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class com.datastax.oss.simulacron.server.BoundTopic", + "justification":"Dependency was made optional" + }, + { + "code": "java.missing.newClass", + "new": "missing-class com.datastax.oss.simulacron.server.Server", + "justification":"Dependency was made optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec", + "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec", + "justification": "Dependency was made optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec.Builder", + "new": "missing-class com.datastax.oss.simulacron.common.cluster.ClusterSpec.Builder", + "justification": "Dependency was made optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class com.datastax.oss.simulacron.common.cluster.QueryLog", + "new": "missing-class com.datastax.oss.simulacron.common.cluster.QueryLog", + "justification": "Dependency was made optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class com.datastax.oss.simulacron.server.BoundCluster", + "new": "missing-class com.datastax.oss.simulacron.server.BoundCluster", + "justification": "Dependency was made optional" + }, + { + 
"code": "java.missing.oldClass", + "old": "missing-class com.datastax.oss.simulacron.server.BoundTopic", + "new": "missing-class com.datastax.oss.simulacron.server.BoundTopic", + "justification": "Dependency was made optional" + }, + { + "code": "java.missing.oldClass", + "old": "missing-class com.datastax.oss.simulacron.server.Server", + "new": "missing-class com.datastax.oss.simulacron.server.Server", + "justification": "Dependency was made optional" + }, + { + "code": "java.method.removed", + "old": "method void com.datastax.oss.driver.api.testinfra.ccm.CcmRule::reloadCore(int, java.lang.String, java.lang.String, boolean)", + "justification": "Modifying the state of a globally shared CCM instance is dangerous" + }, + { + "code": "java.method.removed", + "old": "method java.util.Optional com.datastax.oss.driver.api.testinfra.ccm.BaseCcmRule::getDseVersion()", + "justification": "Method has been replaced with more generic isDistributionOf(BackendType) and getDistributionVersion()" + }, + { + "code": "java.field.removed", + "old": "field com.datastax.oss.driver.api.testinfra.ccm.CcmBridge.DSE_ENABLEMENT", + "justification": "Method has been replaced with more generic isDistributionOf(BackendType) and getDistributionVersion()" + }, + { + "code": "java.method.nowStatic", + "old": "method com.datastax.oss.driver.api.core.Version com.datastax.oss.driver.api.testinfra.ccm.CcmBridge::getCassandraVersion()", + "new": "method com.datastax.oss.driver.api.core.Version com.datastax.oss.driver.api.testinfra.ccm.CcmBridge::getCassandraVersion()", + "justification": "Previous and current implemntation do not relay on non-static fields" + }, + { + "code": "java.method.removed", + "old": "method java.util.Optional com.datastax.oss.driver.api.testinfra.ccm.CcmBridge::getDseVersion()", + "justification": "Method has been replaced with more generic isDistributionOf(BackendType) and getDistributionVersion()" } ] } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java index e28757e420f..acbee82f3b3 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraRequirement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +23,11 @@ /** * Annotation for a Class or Method that defines a Cassandra Version requirement. If the cassandra * version in use does not meet the version requirement, the test is skipped. 
+ * + * @deprecated Replaced by {@link + * com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement} */ +@Deprecated @Retention(RetentionPolicy.RUNTIME) public @interface CassandraRequirement { diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java index 4e40f2788f7..83c27b45e3b 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/CassandraResourceRule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java index c80c6914282..c1c4249620f 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/DseRequirement.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +23,11 @@ /** * Annotation for a Class or Method that defines a DSE Version requirement. If the DSE version in * use does not meet the version requirement or DSE isn't used at all, the test is skipped. 
+ * + * @deprecated Replaced by {@link + * com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement} */ +@Deprecated @Retention(RetentionPolicy.RUNTIME) public @interface DseRequirement { diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java index 42c754f0dba..882cd55b948 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/BaseCcmRule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,10 +20,9 @@ import com.datastax.oss.driver.api.core.DefaultProtocolVersion; import com.datastax.oss.driver.api.core.ProtocolVersion; import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.testinfra.CassandraRequirement; import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; -import com.datastax.oss.driver.api.testinfra.DseRequirement; -import java.util.Optional; +import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirementRule; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import org.junit.AssumptionViolatedException; import org.junit.runner.Description; import org.junit.runners.model.Statement; @@ -37,7 +38,7 @@ public abstract class BaseCcmRule extends CassandraResourceRule { new Thread( () -> { try { - ccmBridge.remove(); + ccmBridge.close(); } catch (Exception e) { // silently remove as may have already been removed. } @@ -52,96 +53,48 @@ protected void before() { @Override protected void after() { - ccmBridge.remove(); - } - - private Statement buildErrorStatement( - Version requirement, String description, boolean lessThan, boolean dse) { - return new Statement() { - - @Override - public void evaluate() { - throw new AssumptionViolatedException( - String.format( - "Test requires %s %s %s but %s is configured. Description: %s", - lessThan ? "less than" : "at least", - dse ? "DSE" : "C*", - requirement, - dse ? ccmBridge.getDseVersion().orElse(null) : ccmBridge.getCassandraVersion(), - description)); - } - }; + ccmBridge.close(); } @Override public Statement apply(Statement base, Description description) { - // If test is annotated with CassandraRequirement or DseRequirement, ensure configured CCM - // cluster meets those requirements. - CassandraRequirement cassandraRequirement = - description.getAnnotation(CassandraRequirement.class); - - if (cassandraRequirement != null) { - // if the configured cassandra cassandraRequirement exceeds the one being used skip this test. 
- if (!cassandraRequirement.min().isEmpty()) { - Version minVersion = Version.parse(cassandraRequirement.min()); - if (minVersion.compareTo(ccmBridge.getCassandraVersion()) > 0) { - return buildErrorStatement(minVersion, cassandraRequirement.description(), false, false); - } - } - - if (!cassandraRequirement.max().isEmpty()) { - // if the test version exceeds the maximum configured one, fail out. - Version maxVersion = Version.parse(cassandraRequirement.max()); - - if (maxVersion.compareTo(ccmBridge.getCassandraVersion()) <= 0) { - return buildErrorStatement(maxVersion, cassandraRequirement.description(), true, false); + if (BackendRequirementRule.meetsDescriptionRequirements(description)) { + return super.apply(base, description); + } else { + // requirements not met, throw reasoning assumption to skip test + return new Statement() { + @Override + public void evaluate() { + throw new AssumptionViolatedException( + BackendRequirementRule.buildReasonString(description)); } - } + }; } + } - DseRequirement dseRequirement = description.getAnnotation(DseRequirement.class); - if (dseRequirement != null) { - Optional dseVersionOption = ccmBridge.getDseVersion(); - if (!dseVersionOption.isPresent()) { - return new Statement() { - - @Override - public void evaluate() { - throw new AssumptionViolatedException("Test Requires DSE but C* is configured."); - } - }; - } else { - Version dseVersion = dseVersionOption.get(); - if (!dseRequirement.min().isEmpty()) { - Version minVersion = Version.parse(dseRequirement.min()); - if (minVersion.compareTo(dseVersion) > 0) { - return buildErrorStatement(minVersion, dseRequirement.description(), false, true); - } - } + public BackendType getDistribution() { + return CcmBridge.DISTRIBUTION; + } - if (!dseRequirement.max().isEmpty()) { - Version maxVersion = Version.parse(dseRequirement.max()); + public boolean isDistributionOf(BackendType type) { + return CcmBridge.isDistributionOf(type); + } - if (maxVersion.compareTo(ccmBridge.getCassandraVersion()) <= 0) { - return buildErrorStatement(maxVersion, dseRequirement.description(), true, true); - } - } - } - } - return super.apply(base, description); + public boolean isDistributionOf(BackendType type, CcmBridge.VersionComparator comparator) { + return CcmBridge.isDistributionOf(type, comparator); } - public Version getCassandraVersion() { - return ccmBridge.getCassandraVersion(); + public Version getDistributionVersion() { + return CcmBridge.getDistributionVersion(); } - public Optional getDseVersion() { - return ccmBridge.getDseVersion(); + public Version getCassandraVersion() { + return CcmBridge.getCassandraVersion(); } @Override public ProtocolVersion getHighestProtocolVersion() { - if (ccmBridge.getCassandraVersion().compareTo(Version.V2_2_0) >= 0) { + if (CcmBridge.getCassandraVersion().compareTo(Version.V2_2_0) >= 0) { return DefaultProtocolVersion.V4; } else { return DefaultProtocolVersion.V3; diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java index 54783a3d664..f0ce6bc5b0e 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmBridge.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +17,8 @@ */ package com.datastax.oss.driver.api.testinfra.ccm; -import static io.netty.util.internal.PlatformDependent.isWindows; - import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.shaded.guava.common.base.Joiner; import com.datastax.oss.driver.shaded.guava.common.io.Resources; import java.io.File; @@ -32,7 +33,9 @@ import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -44,46 +47,33 @@ import org.apache.commons.exec.Executor; import org.apache.commons.exec.LogOutputStream; import org.apache.commons.exec.PumpStreamHandler; +import org.assertj.core.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class CcmBridge implements AutoCloseable { - private static final Logger logger = LoggerFactory.getLogger(CcmBridge.class); - - private final int[] nodes; - - private final Path configDirectory; - - private final AtomicBoolean started = new AtomicBoolean(); - - private final AtomicBoolean created = new AtomicBoolean(); + private static final Logger LOG = LoggerFactory.getLogger(CcmBridge.class); - private final String ipPrefix; - - private final Map cassandraConfiguration; - private final Map dseConfiguration; - private final List rawDseYaml; - private final List createOptions; - private final List dseWorkloads; - - private final String jvmArgs; - - public static final Version VERSION = Version.parse(System.getProperty("ccm.version", "3.11.0")); + public static BackendType DISTRIBUTION = + BackendType.valueOf( + System.getProperty("ccm.distribution", BackendType.CASSANDRA.name()).toUpperCase()); + public static final Version VERSION = + Objects.requireNonNull(Version.parse(System.getProperty("ccm.version", "4.0.0"))); public static final String INSTALL_DIRECTORY = System.getProperty("ccm.directory"); public static final String BRANCH = System.getProperty("ccm.branch"); - public static final Boolean DSE_ENABLEMENT = Boolean.getBoolean("ccm.dse"); + public static final String CLUSTER_NAME = "ccm_1"; - public static final String DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "fakePasswordForTests"; public static final String DEFAULT_CLIENT_TRUSTSTORE_PATH = "/client.truststore"; public static final File DEFAULT_CLIENT_TRUSTSTORE_FILE = createTempStore(DEFAULT_CLIENT_TRUSTSTORE_PATH); - public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = 
"fakePasswordForTests"; public static final String DEFAULT_CLIENT_KEYSTORE_PATH = "/client.keystore"; public static final File DEFAULT_CLIENT_KEYSTORE_FILE = @@ -93,13 +83,13 @@ public class CcmBridge implements AutoCloseable { public static final File DEFAULT_CLIENT_PRIVATE_KEY_FILE = createTempStore("/client.key"); public static final File DEFAULT_CLIENT_CERT_CHAIN_FILE = createTempStore("/client.crt"); - public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "fakePasswordForTests"; public static final String DEFAULT_SERVER_TRUSTSTORE_PATH = "/server.truststore"; private static final File DEFAULT_SERVER_TRUSTSTORE_FILE = createTempStore(DEFAULT_SERVER_TRUSTSTORE_PATH); - public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "fakePasswordForTests"; public static final String DEFAULT_SERVER_KEYSTORE_PATH = "/server.keystore"; private static final File DEFAULT_SERVER_KEYSTORE_FILE = @@ -113,15 +103,34 @@ public class CcmBridge implements AutoCloseable { createTempStore(DEFAULT_SERVER_LOCALHOST_KEYSTORE_PATH); // major DSE versions - private static final Version V6_0_0 = Version.parse("6.0.0"); - private static final Version V5_1_0 = Version.parse("5.1.0"); - private static final Version V5_0_0 = Version.parse("5.0.0"); + public static final Version V6_0_0 = Version.parse("6.0.0"); + public static final Version V5_1_0 = Version.parse("5.1.0"); + public static final Version V5_0_0 = Version.parse("5.0.0"); // mapped C* versions from DSE versions - private static final Version V4_0_0 = Version.parse("4.0.0"); - private static final Version V3_10 = Version.parse("3.10"); - private static final Version V3_0_15 = Version.parse("3.0.15"); - private static final Version V2_1_19 = Version.parse("2.1.19"); + public static final Version V4_0_0 = Version.parse("4.0.0"); + public static final Version V3_10 = Version.parse("3.10"); + public static final Version V3_0_15 = Version.parse("3.0.15"); + public static final Version V2_1_19 = Version.parse("2.1.19"); + + // mapped C* versions from HCD versions + public static final Version V4_0_11 = Version.parse("4.0.11"); + + static { + LOG.info("CCM Bridge configured with {} version {}", DISTRIBUTION.getFriendlyName(), VERSION); + } + + private final int[] nodes; + private final Path configDirectory; + private final AtomicBoolean started = new AtomicBoolean(); + private final AtomicBoolean created = new AtomicBoolean(); + private final String ipPrefix; + private final Map cassandraConfiguration; + private final Map dseConfiguration; + private final List rawDseYaml; + private final List createOptions; + private final List dseWorkloads; + private final String jvmArgs; private CcmBridge( Path configDirectory, @@ -138,10 +147,7 @@ private CcmBridge( // Hack to ensure that the default DC is always called 'dc1': pass a list ('-nX:0') even if // there is only one DC (with '-nX', CCM configures `SimpleSnitch`, which hard-codes the name // to 'datacenter1') - int[] tmp = new int[2]; - tmp[0] = nodes[0]; - tmp[1] = 0; - this.nodes = tmp; + this.nodes = new int[] {nodes[0], 0}; } else { this.nodes = nodes; } @@ -165,25 +171,48 @@ private CcmBridge( this.dseWorkloads = dseWorkloads; } - public Optional getDseVersion() { - return DSE_ENABLEMENT ? 
Optional.of(VERSION) : Optional.empty(); + // Copied from Netty's PlatformDependent to avoid the dependency on Netty + private static boolean isWindows() { + return System.getProperty("os.name", "").toLowerCase(Locale.US).contains("win"); } - public Version getCassandraVersion() { - if (!DSE_ENABLEMENT) { + public static boolean isDistributionOf(BackendType type) { + return DISTRIBUTION == type; + } + + public static boolean isDistributionOf(BackendType type, VersionComparator comparator) { + return isDistributionOf(type) + && comparator.accept(getDistributionVersion(), getCassandraVersion()); + } + + public static Version getDistributionVersion() { + return VERSION; + } + + public static Version getCassandraVersion() { + if (isDistributionOf(BackendType.CASSANDRA)) { return VERSION; - } else { - Version stableVersion = VERSION.nextStable(); - if (stableVersion.compareTo(V6_0_0) >= 0) { - return V4_0_0; - } else if (stableVersion.compareTo(V5_1_0) >= 0) { - return V3_10; - } else if (stableVersion.compareTo(V5_0_0) >= 0) { - return V3_0_15; - } else { - return V2_1_19; + } + return DistributionCassandraVersions.getCassandraVersion(DISTRIBUTION, VERSION); + } + + private String getCcmVersionString(Version version) { + // For 4.0 or 5.0 pre-releases, the CCM version string needs to be "4.0-alpha1", "4.0-alpha2" or + // "5.0-beta1". Version.toString() always adds a patch value, even if it's not specified when + // parsing. + if (version.getMajor() >= 4 + && version.getMinor() == 0 + && version.getPatch() == 0 + && version.getPreReleaseLabels() != null) { + // truncate the patch version from the Version string + StringBuilder sb = new StringBuilder(); + sb.append(version.getMajor()).append('.').append(version.getMinor()); + for (String preReleaseString : version.getPreReleaseLabels()) { + sb.append('-').append(preReleaseString); } + return sb.toString(); } + return version.toString(); } public void create() { @@ -194,27 +223,46 @@ public void create() { createOptions.add("-v git:" + BRANCH.trim().replaceAll("\"", "")); } else { - createOptions.add("-v " + VERSION.toString()); - } - if (DSE_ENABLEMENT) { - createOptions.add("--dse"); + createOptions.add("-v " + getCcmVersionString(VERSION)); } + createOptions.addAll(Arrays.asList(DISTRIBUTION.getCcmOptions())); execute( "create", - "ccm_1", + CLUSTER_NAME, "-i", ipPrefix, "-n", Arrays.stream(nodes).mapToObj(n -> "" + n).collect(Collectors.joining(":")), createOptions.stream().collect(Collectors.joining(" "))); + Version cassandraVersion = getCassandraVersion(); for (Map.Entry<String, Object> conf : cassandraConfiguration.entrySet()) { - execute("updateconf", String.format("%s:%s", conf.getKey(), conf.getValue())); + String originalKey = conf.getKey(); + Object originalValue = conf.getValue(); + execute( + "updateconf", + String.join( + ":", + getConfigKey(originalKey, originalValue, cassandraVersion), + getConfigValue(originalKey, originalValue, cassandraVersion))); } - if (getCassandraVersion().compareTo(Version.V2_2_0) >= 0) { - execute("updateconf", "enable_user_defined_functions:true"); + + // If we're dealing with anything more recent than 2.2, explicitly enable UDFs... but run the + // setting through our conversion process to make sure more recent versions don't have a + // problem.
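+    // A concrete illustration of that conversion (derived from getConfigKey and getConfigValue
+    // further down in this class, so the keys here are only examples): against Cassandra 4.1+,
+    // the legacy pair "enable_user_defined_functions:true" is rewritten to
+    // "user_defined_functions_enabled:true", and a pair such as "read_request_timeout_in_ms:5000"
+    // becomes "read_request_timeout:5000ms".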
+ if (cassandraVersion.compareTo(Version.V2_2_0) >= 0 || isDistributionOf(BackendType.HCD)) { + String originalKey = "enable_user_defined_functions"; + Object originalValue = "true"; + execute( + "updateconf", + String.join( + ":", + getConfigKey(originalKey, originalValue, cassandraVersion), + getConfigValue(originalKey, originalValue, cassandraVersion))); } - if (DSE_ENABLEMENT) { + + // Note that we aren't performing any substitution on DSE key/value props (at least for now) + if (isDistributionOf(BackendType.DSE)) { for (Map.Entry<String, Object> conf : dseConfiguration.entrySet()) { execute("updatedseconf", String.format("%s:%s", conf.getKey(), conf.getValue())); } @@ -228,6 +276,10 @@ public void create() { } } + public void nodetool(int node, String... args) { + execute(String.format("node%d nodetool %s", node, Joiner.on(" ").join(args))); + } + public void dsetool(int node, String... args) { execute(String.format("node%d dsetool %s", node, Joiner.on(" ").join(args))); } @@ -238,7 +290,15 @@ public void reloadCore(int node, String keyspace, String table, boolean reindex) public void start() { if (started.compareAndSet(false, true)) { - execute("start", jvmArgs, "--wait-for-binary-proto"); + List<String> cmdAndArgs = Lists.newArrayList("start", jvmArgs, "--wait-for-binary-proto"); + updateJvmVersion(cmdAndArgs); + try { + execute(cmdAndArgs.toArray(new String[0])); + } catch (RuntimeException re) { + // if something went wrong starting CCM, see if we can also dump the error + executeCheckLogError(); + throw re; + } } } @@ -261,13 +321,32 @@ public void resume(int n) { } public void start(int n) { - execute("node" + n, "start"); + List<String> cmdAndArgs = Lists.newArrayList("node" + n, "start"); + updateJvmVersion(cmdAndArgs); + execute(cmdAndArgs.toArray(new String[0])); + } + + private void updateJvmVersion(List<String> cmdAndArgs) { + overrideJvmVersionForDseWorkloads() + .ifPresent(jvmVersion -> cmdAndArgs.add(String.format("--jvm_version=%d", jvmVersion))); } public void stop(int n) { execute("node" + n, "stop"); } + public void add(int n, String dc) { + List<String> addOptions = new ArrayList<>(); + addOptions.addAll(Arrays.asList("add", "-i", ipPrefix + n, "-d", dc, "node" + n)); + addOptions.addAll(Arrays.asList(DISTRIBUTION.getCcmOptions())); + execute(addOptions.toArray(new String[0])); + start(n); + } + + public void decommission(int n) { + nodetool(n, "decommission"); + } + synchronized void execute(String... args) { String command = "ccm " @@ -291,20 +370,38 @@ synchronized void executeUnsanitized(String... 
args) { } private void execute(CommandLine cli) { - logger.debug("Executing: " + cli); + execute(cli, false); + } + + private void executeCheckLogError() { + String command = "ccm checklogerror --config-dir=" + configDirectory.toFile().getAbsolutePath(); + // force all logs to be error logs + execute(CommandLine.parse(command), true); + } + + private void execute(CommandLine cli, boolean forceErrorLogging) { + if (forceErrorLogging) { + LOG.error("Executing: " + cli); + } else { + LOG.debug("Executing: " + cli); + } ExecuteWatchdog watchDog = new ExecuteWatchdog(TimeUnit.MINUTES.toMillis(10)); try (LogOutputStream outStream = new LogOutputStream() { @Override protected void processLine(String line, int logLevel) { - logger.debug("ccmout> {}", line); + if (forceErrorLogging) { + LOG.error("ccmout> {}", line); + } else { + LOG.debug("ccmout> {}", line); + } } }; LogOutputStream errStream = new LogOutputStream() { @Override protected void processLine(String line, int logLevel) { - logger.error("ccmerr> {}", line); + LOG.error("ccmerr> {}", line); } }) { Executor executor = new DefaultExecutor(); @@ -314,8 +411,7 @@ protected void processLine(String line, int logLevel) { int retValue = executor.execute(cli); if (retValue != 0) { - logger.error( - "Non-zero exit code ({}) returned from executing ccm command: {}", retValue, cli); + LOG.error("Non-zero exit code ({}) returned from executing ccm command: {}", retValue, cli); } } catch (IOException ex) { if (watchDog.killedProcess()) { @@ -328,7 +424,9 @@ protected void processLine(String line, int logLevel) { @Override public void close() { - remove(); + if (created.compareAndSet(true, false)) { + remove(); + } } /** @@ -346,11 +444,84 @@ private static File createTempStore(String storePath) { f.deleteOnExit(); Resources.copy(CcmBridge.class.getResource(storePath), os); } catch (IOException e) { - logger.warn("Failure to write keystore, SSL-enabled servers may fail to start.", e); + LOG.warn("Failure to write keystore, SSL-enabled servers may fail to start.", e); } return f; } + /** + * Get the current JVM major version (1.8.0_372 -> 8, 11.0.19 -> 11) + * + * @return major version of current JVM + */ + private static int getCurrentJvmMajorVersion() { + String version = System.getProperty("java.version"); + if (version.startsWith("1.")) { + version = version.substring(2, 3); + } else { + int dot = version.indexOf("."); + if (dot != -1) { + version = version.substring(0, dot); + } + } + return Integer.parseInt(version); + } + + private Optional overrideJvmVersionForDseWorkloads() { + if (getCurrentJvmMajorVersion() <= 8) { + return Optional.empty(); + } + + if (!isDistributionOf(BackendType.DSE)) { + return Optional.empty(); + } + + if (getDistributionVersion().compareTo(Version.V6_9_0) >= 0) { + // DSE 6.9.0 supports only JVM 11 onwards (also with graph workload) + return Optional.empty(); + } + + if (dseWorkloads.contains("graph")) { + return Optional.of(8); + } + + return Optional.empty(); + } + + private static String IN_MS_STR = "_in_ms"; + private static int IN_MS_STR_LENGTH = IN_MS_STR.length(); + private static String ENABLE_STR = "enable_"; + private static int ENABLE_STR_LENGTH = ENABLE_STR.length(); + private static String IN_KB_STR = "_in_kb"; + private static int IN_KB_STR_LENGTH = IN_KB_STR.length(); + + @SuppressWarnings("unused") + private String getConfigKey(String originalKey, Object originalValue, Version cassandraVersion) { + + // At least for now we won't support substitutions on nested keys. 
This requires an extra + // traversal of the string + // but we'll live with that for now + if (originalKey.contains(".")) return originalKey; + if (cassandraVersion.compareTo(Version.V4_1_0) < 0) return originalKey; + if (originalKey.endsWith(IN_MS_STR)) + return originalKey.substring(0, originalKey.length() - IN_MS_STR_LENGTH); + if (originalKey.startsWith(ENABLE_STR)) + return originalKey.substring(ENABLE_STR_LENGTH) + "_enabled"; + if (originalKey.endsWith(IN_KB_STR)) + return originalKey.substring(0, originalKey.length() - IN_KB_STR_LENGTH); + return originalKey; + } + + private String getConfigValue( + String originalKey, Object originalValue, Version cassandraVersion) { + + String originalValueStr = originalValue.toString(); + if (cassandraVersion.compareTo(Version.V4_1_0) < 0) return originalValueStr; + if (originalKey.endsWith(IN_MS_STR)) return originalValueStr + "ms"; + if (originalKey.endsWith(IN_KB_STR)) return originalValueStr + "KiB"; + return originalValueStr; + } + public static Builder builder() { return new Builder(); } @@ -419,6 +590,7 @@ public Builder withCreateOption(String option) { /** Enables SSL encryption. */ public Builder withSsl() { cassandraConfiguration.put("client_encryption_options.enabled", "true"); + cassandraConfiguration.put("client_encryption_options.optional", "false"); cassandraConfiguration.put( "client_encryption_options.keystore", DEFAULT_SERVER_KEYSTORE_FILE.getAbsolutePath()); cassandraConfiguration.put( @@ -428,6 +600,7 @@ public Builder withSsl() { public Builder withSslLocalhostCn() { cassandraConfiguration.put("client_encryption_options.enabled", "true"); + cassandraConfiguration.put("client_encryption_options.optional", "false"); cassandraConfiguration.put( "client_encryption_options.keystore", DEFAULT_SERVER_LOCALHOST_KEYSTORE_FILE.getAbsolutePath()); @@ -465,4 +638,8 @@ public CcmBridge build() { dseWorkloads); } } + + public interface VersionComparator { + boolean accept(Version distribution, Version cassandra); + } } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java index eb12b6969e2..e6483c37877 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CcmRule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -97,10 +99,6 @@ public void evaluate() { return super.apply(base, description); } - public void reloadCore(int node, String keyspace, String table, boolean reindex) { - ccmBridge.reloadCore(node, keyspace, table, reindex); - } - public static CcmRule getInstance() { return INSTANCE; } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java index 1e502238e99..5ea1bf7ed3c 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/CustomCcmRule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +18,8 @@ package com.datastax.oss.driver.api.testinfra.ccm; import java.util.concurrent.atomic.AtomicReference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A rule that creates a ccm cluster that can be used in a test. This should be used if you plan on @@ -28,7 +32,8 @@ */ public class CustomCcmRule extends BaseCcmRule { - private static AtomicReference current = new AtomicReference<>(); + private static final Logger LOG = LoggerFactory.getLogger(CustomCcmRule.class); + private static final AtomicReference CURRENT = new AtomicReference<>(); CustomCcmRule(CcmBridge ccmBridge) { super(ccmBridge); @@ -36,9 +41,23 @@ public class CustomCcmRule extends BaseCcmRule { @Override protected void before() { - if (current.get() == null && current.compareAndSet(null, this)) { - super.before(); - } else if (current.get() != this) { + if (CURRENT.get() == null && CURRENT.compareAndSet(null, this)) { + try { + super.before(); + } catch (Exception e) { + // ExternalResource will not call after() when before() throws an exception + // Let's try and clean up and release the lock we have in CURRENT + LOG.warn( + "Error in CustomCcmRule before() method, attempting to clean up leftover state", e); + try { + after(); + } catch (Exception e1) { + LOG.warn("Error cleaning up CustomCcmRule before() failure", e1); + e.addSuppressed(e1); + } + throw e; + } + } else if (CURRENT.get() != this) { throw new IllegalStateException( "Attempting to use a Ccm rule while another is in use. 
This is disallowed"); } @@ -46,8 +65,11 @@ protected void before() { @Override protected void after() { - super.after(); - current.compareAndSet(this, null); + try { + super.after(); + } finally { + CURRENT.compareAndSet(this, null); + } } public CcmBridge getCcmBridge() { diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java index 01cf3888aa2..0819f785446 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DefaultCcmBridgeBuilderCustomizer.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +18,24 @@ package com.datastax.oss.driver.api.testinfra.ccm; import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +/** @see CcmRule */ +@SuppressWarnings("unused") public class DefaultCcmBridgeBuilderCustomizer { public static CcmBridge.Builder configureBuilder(CcmBridge.Builder builder) { - if (!CcmBridge.DSE_ENABLEMENT && CcmBridge.VERSION.compareTo(Version.V4_0_0) >= 0) { + if (!CcmBridge.isDistributionOf( + BackendType.DSE, (dist, cass) -> dist.nextStable().compareTo(Version.V4_0_0) >= 0) + || CcmBridge.isDistributionOf(BackendType.HCD)) { builder.withCassandraConfiguration("enable_materialized_views", true); + builder.withCassandraConfiguration("enable_sasi_indexes", true); + } + if (CcmBridge.getDistributionVersion().nextStable().compareTo(Version.V3_0_0) >= 0) { + builder.withJvmArgs("-Dcassandra.superuser_setup_delay_ms=0"); + builder.withJvmArgs("-Dcassandra.skip_wait_for_gossip_to_settle=0"); + builder.withCassandraConfiguration("num_tokens", "1"); + builder.withCassandraConfiguration("initial_token", "0"); } return builder; } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DistributionCassandraVersions.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DistributionCassandraVersions.java new file mode 100644 index 00000000000..9f7634d1b37 --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/DistributionCassandraVersions.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.ccm; + +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSortedMap; +import java.util.HashMap; +import java.util.Map; + +/** Maps each distribution version (DSE, HCD) to the Apache Cassandra version it ships. */ +public abstract class DistributionCassandraVersions { + private static final Map<BackendType, ImmutableSortedMap<Version, Version>> mappings = + new HashMap<>(); + + static { + { + // DSE + ImmutableSortedMap<Version, Version> dse = + ImmutableSortedMap.of( + Version.V1_0_0, CcmBridge.V2_1_19, + Version.V5_0_0, CcmBridge.V3_0_15, + CcmBridge.V5_1_0, CcmBridge.V3_10, + CcmBridge.V6_0_0, CcmBridge.V4_0_0); + mappings.put(BackendType.DSE, dse); + } + { + // HCD + ImmutableSortedMap<Version, Version> hcd = + ImmutableSortedMap.of(Version.V1_0_0, CcmBridge.V4_0_11); + mappings.put(BackendType.HCD, hcd); + } + } + + public static Version getCassandraVersion(BackendType type, Version version) { + ImmutableSortedMap<Version, Version> mapping = mappings.get(type); + if (mapping == null) { + return null; + } + return mapping.floorEntry(version).getValue(); + } +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/SchemaChangeSynchronizer.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/SchemaChangeSynchronizer.java new file mode 100644 index 00000000000..093d1d3f9f9 --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/ccm/SchemaChangeSynchronizer.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.ccm; + +import java.util.concurrent.Semaphore; + +/** + * Running multiple integration tests in parallel may fail with query timeouts when several schema + * changes are applied at once. This helper limits concurrently executed DDL statements to 5.
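+ * + * <p>A minimal usage sketch (hypothetical test code; the {@code session} variable and the DDL + * statement are assumptions for illustration): + * + * <pre>{@code + * SchemaChangeSynchronizer.withLock( + *     () -> session.execute("CREATE TABLE test_ks.foo (k int PRIMARY KEY)")); + * }</pre>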
+ */ +public class SchemaChangeSynchronizer { + private static final Semaphore lock = new Semaphore(5); + + public static void withLock(Runnable callback) { + try { + lock.acquire(); + try { + callback.run(); + } finally { + lock.release(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Thread interrupted while waiting to obtain DDL lock", e); + } + } +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/NodeComparator.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/NodeComparator.java new file mode 100644 index 00000000000..3e51ad10e7a --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/NodeComparator.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.loadbalancing; + +import com.datastax.oss.driver.api.core.metadata.Node; +import com.datastax.oss.driver.shaded.guava.common.primitives.UnsignedBytes; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Comparator; + +public final class NodeComparator implements Comparator<Node> { + + public static final NodeComparator INSTANCE = new NodeComparator(); + + private static final byte[] EMPTY = {}; + + private NodeComparator() {} + + @Override + public int compare(Node node1, Node node2) { + // compare address bytes, byte by byte. + byte[] address1 = + node1 + .getBroadcastAddress() + .map(InetSocketAddress::getAddress) + .map(InetAddress::getAddress) + .orElse(EMPTY); + byte[] address2 = + node2 + .getBroadcastAddress() + .map(InetSocketAddress::getAddress) + .map(InetAddress::getAddress) + .orElse(EMPTY); + + int result = UnsignedBytes.lexicographicalComparator().compare(address1, address2); + if (result != 0) { + return result; + } + + int port1 = node1.getBroadcastAddress().map(InetSocketAddress::getPort).orElse(0); + int port2 = node2.getBroadcastAddress().map(InetSocketAddress::getPort).orElse(0); + return port1 - port2; + } +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java index 678b1477dee..a0fa292b0bb 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/loadbalancing/SortingLoadBalancingPolicy.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,8 +24,7 @@ import com.datastax.oss.driver.api.core.session.Request; import com.datastax.oss.driver.api.core.session.Session; import edu.umd.cs.findbugs.annotations.NonNull; -import java.net.InetAddress; -import java.net.InetSocketAddress; +import edu.umd.cs.findbugs.annotations.Nullable; import java.util.ArrayDeque; import java.util.Map; import java.util.Queue; @@ -33,46 +34,13 @@ public class SortingLoadBalancingPolicy implements LoadBalancingPolicy { + private final Set<Node> nodes = new TreeSet<>(NodeComparator.INSTANCE); + @SuppressWarnings("unused") public SortingLoadBalancingPolicy(DriverContext context, String profileName) { // constructor needed for loading via config. } - private byte[] empty = {}; - private final Set<Node> nodes = - new TreeSet<>( - (node1, node2) -> { - // compare address bytes, byte by byte. - byte[] address1 = - node1 - .getBroadcastAddress() - .map(InetSocketAddress::getAddress) - .map(InetAddress::getAddress) - .orElse(empty); - byte[] address2 = - node2 - .getBroadcastAddress() - .map(InetSocketAddress::getAddress) - .map(InetAddress::getAddress) - .orElse(empty); - - // ipv6 vs ipv4, favor ipv6. - if (address1.length != address2.length) { - return address1.length - address2.length; - } - - for (int i = 0; i < address1.length; i++) { - int b1 = address1[i] & 0xFF; - int b2 = address2[i] & 0xFF; - if (b1 != b2) { - return b1 - b2; - } - } - int port1 = node1.getBroadcastAddress().map(InetSocketAddress::getPort).orElse(0); - int port2 = node2.getBroadcastAddress().map(InetSocketAddress::getPort).orElse(0); - return port1 - port2; - }); - public SortingLoadBalancingPolicy() {} @Override @@ -83,7 +51,7 @@ public void init(@NonNull Map<UUID, Node> nodes, @NonNull DistanceReporter dista @NonNull @Override - public Queue<Node> newQueryPlan(@NonNull Request request, @NonNull Session session) { + public Queue<Node> newQueryPlan(@Nullable Request request, @Nullable Session session) { return new ArrayDeque<>(nodes); } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirement.java new file mode 100644 index 00000000000..9b1400b6313 --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirement.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.requirement; + +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +/** + * Annotation for a class or method that defines a database backend version requirement. If the + * backend type/version in use does not meet the requirement, the test is skipped.
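+ * + * <p>A hypothetical example (the test method and version bound are assumptions for illustration): + * + * <pre>{@code + * @BackendRequirement(type = BackendType.CASSANDRA, minInclusive = "4.0") + * @Test + * public void should_run_only_on_cassandra_4_plus() { ... } + * }</pre>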
+ */ +@Repeatable(BackendRequirements.class) +@Retention(RetentionPolicy.RUNTIME) +public @interface BackendRequirement { + BackendType type(); + + String minInclusive() default ""; + + String maxExclusive() default ""; + + String description() default ""; +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirementRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirementRule.java new file mode 100644 index 00000000000..343861571e0 --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirementRule.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.requirement; + +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import org.junit.AssumptionViolatedException; +import org.junit.rules.ExternalResource; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +public class BackendRequirementRule extends ExternalResource { + @Override + public Statement apply(Statement base, Description description) { + if (meetsDescriptionRequirements(description)) { + return super.apply(base, description); + } else { + // requirements not met: throw an assumption violation explaining why, so the test is skipped + return new Statement() { + @Override + public void evaluate() { + throw new AssumptionViolatedException(buildReasonString(description)); + } + }; + } + } + + protected static BackendType getBackendType() { + return CcmBridge.DISTRIBUTION; + } + + protected static Version getVersion() { + return CcmBridge.VERSION; + } + + public static boolean meetsDescriptionRequirements(Description description) { + return VersionRequirement.meetsAny( + VersionRequirement.fromAnnotations(description), getBackendType(), getVersion()); + } + + /* Note: duplicates the annotation processing done in #meetsDescriptionRequirements */ + public static String buildReasonString(Description description) { + return VersionRequirement.buildReasonString( + VersionRequirement.fromAnnotations(description), getBackendType(), getVersion()); + } +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirements.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirements.java new file mode 100644 index 00000000000..c097c7bd430 --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendRequirements.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.requirement; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +/** Annotation to allow @BackendRequirement to be repeatable. */ +@Retention(RetentionPolicy.RUNTIME) +public @interface BackendRequirements { + BackendRequirement[] value(); +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendType.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendType.java new file mode 100644 index 00000000000..e0058ca324a --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/BackendType.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.requirement; + +public enum BackendType { + CASSANDRA("Apache Cassandra"), + DSE("DSE"), + HCD("HCD"); + + final String friendlyName; + + BackendType(String friendlyName) { + this.friendlyName = friendlyName; + } + + public String getFriendlyName() { + return friendlyName; + } + + public String[] getCcmOptions() { + if (this == CASSANDRA) { + return new String[0]; + } + return new String[] {"--" + name().toLowerCase()}; + } +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirement.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirement.java new file mode 100644 index 00000000000..6b184490a41 --- /dev/null +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirement.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.oss.driver.api.testinfra.requirement; + +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.api.testinfra.CassandraRequirement; +import com.datastax.oss.driver.api.testinfra.DseRequirement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Optional; +import java.util.stream.Collectors; +import org.junit.runner.Description; + +/** + * Used to unify the requirements specified by the @CassandraRequirement, @DseRequirement and + * @BackendRequirement annotations.
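+ * + * <p>For instance (a hypothetical illustration), {@code @DseRequirement(min = "6.8")} on a test + * is treated the same as {@code @BackendRequirement(type = BackendType.DSE, minInclusive = "6.8")}.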
+ */ +public class VersionRequirement { + final BackendType backendType; + final Optional<Version> minInclusive; + final Optional<Version> maxExclusive; + final String description; + + public VersionRequirement( + BackendType backendType, String minInclusive, String maxExclusive, String description) { + this.backendType = backendType; + this.minInclusive = + minInclusive.isEmpty() ? Optional.empty() : Optional.of(Version.parse(minInclusive)); + this.maxExclusive = + maxExclusive.isEmpty() ? Optional.empty() : Optional.of(Version.parse(maxExclusive)); + this.description = description; + } + + public BackendType getBackendType() { + return backendType; + } + + public Optional<Version> getMinInclusive() { + return minInclusive; + } + + public Optional<Version> getMaxExclusive() { + return maxExclusive; + } + + public String readableString() { + final String versionRange; + if (minInclusive.isPresent() && maxExclusive.isPresent()) { + versionRange = + String.format("%s or greater, but less than %s", minInclusive.get(), maxExclusive.get()); + } else if (minInclusive.isPresent()) { + versionRange = String.format("%s or greater", minInclusive.get()); + } else if (maxExclusive.isPresent()) { + versionRange = String.format("less than %s", maxExclusive.get()); + } else { + versionRange = "any version"; + } + + if (!description.isEmpty()) { + return String.format("%s %s [%s]", backendType.getFriendlyName(), versionRange, description); + } else { + return String.format("%s %s", backendType.getFriendlyName(), versionRange); + } + } + + public static VersionRequirement fromBackendRequirement(BackendRequirement requirement) { + return new VersionRequirement( + requirement.type(), + requirement.minInclusive(), + requirement.maxExclusive(), + requirement.description()); + } + + public static VersionRequirement fromCassandraRequirement(CassandraRequirement requirement) { + return new VersionRequirement( + BackendType.CASSANDRA, requirement.min(), requirement.max(), requirement.description()); + } + + public static VersionRequirement fromDseRequirement(DseRequirement requirement) { + return new VersionRequirement( + BackendType.DSE, requirement.min(), requirement.max(), requirement.description()); + } + + public static Collection<VersionRequirement> fromAnnotations(Description description) { + // collect all requirement annotation types + CassandraRequirement cassandraRequirement = + description.getAnnotation(CassandraRequirement.class); + DseRequirement dseRequirement = description.getAnnotation(DseRequirement.class); + // matches methods/classes with one @BackendRequirement annotation + BackendRequirement backendRequirement = description.getAnnotation(BackendRequirement.class); + // matches methods/classes with two or more @BackendRequirement annotations + BackendRequirements backendRequirements = description.getAnnotation(BackendRequirements.class); + + // build list of required versions + Collection<VersionRequirement> requirements = new ArrayList<>(); + if (cassandraRequirement != null) { + requirements.add(VersionRequirement.fromCassandraRequirement(cassandraRequirement)); + } + if (dseRequirement != null) { + requirements.add(VersionRequirement.fromDseRequirement(dseRequirement)); + } + if (backendRequirement != null) { + requirements.add(VersionRequirement.fromBackendRequirement(backendRequirement)); + } + if (backendRequirements != null) { + Arrays.stream(backendRequirements.value()) + .forEach(r -> requirements.add(VersionRequirement.fromBackendRequirement(r))); + } + return requirements; + } + + public static boolean meetsAny( + Collection<VersionRequirement> requirements, + BackendType configuredBackend, + Version configuredVersion) { + // special case: if there are no requirements then any backend/version is sufficient + if (requirements.isEmpty()) { + return true; + } + + return requirements.stream() + .anyMatch( + requirement -> { + // requirement is for a different db type + if (requirement.getBackendType() != configuredBackend) { + return false; + } + + // configured version is less than requirement min + if (requirement.getMinInclusive().isPresent()) { + if
(requirement.getMinInclusive().get().compareTo(configuredVersion) > 0) { + return false; + } + } + + // configured version is greater than or equal to requirement max + if (requirement.getMaxExclusive().isPresent()) { + if (requirement.getMaxExclusive().get().compareTo(configuredVersion) <= 0) { + return false; + } + } + + // backend type and version range match + return true; + }); + } + + public static String buildReasonString( + Collection<VersionRequirement> requirements, BackendType backend, Version version) { + return String.format( + "Test requires one of:\n%s\nbut configuration is %s %s.", + requirements.stream() + .map(req -> String.format(" - %s", req.readableString())) + .collect(Collectors.joining("\n")), + backend.getFriendlyName(), + version); + } +} diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java index 3d864ab007c..8f392dca0bf 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/CqlSessionRuleBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +17,84 @@ */ package com.datastax.oss.driver.api.testinfra.session; +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.dse.driver.api.core.config.DseDriverOption; import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; +import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; +import com.typesafe.config.Config; +import com.typesafe.config.ConfigValueFactory; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; public class CqlSessionRuleBuilder extends SessionRuleBuilder<CqlSessionRuleBuilder, CqlSession> { + private static final AtomicInteger GRAPH_NAME_INDEX = new AtomicInteger(); + public CqlSessionRuleBuilder(CassandraResourceRule cassandraResource) { super(cassandraResource); } @Override public SessionRule<CqlSession> build() { + + final String graphName; + final DriverConfigLoader actualLoader; + + Supplier<Config> actualSupplier; + + if (createGraph) { + graphName = "dsedrivertests_" + GRAPH_NAME_INDEX.getAndIncrement(); + + // Inject the generated graph name in the provided configuration, so that the test doesn't + // need to set it explicitly on every statement.
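+ // (Hypothetical illustration, not part of the original change: with a generated name such + // as "dsedrivertests_0", the decorated supplier below yields configs where + // DseDriverOption.GRAPH_NAME is set to that value, so graph statements no longer need an + // explicit setGraphName() call.)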
+ if (loader == null) { + // This would normally be handled in DseSessionBuilder, do it early because we need it now + loader = new DefaultDriverConfigLoader(); + } else { + // To keep this relatively simple we assume that if the config loader was provided in a + // test, it is the Typesafe-config based one. This is always true in our integration tests. + assertThat(loader).isInstanceOf(DefaultDriverConfigLoader.class); + } + Supplier<Config> originalSupplier = ((DefaultDriverConfigLoader) loader).getConfigSupplier(); + actualSupplier = + () -> + originalSupplier + .get() + .withValue( + DseDriverOption.GRAPH_NAME.getPath(), + ConfigValueFactory.fromAnyRef(graphName)); + } else { + graphName = null; + if (loader == null) { + loader = new DefaultDriverConfigLoader(); + } + + actualSupplier = ((DefaultDriverConfigLoader) loader).getConfigSupplier(); + } + + actualLoader = + new DefaultDriverConfigLoader( + () -> + graphProtocol != null + ? actualSupplier + .get() + .withValue( + DseDriverOption.GRAPH_SUB_PROTOCOL.getPath(), + ConfigValueFactory.fromAnyRef(graphProtocol)) + // will use the protocol from the config file (in application.conf if + // defined or in reference.conf) + : actualSupplier.get()); + return new SessionRule<>( - cassandraResource, createKeyspace, nodeStateListener, schemaChangeListener, loader); + cassandraResource, + createKeyspace, + nodeStateListener, + schemaChangeListener, + actualLoader, + graphName, + isCoreGraph); } } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java index 585aaff2aa7..3b792374769 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +17,9 @@ */ package com.datastax.oss.driver.api.testinfra.session; +import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.Version; import com.datastax.oss.driver.api.core.config.DriverConfigLoader; import com.datastax.oss.driver.api.core.config.DriverExecutionProfile; import com.datastax.oss.driver.api.core.cql.SimpleStatement; @@ -24,7 +28,12 @@ import com.datastax.oss.driver.api.core.metadata.schema.SchemaChangeListener; import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; +import com.datastax.oss.driver.api.testinfra.ccm.BaseCcmRule; +import com.datastax.oss.driver.api.testinfra.ccm.CcmBridge; +import com.datastax.oss.driver.api.testinfra.ccm.SchemaChangeSynchronizer; +import com.datastax.oss.driver.api.testinfra.requirement.BackendType; import com.datastax.oss.driver.api.testinfra.simulacron.SimulacronRule; +import java.util.Objects; import org.junit.rules.ExternalResource; /** @@ -54,12 +63,16 @@ */ public class SessionRule<SessionT extends Session> extends ExternalResource { + private static final Version V6_8_0 = Objects.requireNonNull(Version.parse("6.8.0")); + // the CCM or Simulacron rule to depend on private final CassandraResourceRule cassandraResource; private final NodeStateListener nodeStateListener; private final SchemaChangeListener schemaChangeListener; private final CqlIdentifier keyspace; private final DriverConfigLoader configLoader; + private final String graphName; + private final boolean isCoreGraph; // the session that is auto created for this rule and is tied to the given keyspace. private SessionT session; @@ -81,7 +94,9 @@ public SessionRule( boolean createKeyspace, NodeStateListener nodeStateListener, SchemaChangeListener schemaChangeListener, - DriverConfigLoader configLoader) { + DriverConfigLoader configLoader, + String graphName, + boolean isCoreGraph) { this.cassandraResource = cassandraResource; this.nodeStateListener = nodeStateListener; this.schemaChangeListener = schemaChangeListener; @@ -90,6 +105,41 @@ public SessionRule( ? null : SessionUtils.uniqueKeyspaceId(); this.configLoader = configLoader; + this.graphName = graphName; + this.isCoreGraph = isCoreGraph; + } + + public SessionRule( + CassandraResourceRule cassandraResource, + boolean createKeyspace, + NodeStateListener nodeStateListener, + SchemaChangeListener schemaChangeListener, + DriverConfigLoader configLoader, + String graphName) { + this( + cassandraResource, + createKeyspace, + nodeStateListener, + schemaChangeListener, + configLoader, + graphName, + false); + } + + public SessionRule( + CassandraResourceRule cassandraResource, + boolean createKeyspace, + NodeStateListener nodeStateListener, + SchemaChangeListener schemaChangeListener, + DriverConfigLoader configLoader) { + this( + cassandraResource, + createKeyspace, + nodeStateListener, + schemaChangeListener, + configLoader, + null, + false); } @Override @@ -104,12 +154,51 @@ protected void before() { SimpleStatement.newInstance(String.format("USE %s", keyspace.asCql(false))), Statement.SYNC); } + if (graphName != null) { + BaseCcmRule rule = + (cassandraResource instanceof BaseCcmRule) ?
((BaseCcmRule) cassandraResource) : null; + if (rule == null || !CcmBridge.isDistributionOf(BackendType.DSE)) { + throw new IllegalArgumentException("DseSessionRule should work with DSE."); + } + if (rule.getDistributionVersion().compareTo(V6_8_0) >= 0) { + session() + .execute( + ScriptGraphStatement.newInstance( + String.format( + "system.graph('%s').ifNotExists()%s.create()", + this.graphName, isCoreGraph ? ".coreEngine()" : ".classicEngine()")) + .setSystemQuery(true), + ScriptGraphStatement.SYNC); + } else { + if (isCoreGraph) { + throw new IllegalArgumentException( + "Core graph is not supported for DSE version < " + V6_8_0); + } + session() + .execute( + ScriptGraphStatement.newInstance( + String.format("system.graph('%s').ifNotExists().create()", this.graphName)) + .setSystemQuery(true), + ScriptGraphStatement.SYNC); + } + } } @Override protected void after() { + if (graphName != null) { + session() + .execute( + ScriptGraphStatement.newInstance( + String.format("system.graph('%s').drop()", this.graphName)) + .setSystemQuery(true), + ScriptGraphStatement.SYNC); + } if (keyspace != null) { - SessionUtils.dropKeyspace(session, keyspace, slowProfile); + SchemaChangeSynchronizer.withLock( + () -> { + SessionUtils.dropKeyspace(session, keyspace, slowProfile); + }); } session.close(); } @@ -128,6 +217,10 @@ public CqlIdentifier keyspace() { return keyspace; } + public String getGraphName() { + return graphName; + } + /** @return a config profile where the request timeout is 30 seconds. * */ public DriverExecutionProfile slowProfile() { return slowProfile; diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java index 5bc56b0c4a2..62c5babbf1d 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionRuleBuilder.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,6 +33,9 @@ public abstract class SessionRuleBuilder< protected NodeStateListener nodeStateListener; protected SchemaChangeListener schemaChangeListener; protected DriverConfigLoader loader; + protected boolean createGraph; + protected boolean isCoreGraph; + protected String graphProtocol; @SuppressWarnings("unchecked") protected final SelfT self = (SelfT) this; @@ -74,5 +79,30 @@ public SelfT withSchemaChangeListener(SchemaChangeListener listener) { return self; } + /** + * Configures the rule to create a new graph instance. + * + *

<p>This assumes that the associated {@link CassandraResourceRule} is a DSE instance with the + * graph workload enabled. + * + *

<p>The name of the graph will be injected in the session's configuration, so that all graph + * statements are automatically routed to it. It's also exposed via {@link + * SessionRule#getGraphName()}. + */ + public SelfT withCreateGraph() { + this.createGraph = true; + return self; + } + + public SelfT withCoreEngine() { + this.isCoreGraph = true; + return self; + } + + public SelfT withGraphProtocol(String graphProtocol) { + this.graphProtocol = graphProtocol; + return self; + } + public abstract SessionRule<SessionT> build(); } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java index 34f5554ccb2..7536c0ffdc0 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/session/SessionUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,6 +31,7 @@ import com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.session.SessionBuilder; import com.datastax.oss.driver.api.testinfra.CassandraResourceRule; +import com.datastax.oss.driver.internal.core.loadbalancing.helper.NodeFilterToDistanceEvaluatorAdapter; import java.lang.reflect.Method; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; @@ -60,39 +63,46 @@ * SessionRule} provides a simpler alternative.
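+ * + * <p>A minimal usage sketch (hypothetical test code; assumes a CCM-backed rule and an existing + * keyspace): + * + * <pre>{@code + * try (CqlSession session = SessionUtils.newSession(ccmRule, CqlIdentifier.fromCql("test_ks"))) { + *   session.execute("SELECT release_version FROM system.local"); + * } + * }</pre>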
*/ public class SessionUtils { + + public static final String SESSION_BUILDER_CLASS_PROPERTY = "session.builder"; + private static final Logger LOG = LoggerFactory.getLogger(SessionUtils.class); private static final AtomicInteger keyspaceId = new AtomicInteger(); private static final String DEFAULT_SESSION_CLASS_NAME = CqlSession.class.getName(); - private static final String SESSION_BUILDER_CLASS = - System.getProperty("session.builder", DEFAULT_SESSION_CLASS_NAME); + + private static String getSessionBuilderClass() { + return System.getProperty(SESSION_BUILDER_CLASS_PROPERTY, DEFAULT_SESSION_CLASS_NAME); + } @SuppressWarnings("unchecked") public static SessionBuilder baseBuilder() { + String sessionBuilderClass = getSessionBuilderClass(); try { - Class clazz = Class.forName(SESSION_BUILDER_CLASS); + Class clazz = Class.forName(sessionBuilderClass); Method m = clazz.getMethod("builder"); return (SessionBuilder) m.invoke(null); } catch (Exception e) { LOG.warn( "Could not construct SessionBuilder from {} using builder(), using default " + "implementation.", - SESSION_BUILDER_CLASS, + sessionBuilderClass, e); return (SessionBuilder) CqlSession.builder(); } } public static ProgrammaticDriverConfigLoaderBuilder configLoaderBuilder() { + String sessionBuilderClass = getSessionBuilderClass(); try { - Class clazz = Class.forName(SESSION_BUILDER_CLASS); + Class clazz = Class.forName(sessionBuilderClass); Method m = clazz.getMethod("configLoaderBuilder"); return (ProgrammaticDriverConfigLoaderBuilder) m.invoke(null); } catch (Exception e) { - if (!SESSION_BUILDER_CLASS.equals(DEFAULT_SESSION_CLASS_NAME)) { + if (!sessionBuilderClass.equals(DEFAULT_SESSION_CLASS_NAME)) { LOG.warn( "Could not construct ProgrammaticDriverConfigLoaderBuilder from {} using " + "configLoaderBuilder(), using default implementation.", - SESSION_BUILDER_CLASS, + sessionBuilderClass, e); } return DriverConfigLoader.programmaticBuilder(); @@ -130,34 +140,34 @@ public static SessionT newSession( return newSession(cassandraResourceRule, keyspace, null, null, null, loader); } - private static SessionBuilder builder( + private static SessionBuilder builder( CassandraResourceRule cassandraResource, CqlIdentifier keyspace, NodeStateListener nodeStateListener, SchemaChangeListener schemaChangeListener, Predicate nodeFilter) { - SessionBuilder builder = - baseBuilder() - .addContactEndPoints(cassandraResource.getContactPoints()) - .withKeyspace(keyspace) - .withNodeStateListener(nodeStateListener) - .withSchemaChangeListener(schemaChangeListener); + SessionBuilder builder = baseBuilder(); + builder + .addContactEndPoints(cassandraResource.getContactPoints()) + .withKeyspace(keyspace) + .withNodeStateListener(nodeStateListener) + .withSchemaChangeListener(schemaChangeListener); if (nodeFilter != null) { - builder = builder.withNodeFilter(nodeFilter); + builder.withNodeDistanceEvaluator(new NodeFilterToDistanceEvaluatorAdapter(nodeFilter)); } return builder; } - @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) + @SuppressWarnings({"TypeParameterUnusedInFormals"}) public static SessionT newSession( CassandraResourceRule cassandraResource, CqlIdentifier keyspace, NodeStateListener nodeStateListener, SchemaChangeListener schemaChangeListener, Predicate nodeFilter) { - SessionBuilder builder = + SessionBuilder builder = builder(cassandraResource, keyspace, nodeStateListener, schemaChangeListener, nodeFilter); - return (SessionT) builder.build(); + return builder.build(); } @SuppressWarnings({"unchecked", 
"TypeParameterUnusedInFormals"}) @@ -168,7 +178,7 @@ public static SessionT newSession( SchemaChangeListener schemaChangeListener, Predicate nodeFilter, DriverConfigLoader loader) { - SessionBuilder builder = + SessionBuilder builder = builder(cassandraResource, keyspace, nodeStateListener, schemaChangeListener, nodeFilter); return (SessionT) builder.withConfigLoader(loader).build(); } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java index ffca180c13f..90a0050265c 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/QueryCounter.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +18,8 @@ package com.datastax.oss.driver.api.testinfra.simulacron; import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; -import com.datastax.oss.driver.api.testinfra.utils.ConditionChecker; import com.datastax.oss.simulacron.common.cluster.QueryLog; import com.datastax.oss.simulacron.server.BoundNode; import com.datastax.oss.simulacron.server.BoundTopic; @@ -51,7 +53,7 @@ public enum NotificationMode { } private QueryCounter( - BoundTopic topic, + BoundTopic topic, NotificationMode notificationMode, Predicate queryLogFilter, long beforeTimeout, @@ -68,7 +70,7 @@ private QueryCounter( } /** Creates a builder that tracks counts for the given {@link BoundTopic} (cluster, dc, node). */ - public static QueryCounterBuilder builder(BoundTopic topic) { + public static QueryCounterBuilder builder(BoundTopic topic) { return new QueryCounterBuilder(topic); } @@ -83,10 +85,10 @@ public void clearCounts() { * expected count within the configured time period. */ public void assertTotalCount(int expected) { - ConditionChecker.checkThat(() -> assertThat(totalCount.get()).isEqualTo(expected)) - .every(10, TimeUnit.MILLISECONDS) - .before(beforeTimeout, beforeUnit) - .becomesTrue(); + await() + .pollInterval(10, TimeUnit.MILLISECONDS) + .atMost(beforeTimeout, beforeUnit) + .untilAsserted(() -> assertThat(totalCount.get()).isEqualTo(expected)); } /** @@ -104,24 +106,25 @@ public void assertNodeCounts(int... 
counts) { expectedCounts.put(id, counts[id]); } } - ConditionChecker.checkThat(() -> assertThat(countMap).containsAllEntriesOf(expectedCounts)) - .every(10, TimeUnit.MILLISECONDS) - .before(beforeTimeout, beforeUnit) - .becomesTrue(); + await() + .pollInterval(10, TimeUnit.MILLISECONDS) + .atMost(beforeTimeout, beforeUnit) + .untilAsserted(() -> assertThat(countMap).containsAllEntriesOf(expectedCounts)); } public static class QueryCounterBuilder { - @SuppressWarnings("deprecation") - private static Predicate<QueryLog> DEFAULT_FILTER = (q) -> !q.getQuery().isEmpty(); + @SuppressWarnings("UnnecessaryLambda") + private static final Predicate<QueryLog> DEFAULT_FILTER = (q) -> !q.getQuery().isEmpty(); + + private final BoundTopic topic; private Predicate<QueryLog> queryLogFilter = DEFAULT_FILTER; - private BoundTopic topic; private NotificationMode notificationMode = NotificationMode.BEFORE_PROCESSING; private long beforeTimeout = 1; private TimeUnit beforeUnit = TimeUnit.SECONDS; - private QueryCounterBuilder(BoundTopic topic) { + private QueryCounterBuilder(BoundTopic topic) { this.topic = topic; } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java index 8b4b9ce9e7a..d958d097a5d 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/simulacron/SimulacronRule.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,6 +34,7 @@ public class SimulacronRule extends CassandraResourceRule { // TODO perhaps share server some other way // TODO: Temporarily do not release addresses to ensure IPs are always ordered + // TODO: Add a way to configure the server for multiple nodes per ip public static final Server server = Server.builder() .withAddressResolver( diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java index e57cf1fedbd..931237189c5 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/ConditionChecker.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,12 +27,21 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.BooleanSupplier; +/** + * @deprecated We've replaced this home-grown utility with Awaitility in our tests. We're preserving + * it because it was part of the public test infrastructure API, but it is no longer maintained + * and will be removed in the next major version. + * @see Awaitility homepage + */ +@Deprecated public class ConditionChecker { private static final int DEFAULT_PERIOD_MILLIS = 500; private static final int DEFAULT_TIMEOUT_MILLIS = 60000; + /** @deprecated see {@link ConditionChecker} */ + @Deprecated public static class ConditionCheckerBuilder { private long timeout = DEFAULT_TIMEOUT_MILLIS; diff --git a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java index 300befe71d4..b1d41562287 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/api/testinfra/utils/NodeUtils.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +19,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; +import static org.awaitility.Awaitility.await; import com.datastax.oss.driver.api.core.metadata.Node; import com.datastax.oss.driver.api.core.metadata.NodeState; @@ -47,9 +50,9 @@ public static void waitForDown(Node node, int timeoutSeconds) { public static void waitFor(Node node, int timeoutSeconds, NodeState nodeState) { logger.debug("Waiting for node {} to enter state {}", node, nodeState); - ConditionChecker.checkThat(() -> node.getState().equals(nodeState)) - .every(100, MILLISECONDS) - .before(timeoutSeconds, SECONDS) - .becomesTrue(); + await() + .pollInterval(100, MILLISECONDS) + .atMost(timeoutSeconds, SECONDS) + .until(() -> node.getState().equals(nodeState)); } } diff --git a/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java b/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java index 1a70c5a9c94..277fe54195d 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/assertions/Assertions.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java b/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java index 904f60f9793..9d4bd9be637 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/assertions/NodeMetadataAssert.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java b/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java index 4309d0c15fc..a8b8ea40a10 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/categories/IsolatedTests.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java b/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java index 1b6efa9115a..2c718bc08d7 100644 --- a/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java +++ b/test-infra/src/main/java/com/datastax/oss/driver/categories/ParallelizableTests.java @@ -1,11 +1,13 @@ /* - * Copyright DataStax, Inc. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/test-infra/src/main/resources/client.keystore b/test-infra/src/main/resources/client.keystore index f1030fdb377..c8b11cc0c9b 100644 Binary files a/test-infra/src/main/resources/client.keystore and b/test-infra/src/main/resources/client.keystore differ diff --git a/test-infra/src/main/resources/client.truststore b/test-infra/src/main/resources/client.truststore index f106bdee38d..169986dea99 100644 Binary files a/test-infra/src/main/resources/client.truststore and b/test-infra/src/main/resources/client.truststore differ diff --git a/test-infra/src/main/resources/server.keystore b/test-infra/src/main/resources/server.keystore index a8959b18888..c6166977b3d 100644 Binary files a/test-infra/src/main/resources/server.keystore and b/test-infra/src/main/resources/server.keystore differ diff --git a/test-infra/src/main/resources/server.truststore b/test-infra/src/main/resources/server.truststore index 1a3cfeea19f..019bca91205 100644 Binary files a/test-infra/src/main/resources/server.truststore and b/test-infra/src/main/resources/server.truststore differ diff --git a/test-infra/src/main/resources/server_localhost.keystore b/test-infra/src/main/resources/server_localhost.keystore index 05e7a559c5d..d246e430e08 100644 Binary files a/test-infra/src/main/resources/server_localhost.keystore and b/test-infra/src/main/resources/server_localhost.keystore differ diff --git a/test-infra/src/test/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirementTest.java b/test-infra/src/test/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirementTest.java new file mode 100644 index 00000000000..ccddb18c80f --- /dev/null +++ b/test-infra/src/test/java/com/datastax/oss/driver/api/testinfra/requirement/VersionRequirementTest.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.oss.driver.api.testinfra.requirement; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.datastax.oss.driver.api.core.Version; +import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableList; +import java.util.Collections; +import java.util.List; +import org.junit.Test; + +public class VersionRequirementTest { + // backend aliases + private static BackendType CASSANDRA = BackendType.CASSANDRA; + private static BackendType DSE = BackendType.DSE; + + // version numbers + private static Version V_0_0_0 = Version.parse("0.0.0"); + private static Version V_0_1_0 = Version.parse("0.1.0"); + private static Version V_1_0_0 = Version.parse("1.0.0"); + private static Version V_1_0_1 = Version.parse("1.0.1"); + private static Version V_1_1_0 = Version.parse("1.1.0"); + private static Version V_2_0_0 = Version.parse("2.0.0"); + private static Version V_2_0_1 = Version.parse("2.0.1"); + private static Version V_3_0_0 = Version.parse("3.0.0"); + private static Version V_3_1_0 = Version.parse("3.1.0"); + private static Version V_4_0_0 = Version.parse("4.0.0"); + + // requirements + private static VersionRequirement CASSANDRA_ANY = new VersionRequirement(CASSANDRA, "", "", ""); + private static VersionRequirement CASSANDRA_FROM_1_0_0 = + new VersionRequirement(CASSANDRA, "1.0.0", "", ""); + private static VersionRequirement CASSANDRA_TO_1_0_0 = + new VersionRequirement(CASSANDRA, "", "1.0.0", ""); + private static VersionRequirement CASSANDRA_FROM_1_0_0_TO_2_0_0 = + new VersionRequirement(CASSANDRA, "1.0.0", "2.0.0", ""); + private static VersionRequirement CASSANDRA_FROM_1_1_0 = + new VersionRequirement(CASSANDRA, "1.1.0", "", ""); + private static VersionRequirement CASSANDRA_FROM_3_0_0_TO_3_1_0 = + new VersionRequirement(CASSANDRA, "3.0.0", "3.1.0", ""); + private static VersionRequirement DSE_ANY = new VersionRequirement(DSE, "", "", ""); + + @Test + public void empty_requirements() { + List<VersionRequirement> req = Collections.emptyList(); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, DSE, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isTrue(); + } + + @Test + public void single_requirement_any_version() { + List<VersionRequirement> anyCassandra = Collections.singletonList(CASSANDRA_ANY); + List<VersionRequirement> anyDse = Collections.singletonList(DSE_ANY); + + assertThat(VersionRequirement.meetsAny(anyCassandra, CASSANDRA, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(anyCassandra, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(anyDse, DSE, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(anyDse, DSE, V_1_0_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(anyDse, CASSANDRA, V_0_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(anyDse, CASSANDRA, V_1_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(anyCassandra, DSE, V_0_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(anyCassandra, DSE, V_1_0_0)).isFalse(); + } + + @Test + public void single_requirement_min_only() { + List<VersionRequirement> req = Collections.singletonList(CASSANDRA_FROM_1_0_0); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA,
V_2_0_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_1_0)).isFalse(); + } + + @Test + public void single_requirement_max_only() { + List<VersionRequirement> req = Collections.singletonList(CASSANDRA_TO_1_0_0); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_1_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, DSE, V_0_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isFalse(); + } + + @Test + public void single_requirement_min_and_max() { + List<VersionRequirement> req = Collections.singletonList(CASSANDRA_FROM_1_0_0_TO_2_0_0); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_1_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_1)).isFalse(); + } + + @Test + public void multi_requirement_any_version() { + List<VersionRequirement> req = ImmutableList.of(CASSANDRA_ANY, DSE_ANY); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isTrue(); + } + + @Test + public void multi_db_requirement_min_one_any_other() { + List<VersionRequirement> req = ImmutableList.of(CASSANDRA_FROM_1_0_0, DSE_ANY); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, DSE, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); + } + + @Test + public void multi_requirement_two_ranges() { + List<VersionRequirement> req = + ImmutableList.of(CASSANDRA_FROM_1_0_0_TO_2_0_0, CASSANDRA_FROM_3_0_0_TO_3_1_0); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_3_0_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_3_1_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_4_0_0)).isFalse(); + } + + @Test + public void multi_requirement_overlapping() { + List<VersionRequirement> req = + ImmutableList.of(CASSANDRA_FROM_1_0_0_TO_2_0_0, CASSANDRA_FROM_1_1_0); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, DSE, V_1_0_0)).isFalse(); +
assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isFalse(); + } + + @Test + public void multi_requirement_not_range() { + List<VersionRequirement> req = ImmutableList.of(CASSANDRA_TO_1_0_0, CASSANDRA_FROM_1_1_0); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_0_0_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_1_0)).isTrue(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_2_0_0)).isTrue(); + + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_0)).isFalse(); + assertThat(VersionRequirement.meetsAny(req, CASSANDRA, V_1_0_1)).isFalse(); + } +} diff --git a/upgrade_guide/README.md b/upgrade_guide/README.md index 56c91124b96..56d55aaab36 100644 --- a/upgrade_guide/README.md +++ b/upgrade_guide/README.md @@ -1,5 +1,551 @@ + + ## Upgrade guide +### 4.18.1 + +#### Keystore reloading in DefaultSslEngineFactory + +`DefaultSslEngineFactory` now includes an optional keystore reloading interval, for detecting changes in the local +client keystore file. This is relevant in environments with mTLS enabled and short-lived client certificates, especially +when an application restart might not always happen between a new keystore becoming available and the previous +keystore certificate expiring. + +This feature is disabled by default for compatibility. To enable it, see `keystore-reload-interval` in `reference.conf`. + +### 4.17.0 + +#### Support for Java 17 + +With the completion of [JAVA-3042](https://datastax-oss.atlassian.net/browse/JAVA-3042), the driver now passes our automated test matrix for Java Driver releases on Java 17. +If you discover an issue with the Java Driver running on Java 17, please let us know. We will triage and address Java 17 issues. + +#### Updated API for vector search + +The 4.16.0 release introduced support for the CQL `vector` datatype. This release modifies the `CqlVector` +value type used to represent a CQL vector to make it easier to use. `CqlVector` now implements the `Iterable` interface +and adds several methods modelled on the JDK's `List` interface. For more details, see +[JAVA-3060](https://datastax-oss.atlassian.net/browse/JAVA-3060). + +The builder interface was replaced with factory methods that resemble similar methods on `CqlDuration`. +For example, the following code will create a keyspace and table, populate that table with some data, and then execute +a query that will return a `vector` type. This data is retrieved directly via `Row.getVector()` and the resulting +`CqlVector` value object can be interrogated directly.
+ +```java +try (CqlSession session = new CqlSessionBuilder().withLocalDatacenter("datacenter1").build()) { + + session.execute("DROP KEYSPACE IF EXISTS test"); + session.execute("CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + session.execute("CREATE TABLE test.foo(i int primary key, j vector<float, 3>)"); + session.execute("CREATE CUSTOM INDEX ann_index ON test.foo(j) USING 'StorageAttachedIndex'"); + session.execute("INSERT INTO test.foo (i, j) VALUES (1, [8, 2.3, 58])"); + session.execute("INSERT INTO test.foo (i, j) VALUES (2, [1.2, 3.4, 5.6])"); + session.execute("INSERT INTO test.foo (i, j) VALUES (5, [23, 18, 3.9])"); + ResultSet rs = session.execute("SELECT j FROM test.foo WHERE j ann of [3.4, 7.8, 9.1] limit 1"); + for (Row row : rs) { + CqlVector<Float> v = row.getVector(0, Float.class); + System.out.println(v); + if (Iterables.size(v) != 3) { + throw new RuntimeException("Expected vector with three dimensions"); + } + } +} +``` + +You can also use the `CqlVector` type with prepared statements: + +```java +PreparedStatement preparedInsert = session.prepare("INSERT INTO test.foo (i, j) VALUES (?,?)"); +CqlVector<Float> vector = CqlVector.newInstance(1.4f, 2.5f, 3.6f); +session.execute(preparedInsert.bind(3, vector)); +``` + +In some cases, it makes sense to access the vector directly as an array of some numerical type. This version +supports such use cases by providing a codec which translates a CQL vector to and from a primitive array. Only float arrays are supported. +You can find more information about this codec in the manual documentation on [custom codecs](../manual/core/custom_codecs/). + +### 4.15.0 + +#### CodecNotFoundException now extends DriverException + +Before [JAVA-2995](https://datastax-oss.atlassian.net/browse/JAVA-2995), `CodecNotFoundException` +extended `RuntimeException` directly. This was a discrepancy, as all other driver exceptions extend +`DriverException`, which in turn extends `RuntimeException`. + +This forced integrators to implement workarounds in order to react to all exceptions correctly. + +The change introduced by JAVA-2995 shouldn't be a problem for most users. But if your code used +logic such as the following, it won't compile anymore: + +```java +try { + doSomethingWithDriver(); +} catch (DriverException e) { +} catch (CodecNotFoundException e) { +} +``` + +You need to either reverse the catch order and catch `CodecNotFoundException` first: + +```java +try { + doSomethingWithDriver(); +} catch (CodecNotFoundException e) { +} catch (DriverException e) { +} +``` + +Or catch only `DriverException`: + +```java +try { + doSomethingWithDriver(); +} catch (DriverException e) { +} +``` + +### 4.14.0 + +#### AllNodesFailedException instead of NoNodeAvailableException in certain cases + +[JAVA-2959](https://datastax-oss.atlassian.net/browse/JAVA-2959) changed the behavior for when a +request cannot be executed because all nodes tried were busy. Previously, you would get back a +`NoNodeAvailableException`, but you will now get back an `AllNodesFailedException` where the +`getAllErrors` map contains a `NodeUnavailableException` for that node. + +#### Esri Geometry dependency now optional + +Previous versions of the Java Driver defined a mandatory dependency on the Esri geometry library. +This library offered support for primitive geometric types supported by DSE. As of driver 4.14.0 +this dependency is now optional. + +If you do not use DSE (or if you do but do not use the support for geometric types within DSE), you +should experience no disruption.
If you are using geometric types with DSE, you'll now need to +explicitly declare a dependency on the Esri library: + +```xml +<dependency> + <groupId>com.esri.geometry</groupId> + <artifactId>esri-geometry-api</artifactId> + <version>${esri.version}</version> +</dependency> +``` + +See the [integration](../manual/core/integration/#esri) section in the manual for more details. + +### 4.13.0 + +#### Enhanced support for GraalVM native images + +[JAVA-2940](https://datastax-oss.atlassian.net/browse/JAVA-2940) introduced enhanced support for +building GraalVM native images. + +If you were building a native image for your application, please verify your native image builder +configuration. Most of the extra configuration required until now is likely no longer necessary. + +Refer to this [manual page](../manual/core/graalvm) for details. + +#### Registration of multiple listeners and trackers + +[JAVA-2951](https://datastax-oss.atlassian.net/browse/JAVA-2951) introduced the ability to register +more than one instance of the following interfaces: + +* [RequestTracker](https://docs.datastax.com/en/drivers/java/4.12/com/datastax/oss/driver/api/core/tracker/RequestTracker.html) +* [NodeStateListener](https://docs.datastax.com/en/drivers/java/4.12/com/datastax/oss/driver/api/core/metadata/NodeStateListener.html) +* [SchemaChangeListener](https://docs.datastax.com/en/drivers/java/4.12/com/datastax/oss/driver/api/core/metadata/schema/SchemaChangeListener.html) + +Multiple components can now be registered both programmatically and through the configuration. _If +both approaches are used, components will add up and will all be registered_ (whereas previously, +the programmatic approach would take precedence over the configuration one). + +When using the programmatic approach to register multiple components, you should use the new +`SessionBuilder` methods `addRequestTracker`, `addNodeStateListener` and `addSchemaChangeListener`: + +```java +CqlSessionBuilder builder = CqlSession.builder(); +builder + .addRequestTracker(tracker1) + .addRequestTracker(tracker2); +builder + .addNodeStateListener(nodeStateListener1) + .addNodeStateListener(nodeStateListener2); +builder + .addSchemaChangeListener(schemaChangeListener1) + .addSchemaChangeListener(schemaChangeListener2); +``` + +To support registration of multiple components through the configuration, the following +configuration options were deprecated because they only allow one component to be declared: + +* `advanced.request-tracker.class` +* `advanced.node-state-listener.class` +* `advanced.schema-change-listener.class` + +They are still honored, but the driver will log a warning if they are used. They should now be +replaced with the following ones, which accept a list of classes to instantiate instead of just +one: + +* `advanced.request-tracker.classes` +* `advanced.node-state-listener.classes` +* `advanced.schema-change-listener.classes` + +Example: + +```hocon +datastax-java-driver { + advanced { + # RequestLogger is a driver built-in tracker + request-tracker.classes = [RequestLogger,com.example.app.MyRequestTracker] + node-state-listener.classes = [com.example.app.MyNodeStateListener1,com.example.app.MyNodeStateListener2] + schema-change-listener.classes = [com.example.app.MySchemaChangeListener] + } +} +``` + +When more than one component of the same type is registered, the driver will distribute received +signals to all components in sequence, in order of their registration, starting with the +programmatically-provided ones. If a component throws an error, the error is intercepted and logged.
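+As an illustration, here is a minimal sketch of what one of the configuration-declared components
+above could look like. The class name `MyNodeStateListener1` is taken from the example
+configuration; the single-argument `DriverContext` constructor reflects our assumption about how
+the driver reflectively instantiates classes listed in the configuration (programmatically
+registered instances can use any constructor):
+
+```java
+package com.example.app;
+
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.metadata.NodeStateListener;
+
+public class MyNodeStateListener1 implements NodeStateListener {
+
+  public MyNodeStateListener1(DriverContext context) {
+    // The context gives access to the session name, the configuration, etc.
+  }
+
+  @Override
+  public void onAdd(Node node) {
+    // Invoked when a node is first discovered.
+  }
+
+  @Override
+  public void onUp(Node node) {
+    System.out.printf("Node %s is up%n", node);
+  }
+
+  @Override
+  public void onDown(Node node) {
+    System.out.printf("Node %s is down%n", node);
+  }
+
+  @Override
+  public void onRemove(Node node) {
+    // Invoked when a node leaves the cluster.
+  }
+
+  @Override
+  public void close() {
+    // Release any resources held by this listener.
+  }
+}
+```
+
+If such a listener throws from a callback, the error is intercepted and logged as described above,
+so the remaining registered components can still receive the signal.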
+ +### 4.12.0 + +#### MicroProfile Metrics upgraded to 3.0 + +The MicroProfile Metrics library has been upgraded from version 2.4 to 3.0. Since this upgrade +involves backwards-incompatible binary changes, users of this library and of the +`java-driver-metrics-microprofile` module are required to take the appropriate action: + +* If your application is still using MicroProfile Metrics < 3.0, you can still upgrade the core + driver to 4.12, but you must now keep `java-driver-metrics-microprofile` in version 4.11 or lower, + as newer versions will not work. + +* If your application is using MicroProfile Metrics >= 3.0, then you must upgrade to driver 4.12 or + higher, as previous versions of `java-driver-metrics-microprofile` will not work. + +#### Mapper `@GetEntity` and `@SetEntity` methods can now be lenient + +Thanks to [JAVA-2935](https://datastax-oss.atlassian.net/browse/JAVA-2935), `@GetEntity` and +`@SetEntity` methods now have a new `lenient` attribute. + +If the attribute is `false` (the default value), then the source row or the target statement must +contain a matching column for every property in the entity definition. If such a column is not +found, an error will be thrown. This corresponds to the mapper's behavior prior to the +introduction of the new attribute. + +If the new attribute is explicitly set to `true`, however, the mapper will operate on a best-effort +basis and attempt to read or write all entity properties that have a matching column in the source +row or in the target statement, *leaving unmatched properties untouched*. + +This new, lenient behavior allows you to achieve the equivalent of driver 3.x +[lenient mapping](https://docs.datastax.com/en/developer/java-driver/3.10/manual/object_mapper/using/#manual-mapping). + +Read the manual pages on [@GetEntity](../manual/mapper/daos/getentity) methods and +[@SetEntity](../manual/mapper/daos/setentity) methods for more details and examples of lenient mode. + +### 4.11.0 + +#### Native protocol V5 is now production-ready + +Thanks to [JAVA-2704](https://datastax-oss.atlassian.net/browse/JAVA-2704), 4.11.0 is the first +version in the driver 4.x series to fully support Cassandra's native protocol version 5, which has +been promoted from beta to production-ready in the upcoming Cassandra 4.0 release. + +Users should not experience any disruption. When connecting to Cassandra 4.0, V5 will be +transparently selected as the protocol version to use. + +#### Customizable metric names, support for metric tags + +[JAVA-2872](https://datastax-oss.atlassian.net/browse/JAVA-2872) introduced the ability to configure +how metric identifiers are generated. Metric names can now be configured, but most importantly, +metric tags are now supported. See the [metrics](../manual/core/metrics/) section of the online +manual, or the `advanced.metrics.id-generator` section in the +[reference.conf](../manual/core/configuration/reference/) file for details. + +Users should not experience any disruption. However, those using metrics libraries that support tags +are encouraged to try out the new `TaggingMetricIdGenerator`, as it generates metric names and tags +that will look more familiar to users of libraries such as Micrometer or MicroProfile Metrics (and +look nicer when exported to Prometheus or Graphite). + +#### New `NodeDistanceEvaluator` API + +All driver built-in load-balancing policies now accept a new optional component called +[NodeDistanceEvaluator].
This component gets invoked each time a node is added to the cluster or +comes back up. If the evaluator returns a non-null distance for the node, that distance will be +used; otherwise, the driver will use its built-in logic to assign a default distance to it. + +[NodeDistanceEvaluator]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/loadbalancing/NodeDistanceEvaluator.html + +This component replaces the old "node filter" component. As a consequence, all `withNodeFilter` +methods in `SessionBuilder` are now deprecated and should be replaced by the equivalent +`withNodeDistanceEvaluator` methods. + +If you have an existing node filter implementation, it can be converted to a `NodeDistanceEvaluator` +very easily: + +```java +Predicate<Node> nodeFilter = ... +NodeDistanceEvaluator nodeEvaluator = + (node, dc) -> nodeFilter.test(node) ? null : NodeDistance.IGNORED; +``` + +The same can also be achieved with an adapter class, as shown below: + +```java +public class NodeFilterToDistanceEvaluatorAdapter implements NodeDistanceEvaluator { + + private final Predicate<Node> nodeFilter; + + public NodeFilterToDistanceEvaluatorAdapter(@NonNull Predicate<Node> nodeFilter) { + this.nodeFilter = nodeFilter; + } + + @Nullable @Override + public NodeDistance evaluateDistance(@NonNull Node node, @Nullable String localDc) { + return nodeFilter.test(node) ? null : NodeDistance.IGNORED; + } +} +``` + +Finally, the `datastax-java-driver.basic.load-balancing-policy.filter.class` configuration option +has been deprecated; it should be replaced with a node distance evaluator class defined by the +`datastax-java-driver.basic.load-balancing-policy.evaluator.class` option instead. + +### 4.10.0 + +#### Cross-datacenter failover + +[JAVA-2899](https://datastax-oss.atlassian.net/browse/JAVA-2899) re-introduced the ability to +perform cross-datacenter failover using the driver's built-in load balancing policies. See [Load +balancing](../manual/core/loadbalancing/) in the manual for details. + +Cross-datacenter failover is disabled by default; therefore, existing applications should not +experience any disruption. + +#### New `RetryVerdict` API + +[JAVA-2900](https://datastax-oss.atlassian.net/browse/JAVA-2900) introduced [`RetryVerdict`], a new +interface that allows custom retry policies to customize the request before it is retried. + +To support this, the following methods were added to the `RetryPolicy` interface; they all return +a `RetryVerdict` instance: + +1. [`onReadTimeoutVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onReadTimeoutVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-boolean-int-) +2. [`onWriteTimeoutVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onWriteTimeoutVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-com.datastax.oss.driver.api.core.servererrors.WriteType-int-int-int-) +3. [`onUnavailableVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onUnavailableVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-int-) +4.
[`onRequestAbortedVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onRequestAbortedVerdict-com.datastax.oss.driver.api.core.session.Request-java.lang.Throwable-int-) +5. [`onErrorResponseVerdict`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onErrorResponseVerdict-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.servererrors.CoordinatorException-int-) + +The following methods were deprecated and will be removed in the next major version: + +1. [`onReadTimeout`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onReadTimeout-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-boolean-int-) +2. [`onWriteTimeout`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onWriteTimeout-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-com.datastax.oss.driver.api.core.servererrors.WriteType-int-int-int-) +3. [`onUnavailable`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onUnavailable-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.ConsistencyLevel-int-int-int-) +4. [`onRequestAborted`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onRequestAborted-com.datastax.oss.driver.api.core.session.Request-java.lang.Throwable-int-) +5. [`onErrorResponse`](https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryPolicy.html#onErrorResponse-com.datastax.oss.driver.api.core.session.Request-com.datastax.oss.driver.api.core.servererrors.CoordinatorException-int-) + +Driver 4.10.0 also re-introduced a retry policy whose behavior is equivalent to the +`DowngradingConsistencyRetryPolicy` from driver 3.x. See this +[FAQ entry](https://docs.datastax.com/en/developer/java-driver/4.11/faq/#where-is-downgrading-consistency-retry-policy) +for more information. + +[`RetryVerdict`]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/retry/RetryVerdict.html + +#### Enhancements to the `Uuids` utility class + +[JAVA-2449](https://datastax-oss.atlassian.net/browse/JAVA-2449) modified the implementation of +[Uuids.random()]: this method no longer delegates to the JDK's `java.util.UUID.randomUUID()` +implementation; instead, it re-implements random UUID generation using the non-cryptographic +random number generator `java.util.Random`. + +For most users, non-cryptographic strength is enough and this change should translate into better +performance when generating UUIDs for database insertion. However, in the unlikely case where your +application requires cryptographic strength for UUID generation, you should update your code to +use `java.util.UUID.randomUUID()` instead of `com.datastax.oss.driver.api.core.uuid.Uuids.random()` +from now on. + +This release also introduces two new methods for random UUID generation: + +1. [Uuids.random(Random)]: similar to `Uuids.random()` but allows you to pass a custom instance of + `java.util.Random` and/or reuse the same instance across calls. +2. [Uuids.random(SplittableRandom)]: similar to `Uuids.random()` but uses a + `java.util.SplittableRandom` instead.
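+For instance, here is a quick sketch of how these new methods can be used to reuse a single source
+of randomness across calls (the variable names are ours):
+
+```java
+// Reuse one java.util.Random instance for several UUIDs:
+Random random = new Random();
+UUID id1 = Uuids.random(random);
+UUID id2 = Uuids.random(random);
+
+// Or draw from a SplittableRandom, for example one instance per thread:
+SplittableRandom splittable = new SplittableRandom();
+UUID id3 = Uuids.random(splittable);
+```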
+ +[Uuids.random()]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-- +[Uuids.random(Random)]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-java.util.Random- +[Uuids.random(SplittableRandom)]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/uuid/Uuids.html#random-java.util.SplittableRandom- + +#### System and DSE keyspaces automatically excluded from metadata and token map computation + +[JAVA-2871](https://datastax-oss.atlassian.net/browse/JAVA-2871) now allows more fine-grained +control over which keyspaces should qualify for metadata and token map computation, including the +ability to *exclude* keyspaces based on their names. + +From now on, the following keyspaces are automatically excluded: + +1. The `system` keyspace; +2. All keyspaces starting with `system_`; +3. DSE-specific keyspaces: + 1. All keyspaces starting with `dse_`; + 2. The `solr_admin` keyspace; + 3. The `OpsCenter` keyspace. + +This means that they will no longer show up in [Metadata.getKeyspaces()], and [TokenMap] will return +empty replicas and token ranges for them. If you need the driver to keep computing metadata and +token map for these keyspaces, you must now modify the following configuration option: +`datastax-java-driver.advanced.metadata.schema.refreshed-keyspaces`. + +[Metadata.getKeyspaces()]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/metadata/Metadata.html#getKeyspaces-- +[TokenMap]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/metadata/TokenMap.html + +#### DSE Graph dependencies are now optional + +Until driver 4.9.0, the driver declared a mandatory dependency on Apache TinkerPop, a library +required only when connecting to DSE Graph. The vast majority of Apache Cassandra users did not need +it, but were paying the price of having that heavyweight library on their application's +classpath. + +_Starting with driver 4.10.0, TinkerPop is now considered an optional dependency_. + +Regular users of Apache Cassandra who do not use DSE Graph will not notice any disruption. + +DSE Graph users, however, will now have to explicitly declare a dependency on Apache TinkerPop. This +can be achieved with Maven by adding the following dependencies to the `<dependencies>` section of +your POM file: + +```xml +<dependency> + <groupId>org.apache.tinkerpop</groupId> + <artifactId>gremlin-core</artifactId> + <version>${tinkerpop.version}</version> +</dependency> +<dependency> + <groupId>org.apache.tinkerpop</groupId> + <artifactId>tinkergraph-gremlin</artifactId> + <version>${tinkerpop.version}</version> +</dependency> +``` + +See the [integration](../manual/core/integration/#tinker-pop) section in the manual for more details +as well as a driver vs. TinkerPop version compatibility matrix. + +### 4.5.x - 4.6.0 + +These versions are subject to [JAVA-2676](https://datastax-oss.atlassian.net/browse/JAVA-2676), a +bug that causes performance degradations in certain scenarios. We strongly recommend upgrading to at +least 4.6.1. + +### 4.4.0 + +DataStax Enterprise support is now available directly in the main driver. There is no longer a +separate DSE driver. + +#### For Apache Cassandra® users + +The great news is that [reactive execution](../manual/core/reactive/) is now available for everyone. +See the `CqlSession.executeReactive` methods. + +Apart from that, the only visible change is that DSE-specific features are now exposed in the API: + +* new execution methods: `CqlSession.executeGraph`, `CqlSession.executeContinuously*`.
They all + have default implementations, so this doesn't break binary compatibility. You can just ignore them. +* new driver dependencies: TinkerPop, ESRI, Reactive Streams. If you want to keep your classpath + lean, you can exclude some dependencies when you don't use the corresponding DSE features; see the + [Integration>Driver dependencies](../manual/core/integration/#driver-dependencies) section. + +#### For DataStax Enterprise users + +Adjust your Maven coordinates to use the unified artifact: + +```xml +<!-- Replace: --> +<dependency> + <groupId>com.datastax.dse</groupId> + <artifactId>dse-java-driver-core</artifactId> + <version>2.3.0</version> +</dependency> + +<!-- By: --> +<dependency> + <groupId>com.datastax.oss</groupId> + <artifactId>java-driver-core</artifactId> + <version>4.4.0</version> +</dependency> +``` + +The new driver is a drop-in replacement for the DSE driver. Note, however, that we've deprecated a few +DSE-specific types in favor of their OSS equivalents. They still work, so you don't need to make the +changes right away; but you will get deprecation warnings: + +* `DseSession`: use `CqlSession` instead; it can now do everything that a DSE session does. This + also applies to the builder: + + ```java + // Replace: + DseSession session = DseSession.builder().build(); + + // By: + CqlSession session = CqlSession.builder().build(); + ``` +* `DseDriverConfigLoader`: the driver no longer needs DSE-specific config loaders. All the factory + methods in this class now redirect to `DriverConfigLoader`. On that note, `dse-reference.conf` + no longer exists; all the driver defaults are now in + [reference.conf](../manual/core/configuration/reference/). +* plain-text authentication: there is now a single implementation that works with both Cassandra and + DSE. If you used `DseProgrammaticPlainTextAuthProvider`, replace it with + `PlainTextProgrammaticAuthProvider`. Similarly, if you wrote a custom implementation by + subclassing `DsePlainTextAuthProviderBase`, extend `PlainTextAuthProviderBase` instead. +* `DseLoadBalancingPolicy`: DSE-specific features (the slow replica avoidance mechanism) have been + merged into `DefaultLoadBalancingPolicy`. `DseLoadBalancingPolicy` still exists for backward + compatibility, but it is now identical to the default policy. + +#### Class Loader + +The default class loader used by the driver when instantiating classes by reflection has changed. +Unless specified by the user, the driver will now use the same class loader that was used to load +the driver classes themselves, in order to ensure that implemented interfaces and implementing +classes are fully compatible. + +This should ensure a more streamlined experience for OSGi users, who no longer need to define +a specific class loader to use. + +However, if you are developing a web application and the driver jar is loaded by the web server's +system class loader (for example, because the driver jar was placed in the "/lib" folder of the web +server), then the default class loader will be the server's system class loader, and it will not be +able to locate classes declared in the web app's "WEB-INF/lib" folder, such as a custom load +balancing policy. In that case, you must explicitly use the web app's class loader, which you can +obtain in most web environments by calling `Thread.currentThread().getContextClassLoader()`: + + CqlSession.builder() .addContactEndPoint(...)
+ .withClassLoader(Thread.currentThread().getContextClassLoader()) + .build(); + +See the javadocs of [SessionBuilder.withClassLoader] for more information. + +[SessionBuilder.withClassLoader]: https://docs.datastax.com/en/drivers/java/4.11/com/datastax/oss/driver/api/core/session/SessionBuilder.html#withClassLoader-java.lang.ClassLoader- + ### 4.1.0 #### Object mapper @@ -50,7 +596,7 @@ We have dropped support for legacy protocol versions v1 and v2. As a result, the compatible with: * **Apache Cassandra®: 2.1 and above**; -* **Datastax Enterprise: 4.7 and above**. +* **DataStax Enterprise: 4.7 and above**. #### Packages @@ -67,7 +613,7 @@ import com.datastax.driver.core.Row; import com.datastax.driver.core.SimpleStatement; SimpleStatement statement = - new SimpleStatement("SELECT release_version FROM system.local"); + new SimpleStatement("SELECT release_version FROM system.local"); ResultSet resultSet = session.execute(statement); Row row = resultSet.one(); System.out.println(row.getString("release_version")); @@ -79,7 +625,7 @@ import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.SimpleStatement; SimpleStatement statement = - SimpleStatement.newInstance("SELECT release_version FROM system.local"); + SimpleStatement.newInstance("SELECT release_version FROM system.local"); ResultSet resultSet = session.execute(statement); Row row = resultSet.one(); System.out.println(row.getString("release_version")); @@ -142,9 +688,9 @@ datastax-java-driver { // Application code: SimpleStatement statement1 = - SimpleStatement.newInstance("...").setExecutionProfileName("profile1"); + SimpleStatement.newInstance("...").setExecutionProfileName("profile1"); SimpleStatement statement2 = - SimpleStatement.newInstance("...").setExecutionProfileName("profile2"); + SimpleStatement.newInstance("...").setExecutionProfileName("profile2"); ``` The configuration can be reloaded periodically at runtime: @@ -263,13 +809,13 @@ propagating its own consistency level to its bound statements: ```java PreparedStatement ps1 = - session.prepare( - SimpleStatement.newInstance("SELECT * FROM product WHERE sku = ?") - .setConsistencyLevel(DefaultConsistencyLevel.ONE)); + session.prepare( + SimpleStatement.newInstance("SELECT * FROM product WHERE sku = ?") + .setConsistencyLevel(DefaultConsistencyLevel.ONE)); PreparedStatement ps2 = - session.prepare( - SimpleStatement.newInstance("SELECT * FROM product WHERE sku = ?") - .setConsistencyLevel(DefaultConsistencyLevel.TWO)); + session.prepare( + SimpleStatement.newInstance("SELECT * FROM product WHERE sku = ?") + .setConsistencyLevel(DefaultConsistencyLevel.TWO)); assert ps1 != ps2; @@ -280,6 +826,9 @@ BoundStatement bs2 = ps2.bind(); assert bs2.getConsistencyLevel() == DefaultConsistencyLevel.TWO; ``` +DDL statements are now debounced; see [Why do DDL queries have a higher latency than driver +3?](../faq/#why-do-ddl-queries-have-a-higher-latency-than-driver-3) in the FAQ. + #### Dual result set APIs In 3.x, both synchronous and asynchronous execution models shared a common result set @@ -367,8 +916,8 @@ Optional ks = metadata.getKeyspace("test"); assert !ks.isPresent(); session.execute( - "CREATE KEYSPACE IF NOT EXISTS test " - + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); + "CREATE KEYSPACE IF NOT EXISTS test " + + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); // This is still the same metadata from before the CREATE ks = metadata.getKeyspace("test");